// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it is freeing at most 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
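
/*
 * Illustrative arithmetic: with HZ == 1000 the delay evaluates to
 * DIV_ROUND_UP(1000, 10) == 100 jiffies, i.e. one worker run every 100ms,
 * and each run frees at most ODEBUG_FREE_WORK_MAX == 1024 objects - hence
 * the ~10k objects/second ceiling described above.
 */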

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * are adjusted too.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full; if not, refill it
	 * from the global free list. As it is likely that a workload
	 * may be gearing up to use more and more objects, don't free any
	 * of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}

	raw_spin_lock_irqsave(&pool_lock, flags);
	obj_pool_used -= percpu_pool->obj_free;
	debug_objects_freed += percpu_pool->obj_free;
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We run out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
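
/*
 * Illustration (assuming PAGE_SHIFT == 12, i.e. 4k chunks): two addresses
 * within the same page, e.g. 0x...5010 and 0x...5ff8, yield the same
 * (addr >> ODEBUG_CHUNK_SHIFT) value and therefore share one bucket, which
 * is what lets __debug_check_no_obj_freed() scan a freed range one chunk
 * at a time.
 */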

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	/*
	 * Don't report if lookup_object_or_alloc() by the current thread
	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
	 * concurrent thread turned off debug_objects_enabled and cleared
	 * the hash buckets.
	 */
	if (!debug_objects_enabled)
		return;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object: %p object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			obj->object, descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void * addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			 task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			 task_stack_page(current));

	WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
						const struct debug_obj_descr *descr,
						bool onstack, bool alloc_ifstatic)
{
	struct debug_obj *obj = lookup_object(addr, b);
	enum debug_obj_state state = ODEBUG_STATE_NONE;

	if (likely(obj))
		return obj;

	/*
	 * debug_object_init() unconditionally allocates untracked
	 * objects. It does not matter whether it is a static object or
	 * not.
	 *
	 * debug_object_assert_init() and debug_object_activate() allow
	 * allocation only if the descriptor callback confirms that the
	 * object is static and considered initialized. For non-static
	 * objects the allocation needs to be done from the fixup callback.
	 */
	if (unlikely(alloc_ifstatic)) {
		if (!descr->is_static_object || !descr->is_static_object(addr))
			return ERR_PTR(-ENOENT);
		/* Statically allocated objects are considered initialized */
		state = ODEBUG_STATE_INIT;
	}

	obj = alloc_object(addr, b, descr);
	if (likely(obj)) {
		obj->state = state;
		debug_object_is_on_stack(addr, onstack);
		return obj;
	}

	/* Out of memory. Do the cleanup outside of the locked region */
	debug_objects_enabled = 0;
	return NULL;
}

static void debug_objects_fill_pool(void)
{
	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context -- for !RT kernels we rely on the fact that spinlock_t and
	 * raw_spinlock_t are basically the same type and this lock-type
	 * inversion works just fine.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
		/*
		 * Annotate away the spinlock_t inside raw_spinlock_t warning
		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
		 * the preemptible() condition above.
		 */
		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
		lock_map_acquire_try(&fill_pool_map);
		fill_pool();
		lock_map_release(&fill_pool_map);
	}
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
	if (unlikely(!obj)) {
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_objects_oom();
		return;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
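
/*
 * Usage sketch (hedged: "struct my_timer", its descriptor and callbacks
 * are hypothetical, not part of this file). A subsystem declares one
 * descriptor per object type and wraps the debug_object_*() calls in its
 * own init/activate/free helpers, typically behind a CONFIG_DEBUG_OBJECTS_*
 * Kconfig symbol:
 *
 *	static const struct debug_obj_descr my_timer_debug_descr = {
 *		.name		= "my_timer",
 *		.fixup_init	= my_timer_fixup_init,
 *	};
 *
 *	void my_timer_setup(struct my_timer *t)
 *	{
 *		debug_object_init(t, &my_timer_debug_descr);
 *		// ... normal initialization ...
 *	}
 *
 * For objects living on the stack, debug_object_init_on_stack() below must
 * be used instead, and the object must be removed from the tracker before
 * the function returns.
 */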

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;

	if (!debug_objects_enabled)
		return 0;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	if (likely(!IS_ERR_OR_NULL(obj))) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return 0;
	}

	/* Object is neither static nor tracked. It's not initialized */
	debug_print_object(&o, "activate");
	ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
	return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
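
/*
 * Caller sketch (hypothetical "t" and "my_timer_debug_descr" as in the
 * example above): the return value lets a caller refuse to start an object
 * that was never initialized and could not be fixed up:
 *
 *	if (debug_object_activate(t, &my_timer_debug_descr))
 *		return;		// -EINVAL: activation check failed
 */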

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	debug_objects_fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);
	obj = lookup_object_or_alloc(addr, db, descr, false, true);
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (likely(!IS_ERR_OR_NULL(obj)))
		return;

	/* If NULL the allocation has hit OOM */
	if (!obj) {
		debug_objects_oom();
		return;
	}

	/* Object is neither tracked nor static. It's not initialized. */
	debug_print_object(&o, "assert_init");
	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
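
/*
 * Example (a sketch modeled on the RCU callback tracking use of this
 * interface; the names are the caller's own and only illustrative here).
 * The "astate" value is owned entirely by the user; ODEBUG merely verifies
 * the expected transition while the object stays ACTIVE:
 *
 *	#define STATE_RCU_HEAD_READY	0
 *	#define STATE_RCU_HEAD_QUEUED	1
 *
 *	// when queueing the callback:
 *	debug_object_active_state(head, &rcuhead_debug_descr,
 *				  STATE_RCU_HEAD_READY,
 *				  STATE_RCU_HEAD_QUEUED);
 */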

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	const struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif
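
/*
 * debug_check_no_obj_freed() is hooked into the allocator free paths
 * (e.g. slab and vmalloc frees), so releasing a memory region that still
 * contains an ACTIVE tracked object - say a structure embedding an armed
 * timer - is reported here and handed to the descriptor's fixup_free()
 * callback before the memory is reused.
 */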

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);
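
/*
 * The stats can be inspected at runtime; a sample reading (the values are
 * made up for illustration):
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :2
 *	max_checked   :41
 *	warnings      :0
 *	fixups        :0
 *	pool_free     :1385
 *	pool_pcp_free :259
 *	pool_min_free :963
 *	pool_used     :70
 *	pool_max_used :426
 *	on_free_list  :0
 *	objs_allocated:2226
 *	objs_freed    :744
 */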

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	debug_objects_allocated += i;

	/*
	 * debug_objects_mem_init() is called so early that only one CPU is up
	 * and interrupts are disabled, which makes it safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, which avoids recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
		return;
	}

	debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}