// SPDX-License-Identifier: GPL-2.0
/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS        14
#define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE        1024
#define ODEBUG_POOL_MIN_LEVEL   256
#define ODEBUG_POOL_PERCPU_SIZE 64
#define ODEBUG_BATCH_SIZE       16

#define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The freeing of debug objects via the workqueue is throttled to a
 * maximum frequency of 10Hz and about 1024 objects per freeing
 * operation, i.e. at most roughly 10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX    1024
#define ODEBUG_FREE_WORK_DELAY  DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
        struct hlist_head       list;
        raw_spinlock_t          lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
        struct hlist_head       free_objs;
        int                     obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
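
/*
 * Worked example of the adjustment above: an object parked in a percpu
 * free list still counts as used in obj_pool_used and is invisible to
 * obj_pool_free. With 10 such objects, debug_stats_show() reports
 * pool_free as obj_pool_free + 10 and pool_used as obj_pool_used - 10.
 */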
static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_used;
static int                      obj_pool_max_used;
static bool                     obj_freeing;
/* The number of objs on the global free list */
static int                      obj_nr_tofree;

static int                      debug_objects_maxchain __read_mostly;
static int __maybe_unused       debug_objects_maxchecked __read_mostly;
static int                      debug_objects_fixups __read_mostly;
static int                      debug_objects_warnings __read_mostly;
static int                      debug_objects_enabled __read_mostly
                                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int                      debug_objects_pool_size __read_mostly
                                = ODEBUG_POOL_SIZE;
static int                      debug_objects_pool_min_level __read_mostly
                                = ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache        *obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int                      debug_objects_allocated;
static int                      debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = 1;
        return 0;
}

static int __init disable_object_debug(char *str)
{
        debug_objects_enabled = 0;
        return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
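
/*
 * Example: with CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=0 the tracker can be
 * switched on for a single boot by appending "debug_objects" to the
 * kernel command line; "no_debug_objects" forces it off regardless of
 * the Kconfig default.
 */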

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE]             = "none",
        [ODEBUG_STATE_INIT]             = "initialized",
        [ODEBUG_STATE_INACTIVE]         = "inactive",
        [ODEBUG_STATE_ACTIVE]           = "active",
        [ODEBUG_STATE_DESTROYED]        = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
};

static void fill_pool(void)
{
        gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
        struct debug_obj *obj;
        unsigned long flags;

        if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
                return;

        /*
         * Reuse objs from the global free list; they will be reinitialized
         * when allocating.
         *
         * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
         * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
         * sections.
         */
        while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
                raw_spin_lock_irqsave(&pool_lock, flags);
                /*
                 * Recheck with the lock held as the worker thread might have
                 * won the race and freed the global free list already.
                 */
                while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
                        obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                        hlist_del(&obj->node);
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
                        hlist_add_head(&obj->node, &obj_pool);
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }

        if (unlikely(!obj_cache))
                return;

        while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
                struct debug_obj *new[ODEBUG_BATCH_SIZE];
                int cnt;

                for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
                        new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
                        if (!new[cnt])
                                break;
                }
                if (!cnt)
                        return;

                raw_spin_lock_irqsave(&pool_lock, flags);
                while (cnt) {
                        hlist_add_head(&new[--cnt]->node, &obj_pool);
                        debug_objects_allocated++;
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
        struct debug_obj *obj = NULL;

        if (list->first) {
                obj = hlist_entry(list->first, typeof(*obj), node);
                hlist_del(&obj->node);
        }

        return obj;
}

static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
        struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        struct debug_obj *obj;

        if (likely(obj_cache)) {
                obj = __alloc_object(&percpu_pool->free_objs);
                if (obj) {
                        percpu_pool->obj_free--;
                        goto init_obj;
                }
        }

        raw_spin_lock(&pool_lock);
        obj = __alloc_object(&obj_pool);
        if (obj) {
                obj_pool_used++;
                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

                /*
                 * Looking ahead, allocate one batch of debug objects and
                 * put them into the percpu free pool.
                 */
                if (likely(obj_cache)) {
                        int i;

                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                struct debug_obj *obj2;

                                obj2 = __alloc_object(&obj_pool);
                                if (!obj2)
                                        break;
                                hlist_add_head(&obj2->node,
                                               &percpu_pool->free_objs);
                                percpu_pool->obj_free++;
                                obj_pool_used++;
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                        }
                }

                if (obj_pool_used > obj_pool_max_used)
                        obj_pool_max_used = obj_pool_used;

                if (obj_pool_free < obj_pool_min_free)
                        obj_pool_min_free = obj_pool_free;
        }
        raw_spin_unlock(&pool_lock);

init_obj:
        if (obj) {
                obj->object = addr;
                obj->descr  = descr;
                obj->state  = ODEBUG_STATE_NONE;
                obj->astate = 0;
                hlist_add_head(&obj->node, &b->list);
        }
        return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;
        unsigned long flags;
        HLIST_HEAD(tofree);

        WRITE_ONCE(obj_freeing, false);
        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                return;

        if (obj_pool_free >= debug_objects_pool_size)
                goto free_objs;

        /*
         * The objs on the pool list might be allocated before the work is
         * run, so recheck whether the pool list is full; if not, refill it
         * from the global free list. As it is likely that a workload may be
         * gearing up to use more and more objects, don't free any of them
         * until the next round.
         */
        while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
                obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                hlist_del(&obj->node);
                hlist_add_head(&obj->node, &obj_pool);
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
        return;

free_objs:
        /*
         * Pool list is already full and there are still objs on the free
         * list. Move remaining free objs to a temporary list to free the
         * memory outside the pool_lock held region.
         */
        if (obj_nr_tofree) {
                hlist_move_list(&obj_to_free, &tofree);
                debug_objects_freed += obj_nr_tofree;
                WRITE_ONCE(obj_nr_tofree, 0);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);

        hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
}

static void __free_object(struct debug_obj *obj)
{
        struct debug_obj *objs[ODEBUG_BATCH_SIZE];
        struct debug_percpu_free *percpu_pool;
        int lookahead_count = 0;
        unsigned long flags;
        bool work;

        local_irq_save(flags);
        if (!obj_cache)
                goto free_to_obj_pool;

        /*
         * Try to free it into the percpu pool first.
         */
        percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
                hlist_add_head(&obj->node, &percpu_pool->free_objs);
                percpu_pool->obj_free++;
                local_irq_restore(flags);
                return;
        }

        /*
         * As the percpu pool is full, look ahead and pull out a batch
         * of objects from the percpu pool and free them as well.
         */
        for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
                objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
                if (!objs[lookahead_count])
                        break;
                percpu_pool->obj_free--;
        }

free_to_obj_pool:
        raw_spin_lock(&pool_lock);
        work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
               (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
        obj_pool_used--;

        if (work) {
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                hlist_add_head(&obj->node, &obj_to_free);
                if (lookahead_count) {
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_to_free);
                        }
                }

                if ((obj_pool_free > debug_objects_pool_size) &&
                    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
                        int i;

                        /*
                         * Free one more batch of objects from obj_pool.
                         */
                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                obj = __alloc_object(&obj_pool);
                                hlist_add_head(&obj->node, &obj_to_free);
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                        }
                }
        } else {
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                hlist_add_head(&obj->node, &obj_pool);
                if (lookahead_count) {
                        WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_pool);
                        }
                }
        }
        raw_spin_unlock(&pool_lock);
        local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
        __free_object(obj);
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
        struct debug_percpu_free *percpu_pool;
        struct hlist_node *tmp;
        struct debug_obj *obj;
        unsigned long flags;

        /* Remote access is safe as the CPU is dead already */
        percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
        hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }

        raw_spin_lock_irqsave(&pool_lock, flags);
        obj_pool_used -= percpu_pool->obj_free;
        debug_objects_freed += percpu_pool->obj_free;
        raw_spin_unlock_irqrestore(&pool_lock, flags);

        percpu_pool->obj_free = 0;

        return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
        int i;

        pr_warn("Out of memory. ODEBUG disabled\n");

        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_move_list(&db->list, &freelist);
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
        }
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}
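
/*
 * Worked example (illustrative addresses): with ODEBUG_CHUNK_SHIFT equal
 * to PAGE_SHIFT (12 on 4K-page systems), 0xffff888012345010 and
 * 0xffff888012345ff0 lie in the same chunk and hash to the same bucket,
 * which is what lets __debug_check_no_obj_freed() scan a freed memory
 * range chunk by chunk further down in this file.
 */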

static void debug_print_object(struct debug_obj *obj, char *msg)
{
        const struct debug_obj_descr *descr = obj->descr;
        static int limit;

        /*
         * Don't report if lookup_object_or_alloc() by the current thread
         * failed because lookup_object_or_alloc()/debug_objects_oom() by a
         * concurrent thread turned off debug_objects_enabled and cleared
         * the hash buckets.
         */
        if (!debug_objects_enabled)
                return;

        if (limit < 5 && descr != descr_test) {
                void *hint = descr->debug_hint ?
                        descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                                 "object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
                        descr->name, hint);
        }
        debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
                   void * addr, enum debug_obj_state state)
{
        if (fixup && fixup(addr, state)) {
                debug_objects_fixups++;
                return true;
        }
        return false;
}
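
/*
 * Illustrative sketch (hypothetical my_timer names): a typical fixup
 * callback repairs the reported state so execution can continue, e.g.
 *
 *	static bool my_fixup_free(void *addr, enum debug_obj_state state)
 *	{
 *		struct my_timer *t = addr;
 *
 *		if (state == ODEBUG_STATE_ACTIVE) {
 *			my_timer_cancel(t);
 *			debug_object_free(t, &my_timer_descr);
 *			return true;	// counted in debug_objects_fixups
 *		}
 *		return false;
 *	}
 *
 * The selftest fixups near the end of this file follow the same pattern.
 */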

static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
                         task_stack_page(current));
        else
                pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
                         task_stack_page(current));

        WARN_ON(1);
}

static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
                                                const struct debug_obj_descr *descr,
                                                bool onstack, bool alloc_ifstatic)
{
        struct debug_obj *obj = lookup_object(addr, b);
        enum debug_obj_state state = ODEBUG_STATE_NONE;

        if (likely(obj))
                return obj;

        /*
         * debug_object_init() unconditionally allocates untracked
         * objects. It does not matter whether it is a static object or
         * not.
         *
         * debug_object_assert_init() and debug_object_activate() allow
         * allocation only if the descriptor callback confirms that the
         * object is static and considered initialized. For non-static
         * objects the allocation needs to be done from the fixup callback.
         */
        if (unlikely(alloc_ifstatic)) {
                if (!descr->is_static_object || !descr->is_static_object(addr))
                        return ERR_PTR(-ENOENT);
                /* Statically allocated objects are considered initialized */
                state = ODEBUG_STATE_INIT;
        }

        obj = alloc_object(addr, b, descr);
        if (likely(obj)) {
                obj->state = state;
                debug_object_is_on_stack(addr, onstack);
                return obj;
        }

        /* Out of memory. Do the cleanup outside of the locked region */
        debug_objects_enabled = 0;
        return NULL;
}

static void debug_objects_fill_pool(void)
{
        /*
         * On RT enabled kernels the pool refill must happen in preemptible
         * context:
         */
        if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
                fill_pool();
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
        if (unlikely(!obj)) {
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_objects_oom();
                return;
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                break;

        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                debug_object_fixup(descr->fixup_init, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                return;
        default:
                break;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
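
/*
 * Usage sketch (hypothetical my_widget names, not part of this API): a
 * client subsystem declares a descriptor once and brackets the object's
 * lifetime with the debug_object_*() calls:
 *
 *	static const struct debug_obj_descr my_widget_debug_descr = {
 *		.name = "my_widget",
 *	};
 *
 *	void my_widget_setup(struct my_widget *w)
 *	{
 *		debug_object_init(w, &my_widget_debug_descr);
 *	}
 *
 *	void my_widget_start(struct my_widget *w)
 *	{
 *		debug_object_activate(w, &my_widget_debug_descr);
 *	}
 *
 * Misuse, such as activating an already active widget, is then reported
 * via debug_print_object() and optionally repaired by fixup callbacks.
 */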

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *                              initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int ret;

        if (!debug_objects_enabled)
                return 0;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object_or_alloc(addr, db, descr, false, true);
        if (likely(!IS_ERR_OR_NULL(obj))) {
                bool print_object = false;

                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        ret = 0;
                        break;

                case ODEBUG_STATE_ACTIVE:
                        state = obj->state;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_print_object(obj, "activate");
                        ret = debug_object_fixup(descr->fixup_activate, addr, state);
                        return ret ? 0 : -EINVAL;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        ret = -EINVAL;
                        break;
                default:
                        ret = 0;
                        break;
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);
                if (print_object)
                        debug_print_object(obj, "activate");
                return ret;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);

        /* If NULL the allocation has hit OOM */
        if (!obj) {
                debug_objects_oom();
                return 0;
        }

        /* Object is neither static nor tracked. It's not initialized */
        debug_print_object(&o, "activate");
        ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
        return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        if (!obj->astate)
                                obj->state = ODEBUG_STATE_INACTIVE;
                        else
                                print_object = true;
                        break;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        break;
                default:
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "deactivate");
        } else if (print_object) {
                debug_print_object(obj, "deactivate");
        }
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "destroy");
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                print_object = true;
                break;
        default:
                break;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (print_object)
                debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "free");
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
                hlist_del(&obj->node);
                raw_spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
                return;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        debug_objects_fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);
        obj = lookup_object_or_alloc(addr, db, descr, false, true);
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (likely(!IS_ERR_OR_NULL(obj)))
                return;

        /* If NULL the allocation has hit OOM */
        if (!obj) {
                debug_objects_oom();
                return;
        }

        /* Object is neither tracked nor static. It's not initialized. */
        debug_print_object(&o, "assert_init");
        debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * @expect:     expected state
 * @next:       state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
                          unsigned int expect, unsigned int next)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate == expect)
                                obj->astate = next;
                        else
                                print_object = true;
                        break;

                default:
                        print_object = true;
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "active_state");
        } else if (print_object) {
                debug_print_object(obj, "active_state");
        }
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
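
/*
 * Usage sketch (hypothetical MY_STATE_* values): a user whose object
 * moves through sub-states while ODEBUG_STATE_ACTIVE can assert each
 * transition:
 *
 *	debug_object_active_state(obj, &my_descr, MY_STATE_READY,
 *				  MY_STATE_QUEUED);
 *
 * A mismatch between obj->astate and the expected sub-state is reported.
 * RCU's callback-head tracking, for example, drives its sub-state checks
 * through this interface.
 */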

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        const struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct hlist_node *tmp;
        struct debug_obj *obj;
        int cnt, objs_checked = 0;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;

        for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                descr = obj->descr;
                                state = obj->state;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
                                debug_print_object(obj, "free");
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                __free_object(obj);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);

                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;

                objs_checked += cnt;
        }

        if (objs_checked > debug_objects_maxchecked)
                debug_objects_maxchecked = objs_checked;

        /* Schedule work to actually kmem_cache_free() objects */
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif
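
/*
 * Illustrative note: the memory allocators invoke this hook on frees,
 * conceptually
 *
 *	debug_check_no_obj_freed(ptr, size);
 *
 * so releasing memory that still holds an ACTIVE tracked object is
 * reported (and optionally repaired via fixup_free) before the memory
 * can be reused.
 */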

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
        int cpu, obj_percpu_free = 0;

        for_each_possible_cpu(cpu)
                obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

        seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
        seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
        seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
        seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
        seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
        seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
        seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
        seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
        struct dentry *dbgdir;

        if (!debug_objects_enabled)
                return 0;

        dbgdir = debugfs_create_dir("debug_objects", NULL);

        debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

        return 0;
}
__initcall(debug_objects_init_debugfs);
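
/*
 * Example (illustrative values): with debugfs mounted at the usual
 * location, the statistics can be read via
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 *	max_chain     :3
 *	pool_free     :1602
 *	pool_used     :126
 *	...
 *
 * pool_free and pool_used already include the percpu-pool adjustment
 * described at the counter definitions near the top of this file.
 */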

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long   dummy1[6];
        int             static_init;
        unsigned long   dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
        struct self_test *obj = addr;

        return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                return true;
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return true;

        default:
                return false;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
                goto out;
        }
        if (obj && obj->state != state) {
                WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                       obj->state, state);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                       fixups, debug_objects_fixups);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                       warnings, debug_objects_warnings);
                goto out;
        }
        res = 0;
out:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = 0;
        return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
        .name                   = "selftest",
        .is_static_object       = is_static_object,
        .fixup_init             = fixup_init,
        .fixup_activate         = fixup_activate,
        .fixup_destroy          = fixup_destroy,
        .fixup_free             = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        pr_info("selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                raw_spin_lock_init(&obj_hash[i].lock);

        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        struct debug_obj *obj, *new;
        HLIST_HEAD(objects);
        int i, cnt = 0;

        for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
                if (!obj)
                        goto free;
                hlist_add_head(&obj->node, &objects);
        }

        debug_objects_allocated += i;

        /*
         * debug_objects_mem_init() is now called early, while only one CPU
         * is up and interrupts are disabled, so it is safe to replace the
         * active object references.
         */

        /* Remove the statically allocated objects from the pool */
        hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
                hlist_del(&obj->node);
        /* Move the allocated objects to the pool */
        hlist_move_list(&objects, &obj_pool);

        /* Replace the active object references */
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);

                hlist_for_each_entry(obj, &objects, node) {
                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
                        hlist_del(&new->node);
                        /* copy object data */
                        *new = *obj;
                        hlist_add_head(&new->node, &db->list);
                        cnt++;
                }
        }

        pr_debug("%d of %d active objects replaced\n",
                 cnt, obj_pool_used);
        return 0;
free:
        hlist_for_each_entry_safe(obj, tmp, &objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for the
 * debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
        int cpu, extras;

        if (!debug_objects_enabled)
                return;

        /*
         * Initialize the percpu object pools
         *
         * Initialization is not strictly necessary, but was done for
         * completeness.
         */
        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof (struct debug_obj), 0,
                                      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
                                      NULL);

        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
                kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
                return;
        } else
                debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
                                        object_cpu_offline);
#endif

        /*
         * Increase the thresholds for allocating and freeing objects
         * according to the number of possible CPUs available in the system.
         */
        extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
        debug_objects_pool_size += extras;
        debug_objects_pool_min_level += extras;
}
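
/*
 * Worked example for the scaling above: on a system with 64 possible
 * CPUs, extras = 64 * ODEBUG_BATCH_SIZE = 1024, raising
 * debug_objects_pool_size from 1024 to 2048 and
 * debug_objects_pool_min_level from 256 to 1280.
 */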