/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS        14
#define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE        1024
#define ODEBUG_POOL_MIN_LEVEL   256
#define ODEBUG_POOL_PERCPU_SIZE 64
#define ODEBUG_BATCH_SIZE       16

#define ODEBUG_CHUNK_SHIFT      PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE       (1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK       (~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue to a maximum
 * frequency of 10Hz and at most about 1024 objects per freeing
 * operation, so at most ~10k debug objects are freed per second.
 */
#define ODEBUG_FREE_WORK_MAX    1024
#define ODEBUG_FREE_WORK_DELAY  DIV_ROUND_UP(HZ, 10)
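/*
 * Worked example (illustrative only; assumes HZ=250, a common config
 * value): ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 ticks =
 * 100ms, so free_obj_work() runs at most 10 times per second, and each
 * run hands back roughly ODEBUG_FREE_WORK_MAX = 1024 objects at most.
 */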

struct debug_bucket {
        struct hlist_head       list;
        raw_spinlock_t          lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
        struct hlist_head       free_objs;
        int                     obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket      obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj         obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int                      obj_pool_min_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_free = ODEBUG_POOL_SIZE;
static int                      obj_pool_used;
static int                      obj_pool_max_used;
static bool                     obj_freeing;
/* The number of objs on the global free list */
static int                      obj_nr_tofree;

static int                      debug_objects_maxchain __read_mostly;
static int __maybe_unused       debug_objects_maxchecked __read_mostly;
static int                      debug_objects_fixups __read_mostly;
static int                      debug_objects_warnings __read_mostly;
static int                      debug_objects_enabled __read_mostly
                                = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int                      debug_objects_pool_size __read_mostly
                                = ODEBUG_POOL_SIZE;
static int                      debug_objects_pool_min_level __read_mostly
                                = ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache        *obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int                      debug_objects_allocated;
static int                      debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
        debug_objects_enabled = 1;
        return 0;
}

static int __init disable_object_debug(char *str)
{
        debug_objects_enabled = 0;
        return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);
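/*
 * Boot command line usage: passing "debug_objects" forces the tracker
 * on and "no_debug_objects" forces it off, overriding the Kconfig
 * default CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT, e.g.:
 *
 *      linux ... no_debug_objects
 */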

static const char *obj_states[ODEBUG_STATE_MAX] = {
        [ODEBUG_STATE_NONE]             = "none",
        [ODEBUG_STATE_INIT]             = "initialized",
        [ODEBUG_STATE_INACTIVE]         = "inactive",
        [ODEBUG_STATE_ACTIVE]           = "active",
        [ODEBUG_STATE_DESTROYED]        = "destroyed",
        [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
};

static void fill_pool(void)
{
        gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
        struct debug_obj *obj;
        unsigned long flags;

        if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
                return;

        /*
         * Reuse objs from the global free list; they will be reinitialized
         * when allocating.
         *
         * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
         * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
         * sections.
         */
        while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
                raw_spin_lock_irqsave(&pool_lock, flags);
                /*
                 * Recheck with the lock held as the worker thread might have
                 * won the race and freed the global free list already.
                 */
                while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
                        obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                        hlist_del(&obj->node);
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
                        hlist_add_head(&obj->node, &obj_pool);
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }

        if (unlikely(!obj_cache))
                return;

        while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
                struct debug_obj *new[ODEBUG_BATCH_SIZE];
                int cnt;

                for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
                        new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
                        if (!new[cnt])
                                break;
                }
                if (!cnt)
                        return;

                raw_spin_lock_irqsave(&pool_lock, flags);
                while (cnt) {
                        hlist_add_head(&new[--cnt]->node, &obj_pool);
                        debug_objects_allocated++;
                        WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                }
                raw_spin_unlock_irqrestore(&pool_lock, flags);
        }
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
        struct debug_obj *obj;
        int cnt = 0;

        hlist_for_each_entry(obj, &b->list, node) {
                cnt++;
                if (obj->object == addr)
                        return obj;
        }
        if (cnt > debug_objects_maxchain)
                debug_objects_maxchain = cnt;

        return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
        struct debug_obj *obj = NULL;

        if (list->first) {
                obj = hlist_entry(list->first, typeof(*obj), node);
                hlist_del(&obj->node);
        }

        return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
        struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        struct debug_obj *obj;

        if (likely(obj_cache)) {
                obj = __alloc_object(&percpu_pool->free_objs);
                if (obj) {
                        percpu_pool->obj_free--;
                        goto init_obj;
                }
        }

        raw_spin_lock(&pool_lock);
        obj = __alloc_object(&obj_pool);
        if (obj) {
                obj_pool_used++;
                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

                /*
                 * Looking ahead, allocate one batch of debug objects and
                 * put them into the percpu free pool.
                 */
                if (likely(obj_cache)) {
                        int i;

                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                struct debug_obj *obj2;

                                obj2 = __alloc_object(&obj_pool);
                                if (!obj2)
                                        break;
                                hlist_add_head(&obj2->node,
                                               &percpu_pool->free_objs);
                                percpu_pool->obj_free++;
                                obj_pool_used++;
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                        }
                }

                if (obj_pool_used > obj_pool_max_used)
                        obj_pool_max_used = obj_pool_used;

                if (obj_pool_free < obj_pool_min_free)
                        obj_pool_min_free = obj_pool_free;
        }
        raw_spin_unlock(&pool_lock);

init_obj:
        if (obj) {
                obj->object = addr;
                obj->descr  = descr;
                obj->state  = ODEBUG_STATE_NONE;
                obj->astate = 0;
                hlist_add_head(&obj->node, &b->list);
        }
        return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
        struct hlist_node *tmp;
        struct debug_obj *obj;
        unsigned long flags;
        HLIST_HEAD(tofree);

        WRITE_ONCE(obj_freeing, false);
        if (!raw_spin_trylock_irqsave(&pool_lock, flags))
                return;

        if (obj_pool_free >= debug_objects_pool_size)
                goto free_objs;

        /*
         * The objs on the pool list might be allocated before the work is
         * run, so recheck whether the pool list is full; if not, refill it
         * from the global free list. As it is likely that a workload may be
         * gearing up to use more and more objects, don't free any of them
         * until the next round.
         */
        while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
                obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
                hlist_del(&obj->node);
                hlist_add_head(&obj->node, &obj_pool);
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);
        return;

free_objs:
        /*
         * Pool list is already full and there are still objs on the free
         * list. Move remaining free objs to a temporary list to free the
         * memory outside the pool_lock held region.
         */
        if (obj_nr_tofree) {
                hlist_move_list(&obj_to_free, &tofree);
                debug_objects_freed += obj_nr_tofree;
                WRITE_ONCE(obj_nr_tofree, 0);
        }
        raw_spin_unlock_irqrestore(&pool_lock, flags);

        hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
}

static void __free_object(struct debug_obj *obj)
{
        struct debug_obj *objs[ODEBUG_BATCH_SIZE];
        struct debug_percpu_free *percpu_pool;
        int lookahead_count = 0;
        unsigned long flags;
        bool work;

        local_irq_save(flags);
        if (!obj_cache)
                goto free_to_obj_pool;

        /*
         * Try to free it into the percpu pool first.
         */
        percpu_pool = this_cpu_ptr(&percpu_obj_pool);
        if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
                hlist_add_head(&obj->node, &percpu_pool->free_objs);
                percpu_pool->obj_free++;
                local_irq_restore(flags);
                return;
        }

        /*
         * As the percpu pool is full, look ahead and pull out a batch
         * of objects from the percpu pool and free them as well.
         */
        for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
                objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
                if (!objs[lookahead_count])
                        break;
                percpu_pool->obj_free--;
        }

free_to_obj_pool:
        raw_spin_lock(&pool_lock);
        work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
               (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
        obj_pool_used--;

        if (work) {
                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                hlist_add_head(&obj->node, &obj_to_free);
                if (lookahead_count) {
                        WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_to_free);
                        }
                }

                if ((obj_pool_free > debug_objects_pool_size) &&
                    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
                        int i;

                        /*
                         * Free one more batch of objects from obj_pool.
                         */
                        for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
                                obj = __alloc_object(&obj_pool);
                                hlist_add_head(&obj->node, &obj_to_free);
                                WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
                                WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
                        }
                }
        } else {
                WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
                hlist_add_head(&obj->node, &obj_pool);
                if (lookahead_count) {
                        WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
                        obj_pool_used -= lookahead_count;
                        while (lookahead_count) {
                                hlist_add_head(&objs[--lookahead_count]->node,
                                               &obj_pool);
                        }
                }
        }
        raw_spin_unlock(&pool_lock);
        local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
        __free_object(obj);
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
        struct debug_percpu_free *percpu_pool;
        struct hlist_node *tmp;
        struct debug_obj *obj;

        /* Remote access is safe as the CPU is dead already */
        percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
        hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        percpu_pool->obj_free = 0;

        return 0;
}
#endif

/*
 * We ran out of memory. That probably means tons of objects have been
 * allocated.
 */
static void debug_objects_oom(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        HLIST_HEAD(freelist);
        struct debug_obj *obj;
        unsigned long flags;
        int i;

        pr_warn("Out of memory. ODEBUG disabled\n");

        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_move_list(&db->list, &freelist);
                raw_spin_unlock_irqrestore(&db->lock, flags);

                /* Now free them */
                hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
                        hlist_del(&obj->node);
                        free_object(obj);
                }
        }
}

/*
 * We use the chunk number of the address (addr >> ODEBUG_CHUNK_SHIFT)
 * for the hash. That way we can check for freed objects simply by
 * checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
        unsigned long hash;

        hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
        return &obj_hash[hash];
}
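/*
 * Example (a sketch; the addresses are made up, assuming PAGE_SHIFT=12):
 * 0xffff888012345010 and 0xffff888012345ff0 lie in the same 4K chunk,
 * shift to the same chunk number and therefore hash to the same bucket.
 * This is what lets __debug_check_no_obj_freed() scan a memory range
 * one bucket per chunk.
 */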

static void debug_print_object(struct debug_obj *obj, char *msg)
{
        const struct debug_obj_descr *descr = obj->descr;
        static int limit;

        if (limit < 5 && descr != descr_test) {
                void *hint = descr->debug_hint ?
                        descr->debug_hint(obj->object) : NULL;
                limit++;
                WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
                                 "object type: %s hint: %pS\n",
                        msg, obj_states[obj->state], obj->astate,
                        descr->name, hint);
        }
        debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
                   void *addr, enum debug_obj_state state)
{
        if (fixup && fixup(addr, state)) {
                debug_objects_fixups++;
                return true;
        }
        return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
        int is_on_stack;
        static int limit;

        if (limit > 4)
                return;

        is_on_stack = object_is_on_stack(addr);
        if (is_on_stack == onstack)
                return;

        limit++;
        if (is_on_stack)
                pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
                         task_stack_page(current));
        else
                pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
                         task_stack_page(current));

        WARN_ON(1);
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
        enum debug_obj_state state;
        bool check_stack = false;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        /*
         * On RT enabled kernels the pool refill must happen in preemptible
         * context:
         */
        if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
                fill_pool();

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                obj = alloc_object(addr, db, descr);
                if (!obj) {
                        debug_objects_enabled = 0;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_objects_oom();
                        return;
                }
                check_stack = true;
        }

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_INIT;
                break;

        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                debug_object_fixup(descr->fixup_init, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "init");
                return;
        default:
                break;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (check_stack)
                debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
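/*
 * Usage sketch (illustrative only; "foo" and its functions are
 * hypothetical, not part of this file). A subsystem describes its
 * object type once and brackets the object's life cycle with the
 * debug_object_*() calls:
 *
 *      static const struct debug_obj_descr foo_debug_descr = {
 *              .name = "foo",
 *      };
 *
 *      void foo_init(struct foo *f)
 *      {
 *              debug_object_init(f, &foo_debug_descr);
 *              ...
 *      }
 *
 *      void foo_start(struct foo *f)
 *      {
 *              debug_object_activate(f, &foo_debug_descr);
 *              ...
 *      }
 *
 * A second foo_start() on an already active object, or freeing an
 * active object, then triggers the ODEBUG warnings and the fixup
 * callbacks implemented above.
 */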

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *                              initialized
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
        if (!debug_objects_enabled)
                return;

        __debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int ret;
        struct debug_obj o = { .object = addr,
                               .state = ODEBUG_STATE_NOTAVAILABLE,
                               .descr = descr };

        if (!debug_objects_enabled)
                return 0;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                bool print_object = false;

                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                        obj->state = ODEBUG_STATE_ACTIVE;
                        ret = 0;
                        break;

                case ODEBUG_STATE_ACTIVE:
                        state = obj->state;
                        raw_spin_unlock_irqrestore(&db->lock, flags);
                        debug_print_object(obj, "activate");
                        ret = debug_object_fixup(descr->fixup_activate, addr, state);
                        return ret ? 0 : -EINVAL;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        ret = -EINVAL;
                        break;
                default:
                        ret = 0;
                        break;
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);
                if (print_object)
                        debug_print_object(obj, "activate");
                return ret;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);

        /*
         * We get here when a static object is activated. We let the
         * type specific code confirm whether this is really a static
         * object. If so, we just make sure that the static object is
         * tracked in the object tracker. If not, this must be a bug,
         * so we try to fix it up.
         */
        if (descr->is_static_object && descr->is_static_object(addr)) {
                /* track this static object */
                debug_object_init(addr, descr);
                debug_object_activate(addr, descr);
        } else {
                debug_print_object(&o, "activate");
                ret = debug_object_fixup(descr->fixup_activate, addr,
                                        ODEBUG_STATE_NOTAVAILABLE);
                return ret ? 0 : -EINVAL;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_INIT:
                case ODEBUG_STATE_INACTIVE:
                case ODEBUG_STATE_ACTIVE:
                        if (!obj->astate)
                                obj->state = ODEBUG_STATE_INACTIVE;
                        else
                                print_object = true;
                        break;

                case ODEBUG_STATE_DESTROYED:
                        print_object = true;
                        break;
                default:
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "deactivate");
        } else if (print_object) {
                debug_print_object(obj, "deactivate");
        }
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_NONE:
        case ODEBUG_STATE_INIT:
        case ODEBUG_STATE_INACTIVE:
                obj->state = ODEBUG_STATE_DESTROYED;
                break;
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "destroy");
                debug_object_fixup(descr->fixup_destroy, addr, state);
                return;

        case ODEBUG_STATE_DESTROYED:
                print_object = true;
                break;
        default:
                break;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (print_object)
                debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj)
                goto out_unlock;

        switch (obj->state) {
        case ODEBUG_STATE_ACTIVE:
                state = obj->state;
                raw_spin_unlock_irqrestore(&db->lock, flags);
                debug_print_object(obj, "free");
                debug_object_fixup(descr->fixup_free, addr, state);
                return;
        default:
                hlist_del(&obj->node);
                raw_spin_unlock_irqrestore(&db->lock, flags);
                free_object(obj);
                return;
        }
out_unlock:
        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                raw_spin_unlock_irqrestore(&db->lock, flags);
                /*
                 * Maybe the object is static, and we let the type specific
                 * code confirm. Track this static object if true, else invoke
                 * fixup.
                 */
                if (descr->is_static_object && descr->is_static_object(addr)) {
                        /* Track this static object */
                        debug_object_init(addr, descr);
                } else {
                        debug_print_object(&o, "assert_init");
                        debug_object_fixup(descr->fixup_assert_init, addr,
                                           ODEBUG_STATE_NOTAVAILABLE);
                }
                return;
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:       address of the object
 * @descr:      pointer to an object specific debug description structure
 * @expect:     expected state
 * @next:       state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
                          unsigned int expect, unsigned int next)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        bool print_object = false;

        if (!debug_objects_enabled)
                return;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (obj) {
                switch (obj->state) {
                case ODEBUG_STATE_ACTIVE:
                        if (obj->astate == expect)
                                obj->astate = next;
                        else
                                print_object = true;
                        break;

                default:
                        print_object = true;
                        break;
                }
        }

        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (!obj) {
                struct debug_obj o = { .object = addr,
                                       .state = ODEBUG_STATE_NOTAVAILABLE,
                                       .descr = descr };

                debug_print_object(&o, "active_state");
        } else if (print_object) {
                debug_print_object(obj, "active_state");
        }
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
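/*
 * Usage sketch for the astate machine (illustrative; the FOO_* names
 * are hypothetical). A subsystem can encode sub-states of an active
 * object, e.g. "queued" vs. "in flight", and assert the transition:
 *
 *      debug_object_active_state(obj, &foo_debug_descr,
 *                                FOO_QUEUED, FOO_IN_FLIGHT);
 *
 * If the object is active and its astate equals FOO_QUEUED it is moved
 * to FOO_IN_FLIGHT; anything else is reported as a usage bug.
 */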

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
        unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
        const struct debug_obj_descr *descr;
        enum debug_obj_state state;
        struct debug_bucket *db;
        struct hlist_node *tmp;
        struct debug_obj *obj;
        int cnt, objs_checked = 0;

        saddr = (unsigned long) address;
        eaddr = saddr + size;
        paddr = saddr & ODEBUG_CHUNK_MASK;
        chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
        chunks >>= ODEBUG_CHUNK_SHIFT;

        for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
                db = get_bucket(paddr);

repeat:
                cnt = 0;
                raw_spin_lock_irqsave(&db->lock, flags);
                hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
                        cnt++;
                        oaddr = (unsigned long) obj->object;
                        if (oaddr < saddr || oaddr >= eaddr)
                                continue;

                        switch (obj->state) {
                        case ODEBUG_STATE_ACTIVE:
                                descr = obj->descr;
                                state = obj->state;
                                raw_spin_unlock_irqrestore(&db->lock, flags);
                                debug_print_object(obj, "free");
                                debug_object_fixup(descr->fixup_free,
                                                   (void *) oaddr, state);
                                goto repeat;
                        default:
                                hlist_del(&obj->node);
                                __free_object(obj);
                                break;
                        }
                }
                raw_spin_unlock_irqrestore(&db->lock, flags);

                if (cnt > debug_objects_maxchain)
                        debug_objects_maxchain = cnt;

                objs_checked += cnt;
        }

        if (objs_checked > debug_objects_maxchecked)
                debug_objects_maxchecked = objs_checked;

        /* Schedule work to actually kmem_cache_free() objects */
        if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
                WRITE_ONCE(obj_freeing, true);
                schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
        }
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
        if (debug_objects_enabled)
                __debug_check_no_obj_freed(address, size);
}
#endif
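/*
 * The check above is driven by the memory allocators: with
 * CONFIG_DEBUG_OBJECTS_FREE enabled, callers such as the slab free
 * path and vfree() invoke debug_check_no_obj_freed() on the range
 * being released, so an object that is still tracked as active inside
 * freed memory is reported before the memory gets recycled.
 */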

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
        int cpu, obj_percpu_free = 0;

        for_each_possible_cpu(cpu)
                obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

        seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
        seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
        seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
        seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
        seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
        seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
        seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
        seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
        seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
        seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
        seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
        seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
        struct dentry *dbgdir;

        if (!debug_objects_enabled)
                return 0;

        dbgdir = debugfs_create_dir("debug_objects", NULL);

        debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

        return 0;
}
__initcall(debug_objects_init_debugfs);
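/*
 * With debugfs mounted at the conventional location, the counters above
 * can be read at /sys/kernel/debug/debug_objects/stats, e.g.:
 *
 *      # cat /sys/kernel/debug/debug_objects/stats
 *      max_chain     :...
 *      pool_free     :...
 *
 * (field values depend on the running system).
 */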

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
        unsigned long   dummy1[6];
        int             static_init;
        unsigned long   dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
        struct self_test *obj = addr;

        return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_init(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                return true;
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_activate(obj, &descr_type_test);
                return true;

        default:
                return false;
        }
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_destroy(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
        struct self_test *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
                debug_object_free(obj, &descr_type_test);
                return true;
        default:
                return false;
        }
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
        struct debug_bucket *db;
        struct debug_obj *obj;
        unsigned long flags;
        int res = -EINVAL;

        db = get_bucket((unsigned long) addr);

        raw_spin_lock_irqsave(&db->lock, flags);

        obj = lookup_object(addr, db);
        if (!obj && state != ODEBUG_STATE_NONE) {
                WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
                goto out;
        }
        if (obj && obj->state != state) {
                WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
                       obj->state, state);
                goto out;
        }
        if (fixups != debug_objects_fixups) {
                WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
                       fixups, debug_objects_fixups);
                goto out;
        }
        if (warnings != debug_objects_warnings) {
                WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
                       warnings, debug_objects_warnings);
                goto out;
        }
        res = 0;
out:
        raw_spin_unlock_irqrestore(&db->lock, flags);
        if (res)
                debug_objects_enabled = 0;
        return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
        .name                   = "selftest",
        .is_static_object       = is_static_object,
        .fixup_init             = fixup_init,
        .fixup_activate         = fixup_activate,
        .fixup_destroy          = fixup_destroy,
        .fixup_free             = fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
        int fixups, oldfixups, warnings, oldwarnings;
        unsigned long flags;

        local_irq_save(flags);

        fixups = oldfixups = debug_objects_fixups;
        warnings = oldwarnings = debug_objects_warnings;
        descr_test = &descr_type_test;

        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
                goto out;
        debug_object_destroy(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_deactivate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
                goto out;
        debug_object_free(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
                goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
                goto out;
        debug_object_activate(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        __debug_check_no_obj_freed(&obj, sizeof(obj));
        if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
                goto out;
#endif
        pr_info("selftest passed\n");

out:
        debug_objects_fixups = oldfixups;
        debug_objects_warnings = oldwarnings;
        descr_test = NULL;

        local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
        int i;

        for (i = 0; i < ODEBUG_HASH_SIZE; i++)
                raw_spin_lock_init(&obj_hash[i].lock);

        for (i = 0; i < ODEBUG_POOL_SIZE; i++)
                hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
        struct debug_bucket *db = obj_hash;
        struct hlist_node *tmp;
        struct debug_obj *obj, *new;
        HLIST_HEAD(objects);
        int i, cnt = 0;

        for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
                obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
                if (!obj)
                        goto free;
                hlist_add_head(&obj->node, &objects);
        }

        /*
         * debug_objects_mem_init() is now called early, while only one CPU is
         * up and interrupts are still disabled, so it is safe to replace the
         * active object references.
         */

        /* Remove the statically allocated objects from the pool */
        hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
                hlist_del(&obj->node);
        /* Move the allocated objects to the pool */
        hlist_move_list(&objects, &obj_pool);

        /* Replace the active object references */
        for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
                hlist_move_list(&db->list, &objects);

                hlist_for_each_entry(obj, &objects, node) {
                        new = hlist_entry(obj_pool.first, typeof(*obj), node);
                        hlist_del(&new->node);
                        /* copy object data */
                        *new = *obj;
                        hlist_add_head(&new->node, &db->list);
                        cnt++;
                }
        }

        pr_debug("%d of %d active objects replaced\n",
                 cnt, obj_pool_used);
        return 0;
free:
        hlist_for_each_entry_safe(obj, tmp, &objects, node) {
                hlist_del(&obj->node);
                kmem_cache_free(obj_cache, obj);
        }
        return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, which avoids recursive calls.
 */
void __init debug_objects_mem_init(void)
{
        int cpu, extras;

        if (!debug_objects_enabled)
                return;

        /*
         * Initialize the percpu object pools
         *
         * Initialization is not strictly necessary, but was done for
         * completeness.
         */
        for_each_possible_cpu(cpu)
                INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

        obj_cache = kmem_cache_create("debug_objects_cache",
                                      sizeof (struct debug_obj), 0,
                                      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
                                      NULL);

        if (!obj_cache || debug_objects_replace_static_objects()) {
                debug_objects_enabled = 0;
                kmem_cache_destroy(obj_cache);
                pr_warn("out of memory.\n");
        } else
                debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
                                        object_cpu_offline);
#endif

        /*
         * Increase the thresholds for allocating and freeing objects
         * according to the number of possible CPUs available in the system.
         */
        extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
        debug_objects_pool_size += extras;
        debug_objects_pool_min_level += extras;
}
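/*
 * Worked example (illustrative; assumes a machine with 8 possible CPUs):
 * extras = 8 * ODEBUG_BATCH_SIZE = 128, so debug_objects_pool_size grows
 * from 1024 to 1152 and debug_objects_pool_min_level from 256 to 384.
 */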