/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * The freeing of debug objects via the workqueue is limited to a maximum
 * frequency of 10Hz and at most ODEBUG_FREE_WORK_MAX (1024) objects per
 * freeing operation, i.e. at most ~10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static struct debug_obj_descr	*descr_test __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track numbers of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

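/*
 * The kernel command line options "debug_objects" and "no_debug_objects"
 * override the CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT build-time setting.
 */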
static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

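/*
 * Refill the global object pool: first recycle objects parked on the
 * to-be-freed list, then, once the kmem_cache is available, allocate new
 * objects in batches of ODEBUG_BATCH_SIZE until the pool holds at least
 * debug_objects_pool_min_level objects.
 */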
static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full; if not, refill it
	 * from the global free list. As it is likely that a workload may be
	 * gearing up to use more and more objects, don't free any of them
	 * until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * Pool list is already full and there are still objs on the free
	 * list. Move remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

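/*
 * Put an object back onto a free list: prefer the percpu pool; when it is
 * full, drain a batch from it as well and move everything either back to
 * the global pool or, if the global pool is above its target size, onto
 * the to-be-freed list for the worker to release.
 */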
static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

/*
 * We ran out of memory. That probably means we have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}

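/*
 * Report an object caught in an unexpected state. At most five warnings
 * are emitted (selftest objects excluded); the warning counter is bumped
 * unconditionally.
 */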
static void debug_print_object(struct debug_obj *obj, char *msg)
{
	struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

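/*
 * Complain when the on-stack annotation does not match where the object
 * actually lives, i.e. debug_object_init() was used for a stack object or
 * debug_object_init_on_stack() for a non-stack one. Rate-limited to five
 * reports.
 */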
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

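/*
 * Core of the init checks: track the object (allocating a tracker entry
 * on first sight) and validate the state transition to ODEBUG_STATE_INIT.
 */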
static void
__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	bool check_stack = false;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		check_stack = true;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (check_stack)
		debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);

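/*
 * Illustrative usage sketch (not part of this file): a subsystem that
 * wants its objects tracked typically defines a descriptor and wraps its
 * lifetime operations. The "foo" names below are hypothetical.
 *
 *	static struct debug_obj_descr foo_debug_descr = {
 *		.name		= "foo",
 *		.fixup_init	= foo_fixup_init,
 *	};
 *
 *	void foo_init(struct foo *f)
 *	{
 *		debug_object_init(f, &foo_debug_descr);
 *	}
 *
 *	void foo_start(struct foo *f)
 *	{
 *		debug_object_activate(f, &foo_debug_descr);
 *	}
 */
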
/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL for check failed.
 */
int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We are here when a static object is activated. We let the type
	 * specific code confirm whether this really is a static object or
	 * not. If it is, we just make sure that the static object is
	 * tracked in the object tracker. If not, this must be a bug, so we
	 * try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);

#ifdef CONFIG_DEBUG_OBJECTS_FREE
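/*
 * Walk all hash buckets covering the memory chunks in [address,
 * address + size) and complain about, then fix up, objects which are
 * still active while their memory is being freed; every other tracked
 * object in the range is simply released.
 */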
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

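/*
 * debugfs "stats" file. pool_free and pool_used are adjusted by the
 * number of objects sitting in the percpu free pools, which the global
 * counters deliberately do not include.
 */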
static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

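/*
 * Assuming debugfs is mounted at the conventional /sys/kernel/debug (the
 * mount point is up to the administrator), the "stats" file created below
 * can be read with:
 *
 *	# cat /sys/kernel/debug/debug_objects/stats
 */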
static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initdata struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

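/*
 * Selftest helper: verify that the test object is in the expected state
 * and that the fixup and warning counters match the expected values;
 * disable object debugging on any mismatch.
 */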
static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initdata struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is now called early enough that only one
	 * CPU is up and interrupts have been disabled, so it is safe to
	 * replace the active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to set up a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof(struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}