/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block.  The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback.
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer.
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
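
/*
 * Illustrative sketch (not extra kmemleak API, just the rules above in code
 * form; it mirrors find_and_get_object() further down in this file):
 *
 *	rcu_read_lock();
 *	read_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);	// tree walk under kmemleak_lock
 *	read_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	// fails once use_count hit 0
 *		object = NULL;
 *	rcu_read_unlock();			// object now pinned (or NULL)
 *	...
 *	put_object(object);			// may schedule the RCU freeing
 */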

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE               16      /* stack trace length */
#define MSECS_MIN_AGE           5000    /* minimum object age for reporting */
#define SECS_FIRST_SCAN         60      /* delay before the first scan */
#define SECS_SCAN_WAIT          600     /* subsequent auto scanning delay */
#define MAX_SCAN_SIZE           4096    /* maximum size of a scanned block */

#define BYTES_PER_POINTER       sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)  (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN | __GFP_NOFAIL)
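
/*
 * Example (the values follow directly from the mask above): a caller passing
 * gfp = GFP_KERNEL | __GFP_HIGHMEM gets its flags reduced so that only the
 * GFP_KERNEL/GFP_ATOMIC bits survive:
 *
 *	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 *	// effective flags: GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC
 *	//		    | __GFP_NOWARN | __GFP_NOFAIL
 */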

/* scanning area inside a memory block */
struct kmemleak_scan_area {
        struct hlist_node node;
        unsigned long start;
        size_t size;
};

#define KMEMLEAK_GREY   0
#define KMEMLEAK_BLACK  -1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
        spinlock_t lock;
        unsigned int flags;             /* object status flags */
        struct list_head object_list;
        struct list_head gray_list;
        struct rb_node rb_node;
        struct rcu_head rcu;            /* object_list lockless traversal */
        /* object usage count; object freed when use_count == 0 */
        atomic_t use_count;
        unsigned long pointer;
        size_t size;
        /* pass surplus references to this pointer */
        unsigned long excess_ref;
        /* minimum number of pointers found before it is considered a leak */
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
        /* checksum for detecting modified objects */
        u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long trace[MAX_TRACE];
        unsigned int trace_len;
        unsigned long jiffies;          /* creation timestamp */
        pid_t pid;                      /* pid of the current task */
        char comm[TASK_COMM_LEN];       /* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED        (1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED         (1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN          (1 << 2)

#define HEX_PREFIX              "    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE            16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE          1
/* include ASCII after the hex output */
#define HEX_ASCII               1
/* max number of lines to be printed */
#define HEX_MAX_LINES           2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
        KMEMLEAK_ALLOC,
        KMEMLEAK_ALLOC_PERCPU,
        KMEMLEAK_FREE,
        KMEMLEAK_FREE_PART,
        KMEMLEAK_FREE_PERCPU,
        KMEMLEAK_NOT_LEAK,
        KMEMLEAK_IGNORE,
        KMEMLEAK_SCAN_AREA,
        KMEMLEAK_NO_SCAN,
        KMEMLEAK_SET_EXCESS_REF
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
        int op_type;                    /* kmemleak operation type */
        int min_count;                  /* minimum reference count */
        const void *ptr;                /* allocated/freed memory block */
        union {
                size_t size;            /* memory block size */
                unsigned long excess_ref; /* surplus reference passing */
        };
        unsigned long trace[MAX_TRACE]; /* stack trace */
        unsigned int trace_len;         /* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
        early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;
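
/*
 * Illustrative flow (the replay itself happens in kmemleak_init(), outside
 * this excerpt; treat the last step as an assumption here): an allocation
 * made before kmemleak is up is recorded and later turned into a real
 * tracked object:
 *
 *	kmemleak_alloc(ptr, size, 1, gfp)	// kmemleak_early_log still set
 *	  -> log_early(KMEMLEAK_ALLOC, ptr, size, 1)
 *	...
 *	kmemleak_init()
 *	  -> early_alloc(&early_log[i])		// creates the kmemleak_object
 */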

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)     do {            \
        pr_warn(x);                             \
        dump_stack();                           \
        kmemleak_warning = 1;                   \
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)     do {    \
        kmemleak_warn(x);               \
        kmemleak_disable();             \
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)       do {    \
        if (seq)                                        \
                seq_printf(seq, fmt, ##__VA_ARGS__);    \
        else                                            \
                pr_warn(fmt, ##__VA_ARGS__);            \
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
                                 int rowsize, int groupsize, const void *buf,
                                 size_t len, bool ascii)
{
        if (seq)
                seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
                             buf, len, ascii);
        else
                print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
                               rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
                            struct kmemleak_object *object)
{
        const u8 *ptr = (const u8 *)object->pointer;
        size_t len;

        /* limit the number of lines to HEX_MAX_LINES */
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

        warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
        kasan_disable_current();
        warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                             HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
        kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *              sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *              (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
        return object->count != KMEMLEAK_BLACK &&
                object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
        return object->min_count != KMEMLEAK_BLACK &&
                object->count >= object->min_count;
}
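
/*
 * Worked example of the color encoding (illustrative values only):
 *
 *	struct kmemleak_object o = { .min_count = 1, .count = 0 };
 *	color_white(&o);	// true  - orphan, a leak candidate
 *	o.count = 1;
 *	color_gray(&o);		// true  - referenced, queued for scanning
 *	o.min_count = KMEMLEAK_BLACK;
 *	color_gray(&o);		// false - black objects are never scanned
 */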

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
        return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
{
        int i;
        unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

        warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                   object->pointer, object->size);
        warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
                   object->comm, object->pid, object->jiffies,
                   msecs_age / 1000, msecs_age % 1000);
        hex_dump_object(seq, object);
        warn_or_seq_printf(seq, "  backtrace:\n");

        for (i = 0; i < object->trace_len; i++) {
                void *ptr = (void *)object->trace[i];
                warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
        }
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
        pr_notice("Object 0x%08lx (size %zu):\n",
                  object->pointer, object->size);
        pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
                  object->comm, object->pid, object->jiffies);
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%x\n", object->flags);
        pr_notice("  checksum = %u\n", object->checksum);
        pr_notice("  backtrace:\n");
        stack_trace_print(object->trace, object->trace_len, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
        struct rb_node *rb = object_tree_root.rb_node;

        while (rb) {
                struct kmemleak_object *object =
                        rb_entry(rb, struct kmemleak_object, rb_node);
                if (ptr < object->pointer)
                        rb = object->rb_node.rb_left;
                else if (object->pointer + object->size <= ptr)
                        rb = object->rb_node.rb_right;
                else if (object->pointer == ptr || alias)
                        return object;
                else {
                        kmemleak_warn("Found object by alias at 0x%08lx\n",
                                      ptr);
                        dump_object_info(object);
                        break;
                }
        }
        return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing has already
 * been registered and the object should no longer be used. This function must
 * be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
        return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
        struct hlist_node *tmp;
        struct kmemleak_scan_area *area;
        struct kmemleak_object *object =
                container_of(rcu, struct kmemleak_object, rcu);

        /*
         * Once use_count is 0 (guaranteed by put_object), there is no other
         * code accessing this object, hence no need for locking.
         */
        hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
                hlist_del(&area->node);
                kmem_cache_free(scan_area_cache, area);
        }
        kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
        if (!atomic_dec_and_test(&object->use_count))
                return;

        /* should only get here after delete_object was called */
        WARN_ON(object->flags & OBJECT_ALLOCATED);

        call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        rcu_read_lock();
        read_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        read_unlock_irqrestore(&kmemleak_lock, flags);

        /* check whether the object is still available */
        if (object && !get_object(object))
                object = NULL;
        rcu_read_unlock();

        return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
        unsigned long flags;
        struct kmemleak_object *object;

        write_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
        if (object) {
                rb_erase(&object->rb_node, &object_tree_root);
                list_del_rcu(&object->object_list);
        }
        write_unlock_irqrestore(&kmemleak_lock, flags);

        return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
        return stack_trace_save(trace, MAX_TRACE, 2);
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
                                             int min_count, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object, *parent;
        struct rb_node **link, *rb_parent;
        unsigned long untagged_ptr;

        object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
        if (!object) {
                pr_warn("Cannot allocate a kmemleak_object structure\n");
                kmemleak_disable();
                return NULL;
        }

        INIT_LIST_HEAD(&object->object_list);
        INIT_LIST_HEAD(&object->gray_list);
        INIT_HLIST_HEAD(&object->area_list);
        spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
        object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
        object->size = size;
        object->excess_ref = 0;
        object->min_count = min_count;
        object->count = 0;                      /* white color initially */
        object->jiffies = jiffies;
        object->checksum = 0;

        /* task information */
        if (in_irq()) {
                object->pid = 0;
                strncpy(object->comm, "hardirq", sizeof(object->comm));
        } else if (in_softirq()) {
                object->pid = 0;
                strncpy(object->comm, "softirq", sizeof(object->comm));
        } else {
                object->pid = current->pid;
                /*
                 * There is a small chance of a race with set_task_comm(),
                 * however using get_task_comm() here may cause locking
                 * dependency issues with current->alloc_lock. In the worst
                 * case, the command line is not correct.
                 */
                strncpy(object->comm, current->comm, sizeof(object->comm));
        }

        /* kernel backtrace */
        object->trace_len = __save_stack_trace(object->trace);

        write_lock_irqsave(&kmemleak_lock, flags);

        untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
        min_addr = min(min_addr, untagged_ptr);
        max_addr = max(max_addr, untagged_ptr + size);
        link = &object_tree_root.rb_node;
        rb_parent = NULL;
        while (*link) {
                rb_parent = *link;
                parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
                if (ptr + size <= parent->pointer)
                        link = &parent->rb_node.rb_left;
                else if (parent->pointer + parent->size <= ptr)
                        link = &parent->rb_node.rb_right;
                else {
                        kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
                                      ptr);
                        /*
                         * No need for parent->lock here since "parent" cannot
                         * be freed while the kmemleak_lock is held.
                         */
                        dump_object_info(parent);
                        kmem_cache_free(object_cache, object);
                        object = NULL;
                        goto out;
                }
        }
        rb_link_node(&object->rb_node, rb_parent, link);
        rb_insert_color(&object->rb_node, &object_tree_root);

        list_add_tail_rcu(&object->object_list, &object_list);
out:
        write_unlock_irqrestore(&kmemleak_lock, flags);
        return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
        unsigned long flags;

        WARN_ON(!(object->flags & OBJECT_ALLOCATED));
        WARN_ON(atomic_read(&object->use_count) < 1);

        /*
         * Locking here also ensures that the corresponding memory block
         * cannot be freed when it is being scanned.
         */
        spin_lock_irqsave(&object->lock, flags);
        object->flags &= ~OBJECT_ALLOCATED;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
        struct kmemleak_object *object;

        object = find_and_remove_object(ptr, 0);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Freeing unknown object at 0x%08lx\n",
                              ptr);
#endif
                return;
        }
        __delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
        struct kmemleak_object *object;
        unsigned long start, end;

        object = find_and_remove_object(ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
                              ptr, size);
#endif
                return;
        }

        /*
         * Create one or two objects that may result from the memory block
         * split. Note that partial freeing is only done by free_bootmem() and
         * this happens before kmemleak_init() is called. The path below is
         * only executed during early log recording in kmemleak_init(), so
         * GFP_KERNEL is enough.
         */
        start = object->pointer;
        end = object->pointer + object->size;
        if (ptr > start)
                create_object(start, ptr - start, object->min_count,
                              GFP_KERNEL);
        if (ptr + size < end)
                create_object(ptr + size, end - ptr - size, object->min_count,
                              GFP_KERNEL);

        __delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
        object->min_count = color;
        if (color == KMEMLEAK_BLACK)
                object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
        unsigned long flags;

        spin_lock_irqsave(&object->lock, flags);
        __paint_it(object, color);
        spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
                              ptr,
                              (color == KMEMLEAK_GREY) ? "Grey" :
                              (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
                return;
        }
        paint_it(object, color);
        put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
        paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area;

        object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
        if (!area) {
                pr_warn("Cannot allocate a scan area\n");
                goto out;
        }

        spin_lock_irqsave(&object->lock, flags);
        if (size == SIZE_MAX) {
                size = object->pointer + object->size - ptr;
        } else if (ptr + size > object->pointer + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
                goto out_unlock;
        }

        INIT_HLIST_NODE(&area->node);
        area->start = ptr;
        area->size = size;

        hlist_add_head(&area->node, &object->area_list);
out_unlock:
        spin_unlock_irqrestore(&object->lock, flags);
out:
        put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
                              ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->excess_ref = excess_ref;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
        unsigned long flags;
        struct kmemleak_object *object;

        object = find_and_get_object(ptr, 0);
        if (!object) {
                kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->flags |= OBJECT_NO_SCAN;
        spin_unlock_irqrestore(&object->lock, flags);
        put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
                             int min_count)
{
        unsigned long flags;
        struct early_log *log;

        if (kmemleak_error) {
                /* kmemleak stopped recording, just count the requests */
                crt_early_log++;
                return;
        }

        if (crt_early_log >= ARRAY_SIZE(early_log)) {
                crt_early_log++;
                kmemleak_disable();
                return;
        }

        /*
         * There is no need for locking since the kernel is still in UP mode
         * at this stage. Disabling the IRQs is enough.
         */
        local_irq_save(flags);
        log = &early_log[crt_early_log];
        log->op_type = op_type;
        log->ptr = ptr;
        log->size = size;
        log->min_count = min_count;
        log->trace_len = __save_stack_trace(log->trace);
        crt_early_log++;
        local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
        struct kmemleak_object *object;
        unsigned long flags;
        int i;

        if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
                return;

        /*
         * RCU locking needed to ensure object is not freed via put_object().
         */
        rcu_read_lock();
        object = create_object((unsigned long)log->ptr, log->size,
                               log->min_count, GFP_ATOMIC);
        if (!object)
                goto out;
        spin_lock_irqsave(&object->lock, flags);
        for (i = 0; i < log->trace_len; i++)
                object->trace[i] = log->trace[i];
        object->trace_len = log->trace_len;
        spin_unlock_irqrestore(&object->lock, flags);
out:
        rcu_read_unlock();
}

/*
 * Log an early allocated percpu block, registering the per-CPU copies and
 * populating the stack trace for each of them.
 */
static void early_alloc_percpu(struct early_log *log)
{
        unsigned int cpu;
        const void __percpu *ptr = log->ptr;

        for_each_possible_cpu(cpu) {
                log->ptr = per_cpu_ptr(ptr, cpu);
                early_alloc(log);
        }
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:        pointer to beginning of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object. If during memory
 *              scanning a number of references less than @min_count is found,
 *              the object is reported as a memory leak. If @min_count is 0,
 *              the object is never reported as a leak. If @min_count is -1,
 *              the object is ignored (not scanned and not reported as a leak)
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
                          gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
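
/*
 * Illustrative usage sketch (not from this file; low_level_alloc/free are
 * hypothetical): a wrapper allocator pairing the hooks the way
 * kmalloc()/kfree() do:
 *
 *	static void *my_alloc(size_t size, gfp_t gfp)
 *	{
 *		void *ptr = low_level_alloc(size, gfp);
 *
 *		if (ptr)
 *			kmemleak_alloc(ptr, size, 1, gfp); // min_count == 1
 *		return ptr;
 *	}
 *
 *	static void my_free(void *ptr)
 *	{
 *		kmemleak_free(ptr);		// unregister before freeing
 *		low_level_free(ptr);
 *	}
 */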

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 * @size:       size of the object
 * @gfp:        flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
                                 gfp_t gfp)
{
        unsigned int cpu;

        pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

        /*
         * Percpu allocations are only scanned and not reported as leaks
         * (min_count is set to 0).
         */
        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        create_object((unsigned long)per_cpu_ptr(ptr, cpu),
                                      size, 0, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:       pointer to vm_struct
 * @size:       size of the object
 * @gfp:        __vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

        /*
         * A min_count = 2 is needed because vm_struct contains a reference to
         * the virtual address of the vmalloc'ed block.
         */
        if (kmemleak_enabled) {
                create_object((unsigned long)area->addr, size, 2, gfp);
                object_set_excess_ref((unsigned long)area,
                                      (unsigned long)area->addr);
        } else if (kmemleak_early_log) {
                log_early(KMEMLEAK_ALLOC, area->addr, size, 2);
                /* reusing early_log.size for storing area->addr */
                log_early(KMEMLEAK_SET_EXCESS_REF,
                          area, (unsigned long)area->addr, 0);
        }
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
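
/*
 * Why the excess_ref above matters (sketch of the reference flow during a
 * scan): free_thread_stack() may keep only a vm_struct pointer, so a scanned
 * word pointing at the vm_struct - which is already gray - is credited to
 * the vmalloc'ed block instead:
 *
 *	found pointer --> area (gray) -- area->excess_ref --> area->addr
 *
 * and the block is not reported even though no direct pointer to area->addr
 * remains.
 */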

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:        pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:        pointer to the beginning or inside the object. This also
 *              represents the start of the range to be freed
 * @size:       size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:        __percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
        unsigned int cpu;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
                for_each_possible_cpu(cpu)
                        delete_object_full((unsigned long)per_cpu_ptr(ptr,
                                                                      cpu));
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:        pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
        struct kmemleak_object *object;
        unsigned long flags;

        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
                return;

        object = find_and_get_object((unsigned long)ptr, 1);
        if (!object) {
#ifdef DEBUG
                kmemleak_warn("Updating stack trace for unknown object at %p\n",
                              ptr);
#endif
                return;
        }

        spin_lock_irqsave(&object->lock, flags);
        object->trace_len = __save_stack_trace(object->trace);
        spin_unlock_irqrestore(&object->lock, flags);

        put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:        pointer to beginning or inside the object. This also
 *              represents the start of the scan area
 * @size:       size of the scan area
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
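
/*
 * Illustrative usage sketch (struct foo is hypothetical): only ->refs can
 * hold real pointers, so limiting the scan stops random bytes in ->buf from
 * accidentally "referencing" leaked objects:
 *
 *	struct foo {
 *		void *refs[4];		// the only genuine pointers
 *		char buf[4096];		// opaque payload
 *	};
 *
 *	struct foo *foo = kmalloc(sizeof(*foo), GFP_KERNEL);
 *	kmemleak_scan_area(&foo->refs, sizeof(foo->refs), GFP_KERNEL);
 */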

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:        pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
        pr_debug("%s(0x%p)\n", __func__, ptr);

        if (kmemleak_enabled && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
        else if (kmemleak_early_log)
                log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *                       address argument
 * @phys:       physical address of the object
 * @size:       size of the object
 * @min_count:  minimum number of references to this object.
 *              See kmemleak_alloc()
 * @gfp:        kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
                               gfp_t gfp)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *                           physical address argument
 * @phys:       physical address of the beginning or inside an object. This
 *              also represents the start of the range to be freed
 * @size:       size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *                          address argument
 * @phys:       physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *                        address argument
 * @phys:       physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
        if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
                kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
        u32 old_csum = object->checksum;

        kasan_disable_current();
        object->checksum = crc32(0, (void *)object->pointer, object->size);
        kasan_enable_current();

        return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
        if (!color_white(object)) {
                /* non-orphan, ignored or new */
                return;
        }

        /*
         * Increase the object's reference count (number of pointers to the
         * memory block). If this count reaches the required minimum, the
         * object's color will become gray and it will be added to the
         * gray_list.
         */
        object->count++;
        if (color_gray(object)) {
                /* put_object() called when removing from gray_list */
                WARN_ON(!get_object(object));
                list_add_tail(&object->gray_list, &gray_list);
        }
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
        if (!kmemleak_enabled)
                return 1;

        /*
         * This function may be called from either process or kthread context,
         * hence the need to check for both stop conditions.
         */
        if (current->mm)
                return signal_pending(current);
        else
                return kthread_should_stop();

        return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
                       struct kmemleak_object *scanned)
{
        unsigned long *ptr;
        unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
        unsigned long *end = _end - (BYTES_PER_POINTER - 1);
        unsigned long flags;
        unsigned long untagged_ptr;

        read_lock_irqsave(&kmemleak_lock, flags);
        for (ptr = start; ptr < end; ptr++) {
                struct kmemleak_object *object;
                unsigned long pointer;
                unsigned long excess_ref;

                if (scan_should_stop())
                        break;

                kasan_disable_current();
                pointer = *ptr;
                kasan_enable_current();

                untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
                if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
                        continue;

                /*
                 * No need for get_object() here since we hold kmemleak_lock.
                 * object->use_count cannot be dropped to 0 while the object
                 * is still present in object_tree_root and object_list
                 * (with updates protected by kmemleak_lock).
                 */
                object = lookup_object(pointer, 1);
                if (!object)
                        continue;
                if (object == scanned)
                        /* self referenced, ignore */
                        continue;

                /*
                 * Avoid the lockdep recursive warning on object->lock being
                 * previously acquired in scan_object(). These locks are
                 * enclosed by scan_mutex.
                 */
                spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                /* only pass surplus references (object already gray) */
                if (color_gray(object)) {
                        excess_ref = object->excess_ref;
                        /* no need for update_refs() if object already gray */
                } else {
                        excess_ref = 0;
                        update_refs(object);
                }
                spin_unlock(&object->lock);

                if (excess_ref) {
                        object = lookup_object(excess_ref, 0);
                        if (!object)
                                continue;
                        if (object == scanned)
                                /* circular reference, ignore */
                                continue;
                        spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
                        update_refs(object);
                        spin_unlock(&object->lock);
                }
        }
        read_unlock_irqrestore(&kmemleak_lock, flags);
}
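
/*
 * Boundary arithmetic above, worked through (illustrative, 64-bit,
 * BYTES_PER_POINTER == 8): for _start == 0x1003 and _end == 0x1020,
 * PTR_ALIGN() rounds start up to 0x1008 and end becomes 0x1019, so the loop
 * reads the words at 0x1008, 0x1010 and 0x1018; subtracting
 * BYTES_PER_POINTER - 1 guarantees the last 8-byte read still ends at or
 * before _end.
 */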
1387
1388 /*
1389  * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1390  */
1391 #ifdef CONFIG_SMP
1392 static void scan_large_block(void *start, void *end)
1393 {
1394         void *next;
1395
1396         while (start < end) {
1397                 next = min(start + MAX_SCAN_SIZE, end);
1398                 scan_block(start, next, NULL);
1399                 start = next;
1400                 cond_resched();
1401         }
1402 }
1403 #endif
1404
1405 /*
1406  * Scan a memory block corresponding to a kmemleak_object. A condition is
1407  * that object->use_count >= 1.
1408  */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}
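
/*
 * For illustration only (hypothetical caller, not part of this file): a
 * kmemleak user can restrict scanning of a large object to the part that
 * actually holds pointers. This populates the area_list walked above, so
 * that only the declared area is scanned:
 *
 *   obj = kmalloc(large_size, GFP_KERNEL);
 *   kmemleak_scan_area(obj, header_size, GFP_KERNEL);
 *
 * where large_size and header_size are placeholder sizes.
 */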

/*
 * Scan the objects already referenced (gray objects). As scanning proceeds,
 * more objects become referenced; if there are no memory leaks, all objects
 * are eventually scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
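 *
 * The scan proceeds in phases: (1) whiten every object (count = 0) and
 * queue the ones still gray (min_count == 0, e.g. painted KMEMLEAK_GREY);
 * (2) scan the root sets: per-cpu sections, struct pages and task stacks;
 * (3) drain the gray_list, which grows as new references are found;
 * (4) re-queue white objects whose checksum changed since the last scan
 * and drain the list again; (5) report objects that are still unreferenced.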
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this node */
			if (page_to_nid(page) != i)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * found at the end of a scan are reported, but only the first time.
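 * The interval between scans defaults to SECS_SCAN_WAIT seconds and can be
 * changed at run time by writing "scan=<secs>" to the debugfs file (see
 * kmemleak_write()).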
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. scan_mutex is
 * acquired here and only released in kmemleak_seq_stop(), serializing the
 * readers against memory scans and parameter updates.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};
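
/*
 * Reading the /sys/kernel/debug/kmemleak file walks the object_list through
 * the seq operations above; scan_mutex is held from kmemleak_seq_start()
 * until kmemleak_seq_stop(), so a read cannot race with a memory scan or a
 * parameter update.
 */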

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If these objects were painted black instead, they would never be
 * scanned again, yet they could come to hold the only references to newly
 * allocated objects, and we would end up with false positives.
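 *
 * Clearing also resets kmemleak_found_leaks, so that a subsequent disable
 * can free the internal metadata (see kmemleak_do_cleanup()).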
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off        - disable kmemleak (irreversible)
 *   stack=on   - enable the task stacks scanning
 *   stack=off  - disable the task stacks scanning
 *   scan=on    - start the automatic memory scanning thread
 *   scan=off   - stop the automatic memory scanning thread
 *   scan=...   - set the automatic memory scanning period in seconds (0 to
 *                disable it)
 *   scan       - trigger a memory scan
 *   clear      - mark all currently reported unreferenced kmemleak objects
 *                as grey so they are no longer printed, or free all kmemleak
 *                objects if kmemleak has been disabled.
 *   dump=...   - dump information about the object found at the given address
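 *
 * For example, assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   echo scan > /sys/kernel/debug/kmemleak        (trigger an immediate scan)
 *   echo scan=600 > /sys/kernel/debug/kmemleak    (scan every 600 seconds)
 *   echo clear > /sys/kernel/debug/kmemleak       (mute the current reports)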
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EBUSY;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were reported (otherwise, keep the metadata around so that
 * the recorded information on memory leaks can still be inspected).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is certain that kmemleak_scan() has stopped, it is safe to
	 * no longer track object freeing. Ordering of the scan thread
	 * stopping and the memory accesses below is guaranteed by the
	 * kthread_stop() function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
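 * For example, booting with "kmemleak=off" disables the detector, while
 * "kmemleak=on" keeps it enabled when CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is
 * set (see kmemleak_init() below).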
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

static void __init print_log_trace(struct early_log *log)
{
	pr_notice("Early log backtrace:\n");
	stack_trace_print(log->trace, log->trace_len, 2);
}

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_early_log = 0;
		kmemleak_disable();
		return;
	}
#endif

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	if (crt_early_log > ARRAY_SIZE(early_log))
		pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n",
			crt_early_log);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	kmemleak_early_log = 0;
	if (kmemleak_error) {
		local_irq_restore(flags);
		return;
	} else {
		kmemleak_enabled = 1;
		kmemleak_free_enabled = 1;
	}
	local_irq_restore(flags);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		case KMEMLEAK_ALLOC_PERCPU:
			early_alloc_percpu(log);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_FREE_PERCPU:
			kmemleak_free_percpu(log->ptr);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		case KMEMLEAK_SET_EXCESS_REF:
			object_set_excess_ref((unsigned long)log->ptr,
					      log->excess_ref);
			break;
		default:
			kmemleak_warn("Unknown early log operation: %d\n",
				      log->op_type);
		}

		if (kmemleak_warning) {
			print_log_trace(log);
			kmemleak_warning = 0;
		}
	}
}
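
/*
 * For illustration only (hypothetical caller, not part of this file): once
 * kmemleak_init() has run, subsystems call the annotation API replayed
 * above directly, e.g. to silence a false positive for an object that is
 * reachable only through memory kmemleak does not scan:
 *
 *   obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *   if (obj)
 *           kmemleak_not_leak(obj);
 */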

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	kmemleak_initialized = 1;

	dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warn("Failed to create the debugfs kmemleak file\n");

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);