#include <linux/init.h>
#include <linux/kmempagerecorder.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <asm/fixmap.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/irqflags.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/rbtree.h>
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/stacktrace.h>
#define BACKTRACE_LEVEL 10
#define DEBUG_DEFAULT_FLAGS 1
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
extern void *high_memory;

PageHashTable gPageHashTable;
PageObjectTable gKernelPageSymbolTable;
PageObjectTable gKernelPageBtTable;

static struct kmem_cache *page_cachep;
static bool page_cache_created;
static unsigned int Object_rank_max = 10;
static unsigned int queried_address;
static unsigned int debug_log;
static struct dentry *debug_root;
static unsigned int page_record_total;
static unsigned int page_record_max;
static unsigned int page_record_count;
static unsigned int bt_record_total;
static unsigned int bt_record_max;

/* hash table locks, initialized lazily on first use */
unsigned int page_record_lock_init;
unsigned int bt_record_lock_init;
unsigned int symbol_record_lock_init;
spinlock_t page_record_lock;
spinlock_t bt_record_lock;
spinlock_t symbol_record_lock;

int page_recorder_debug = DEBUG_DEFAULT_FLAGS;
unsigned int page_recorder_memory_usage;
unsigned int page_recorder_limit = 524288;
static char page_recorder_debug_function;

static int page_recorder_debug_show(struct seq_file *s, void *unused);
static inline unsigned int hash_32(unsigned int val, unsigned int bits);
static inline PageHashEntry *find_page_entry(void *page, int slot);
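/*
 * Runtime kill switch: lets other kernel code turn page allocation
 * tracing off without going through debugfs.
 */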
void disable_page_alloc_tracer(void)
{
	page_recorder_debug = 0;
}
static int page_recorder_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, page_recorder_debug_show, inode->i_private);
}
static const struct file_operations debug_page_recorder_fops = {
	.open = page_recorder_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
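/*
 * Print the allocation record for @page to the seq_file: the allocation
 * size, plus each saved return address resolved to a kernel symbol.
 */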
static int query_page_backtrace(struct seq_file *s, unsigned int *page)
{
	char symbol[KSYM_SYMBOL_LEN];
	unsigned int *backtrace;
	unsigned int i;
	unsigned long flags;
	unsigned int hash = hash_32((unsigned int)page, 16);
	unsigned int slot = hash % OBJECT_TABLE_SIZE;
	PageObjectEntry *bt_entry = NULL;
	PageHashEntry *entry = NULL;

	seq_printf(s, "%s %x\n", "query Page address:", (unsigned int)page);

	/* search page record in hash table */
	spin_lock_irqsave(&page_record_lock, flags);
	entry = find_page_entry(page, slot);
	if (entry != NULL) {
		bt_entry = entry->bt_entry;
		backtrace = (unsigned int *)bt_entry->object;
		seq_printf(s, "%x allocate %d %s\n", (unsigned int)entry->page,
			   entry->size * 4096, "bytes and backtrace is ");
		for (i = 0; i < bt_entry->numEntries; i++) {
			sprint_symbol(symbol, *(backtrace + i));
			seq_printf(s, " KERNEL[%d] 0x%x :: symbol %s\n", i,
				   backtrace[i], symbol);
		}
	} else {
		seq_printf(s, "can't get page(0x%x) backtrace information\n",
			   (unsigned int)page);
	}
	spin_unlock_irqrestore(&page_record_lock, flags);
	return 0;
}
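/*
 * Resolve a fixmap (or pkmap) virtual address to its struct page by
 * walking the kernel page tables; virt_to_page() only works for lowmem.
 */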
static struct page *fixmap_virt_to_page(const void *fixmap_addr)
{
	unsigned long addr = (unsigned long)fixmap_addr;
	struct page *page = NULL;
	pgd_t *pgd = pgd_offset_k(addr);

	if (!pgd_none(*pgd)) {
		pud_t *pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd_t *pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd)) {
				pte_t *ptep = pte_offset_map(pmd, addr);
				if (pte_present(*ptep))
					page = pte_page(*ptep);
				pte_unmap(ptep);
			}
		}
	}
	return page;
}
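/*
 * Show handler for the "query_page" debugfs file: classify the address in
 * "page_virtual_address" (vmalloc/module, lowmem, fixmap or pkmap),
 * translate it to its struct page, and print the recorded backtrace.
 */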
static int query_page_bt_open(struct seq_file *s, void *data)
{
	unsigned int *page_address = NULL;

	seq_printf(s, "queried_page is : %x\n", queried_address);
	if (is_vmalloc_or_module_addr((const void *)queried_address)) {
		/* vmalloc or module space */
		seq_printf(s, "[vmalloc or module]queried_page is : %x\n", queried_address);
		page_address = (unsigned int *)vmalloc_to_page((unsigned int *)(queried_address & 0xfffff000));
	} else if ((queried_address >= 0xC0000000) && (queried_address <= (unsigned int)high_memory)) {
		/* lowmem */
		seq_printf(s, "[lowmem]queried_page is : %x\n", queried_address);
		page_address = (unsigned int *)virt_to_page((void *)(queried_address & 0xfffff000));
	} else if ((queried_address >= FIXADDR_START) && (queried_address <= FIXADDR_TOP)) {
		/* fixmap */
		seq_printf(s, "[fixmap]queried_page is : %x\n", queried_address);
		page_address = (unsigned int *)fixmap_virt_to_page((const void *)(queried_address & 0xfffff000));
	} else if ((queried_address >= PKMAP_BASE) && (queried_address <= PKMAP_ADDR(LAST_PKMAP))) {
		/* pkmap */
		seq_printf(s, "[pkmap]queried_page is : %x\n", queried_address);
		page_address = (unsigned int *)fixmap_virt_to_page((const void *)(queried_address & 0xfffff000));
	} else {
		seq_printf(s, "[ERROR!!]queried_page is : %x can't find address in memory map\n", queried_address);
		return 0;
	}
	query_page_backtrace(s, (unsigned int *)page_address);
	return 0;
}
static int query_page_single_open(struct inode *inode, struct file *file)
{
	return single_open(file, query_page_bt_open, inode->i_private);
}

static const struct file_operations query_page_ios_fops = {
	.open = query_page_single_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
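/*
 * Capture up to BACKTRACE_LEVEL return addresses from the current stack
 * into @backtrace and return the number of entries saved. Callers pass
 * NULL when they only want the trace dumped. The name of the second
 * parameter is not visible in the source; "flag" is assumed here.
 */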
static unsigned int get_kernel_backtrace(unsigned long *backtrace,
					 unsigned int flag)
{
	unsigned long stack_entries[BACKTRACE_LEVEL];
	unsigned int i;
	char tmp[KSYM_SYMBOL_LEN];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = &stack_entries[0],
		.max_entries = BACKTRACE_LEVEL,
		.skip = 0,
	};

	save_stack_trace(&trace);
	if (trace.nr_entries > 0) {
		for (i = 0; i < trace.nr_entries; i++) {
			sprint_symbol(tmp, trace.entries[i]);
			pr_debug("[%d] 0x%x %s\n", i,
				 (unsigned int)trace.entries[i], tmp);
		}
		if (backtrace != NULL)
			memcpy(backtrace, (unsigned long *)trace.entries,
			       sizeof(unsigned int) * trace.nr_entries);
	} else {
		pr_err("[ERROR]can't get backtrace [get_kernel_backtrace] backtrace num: [%d]\n",
		       trace.nr_entries);
	}
	return trace.nr_entries;
}
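/*
 * Multiplicative hash in the style of the kernel's hash_32(): multiply by
 * a prime close to the golden ratio and keep the top @bits bits.
 */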
static inline unsigned int hash_32(unsigned int val, unsigned int bits)
{
	/* On some cpus multiply is faster, on others gcc will do shifts */
	unsigned int hash = val * GOLDEN_RATIO_PRIME_32;

	/* High bits are more random, so use them. */
	return hash >> (32 - bits);
}
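/*
 * djb2-style hash over a backtrace or symbol array. Each word is shifted
 * right by 2 first, since kernel text addresses are at least 4-byte aligned.
 */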
static uint32_t get_hash(void *object, size_t numEntries)
{
	unsigned int *backtrace = NULL;
	unsigned int hash = 0;
	size_t i;

	backtrace = (unsigned int *)object;
	if (backtrace == NULL)
		return 0;
	for (i = 0; i < numEntries; i++)
		hash = (hash * 33) + (*(backtrace + i) >> 2);
	return hash;
}
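/*
 * Search one hash slot of @table for an entry whose stored object matches
 * @object word for word. The caller must hold the lock protecting @table.
 * The final parameter is not visible in the source; "size" is assumed and
 * unused here.
 */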
PageObjectEntry *find_entry(PageObjectTable *table, unsigned int slot,
			    void *object, unsigned int numEntries,
			    unsigned int size)
{
	PageObjectEntry *entry = table->slots[slot];

	while (entry != NULL) {
		if (entry->numEntries == numEntries &&
		    !memcmp(object, entry->object,
			    numEntries * sizeof(unsigned int)))
			return entry;
		entry = entry->next;
	}
	return NULL;
}
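/*
 * Allocate a record node. NODE_PAGE_RECORD nodes come from a dedicated
 * slab cache that is created lazily on first use; allocation stops once
 * page_record_total reaches page_recorder_limit.
 */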
static void *allocate_record(unsigned int type)
{
	void *record = NULL;

	switch (type) {
	case NODE_PAGE_RECORD:
		if (!page_cache_created) {
			/* page_cachep = kmem_cache_create("page_record",
			   sizeof(PageHashEntry),0,SLAB_NO_DEBUG,NULL); */
			page_cachep = kmem_cache_create("page_record",
							sizeof(PageHashEntry),
							0, 0, NULL);
			page_cache_created = true;
		}
		/* if system ram < 2G, page_record_total should be less than 524288 */
		if ((page_cachep != NULL)
		    && (page_record_total < page_recorder_limit))
			/* GFP flag not visible in the source; GFP_ATOMIC assumed
			 * since this runs in page allocation paths */
			record = kmem_cache_alloc(page_cachep, GFP_ATOMIC);
		break;
	default:
		break;
	}
	return record;
}
/* get record from hash table or create new node from slab allocator */
static void *get_record(unsigned int type, page_record_t *param)
{
	PageObjectEntry *entry = NULL;
	unsigned int hash = 0;
	unsigned int slot = 0;
	unsigned long flags;

	switch (type) {
	case HASH_PAGE_NODE_KERNEL_PAGE_ALLOC_BACKTRACE:
		hash = get_hash(param->backtrace, param->backtrace_num);
		slot = hash % OBJECT_TABLE_SIZE;
		spin_lock_irqsave(&bt_record_lock, flags);
		entry = find_entry(&gKernelPageBtTable, slot,
				   (void *)param->backtrace,
				   param->backtrace_num, 1 << param->size);
		if (entry != NULL) {
			/* existing backtrace: bump refcount and page count */
			entry->reference++;
			entry->size = entry->size + (1 << param->size);
			spin_unlock_irqrestore(&bt_record_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&bt_record_lock, flags);

		/* total bt record size should be less than 5MB */
		if (bt_record_total < 50412)
			entry = kmalloc(sizeof(PageObjectEntry) +
					(param->backtrace_num *
					 sizeof(unsigned int)), GFP_ATOMIC);
		if (entry == NULL) {
			pr_warn("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
			break;
		}
		/* kmalloc can't get right memory space when booting */
		if ((unsigned int)entry < 0xC0000000) {
			pr_warn("[BAKCTRACEINFO][allocate bt mem] entry (0x%x) drop address\n",
				(unsigned int)entry);
			entry = NULL;
			break;
		}
		entry->reference = 1;
		entry->numEntries = param->backtrace_num;
		entry->size = 1 << param->size;
		memcpy(entry->object, param->backtrace,
		       param->backtrace_num * sizeof(unsigned int));
		spin_lock_irqsave(&bt_record_lock, flags);
		entry->prev = NULL;
		entry->next = gKernelPageBtTable.slots[slot];
		gKernelPageBtTable.slots[slot] = entry;
		if (entry->next != NULL)
			entry->next->prev = entry;
		gKernelPageBtTable.count++;
		bt_record_total++;
		if (bt_record_total > bt_record_max)
			bt_record_max = bt_record_total;
		spin_unlock_irqrestore(&bt_record_lock, flags);
		break;
	case HASH_PAGE_NODE_KERNEL_SYMBOL:
		hash = get_hash(param->kernel_symbol, param->backtrace_num);
		slot = hash % OBJECT_TABLE_SIZE;
		spin_lock_irqsave(&symbol_record_lock, flags);
		entry = find_entry(&gKernelPageSymbolTable, slot,
				   (void *)param->kernel_symbol,
				   param->backtrace_num, 0);
		if (entry != NULL) {
			entry->reference++;
			spin_unlock_irqrestore(&symbol_record_lock, flags);
			break;
		}
		spin_unlock_irqrestore(&symbol_record_lock, flags);
		entry = kmalloc(sizeof(PageObjectEntry) +
				(param->backtrace_num *
				 sizeof(unsigned int)), GFP_ATOMIC);
		if (entry == NULL) {
			pr_warn("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
			break;
		}
		entry->reference = 1;
		entry->numEntries = param->backtrace_num;
		memcpy(entry->object, param->kernel_symbol,
		       param->backtrace_num * sizeof(unsigned int));
		spin_lock_irqsave(&symbol_record_lock, flags);
		entry->prev = NULL;
		entry->next = gKernelPageSymbolTable.slots[slot];
		gKernelPageSymbolTable.slots[slot] = entry;
		if (entry->next != NULL)
			entry->next->prev = entry;
		gKernelPageSymbolTable.count++;
		spin_unlock_irqrestore(&symbol_record_lock, flags);
		break;
	default:
		break;
	}
	return (void *)entry;
}
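/*
 * Look up the record for @page in its hash slot. The caller must hold
 * page_record_lock.
 */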
static inline PageHashEntry *find_page_entry(void *page, int slot)
{
	PageHashEntry *entry = gPageHashTable.page_hash_table[slot];

	while (entry != NULL) {
		if (entry->page == page)
			return entry;
		entry = entry->next;
	}
	return NULL;
}
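/*
 * Create a PageHashEntry describing an allocation of 2^order pages and
 * insert it at the head of its hash slot; every 1000 records the current
 * memory footprint of the recorder itself is recomputed and logged.
 */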
PageHashEntry *record_page_info(PageObjectEntry *bt_entry,
				PageObjectEntry *map_entry, void *page,
				unsigned int order, unsigned int flag)
{
	/* calculate the hash value */
	unsigned int hash = hash_32((unsigned int)page, 16);
	unsigned int slot = hash % OBJECT_TABLE_SIZE;
	unsigned long flags;
	PPageHashEntry entry =
	    (PPageHashEntry) allocate_record(NODE_PAGE_RECORD);

	if (entry == NULL) {
		pr_warn("[get_record][KERNEL_PAGE_ALLOC_BACKTRACE]can't get enough memory to create page entry\n");
		return NULL;
	}
	/* initialize page entry */
	entry->page = page;
	entry->size = 1 << order;
	entry->allocate_map_entry = map_entry;
	entry->bt_entry = bt_entry;
	entry->flag = (2 | flag);
	entry->prev = NULL;

	spin_lock_irqsave(&page_record_lock, flags);
	/* insert the entry to the head of slot list */
	if (gPageHashTable.page_hash_table[slot] == NULL) {
		entry->next = NULL;
	} else {
		(gPageHashTable.page_hash_table[slot])->prev = entry;
		entry->next = gPageHashTable.page_hash_table[slot];
	}
	gPageHashTable.page_hash_table[slot] = entry;
	gPageHashTable.count++;
	page_record_total++;
	page_record_count++;
	if (page_record_total > page_record_max)
		page_record_max = page_record_total;

	if (page_record_count >= 1000) {
		page_recorder_memory_usage =
		    page_record_total * sizeof(PageHashEntry) +
		    bt_record_total * (sizeof(PageObjectEntry) +
				       (20 * sizeof(unsigned int)));
		pr_debug("[TOTAL PAGE RECORD !!!] page record size is %d max page record size is %d\n",
			 page_record_total * sizeof(PageHashEntry),
			 page_record_max * sizeof(PageHashEntry));
		pr_debug("[TOTAL BACKTRACE RECORD !!!] bt record size is %d max bt record size is %d\n",
			 bt_record_total * (sizeof(PageObjectEntry) +
					    (20 * sizeof(unsigned int))),
			 bt_record_max * (sizeof(PageObjectEntry) +
					  (20 * sizeof(unsigned int))));
		page_record_count = 0;
	}
	spin_unlock_irqrestore(&page_record_lock, flags);
	return entry;
}
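/*
 * Unlink and free the record for a freed page, then drop the reference on
 * its backtrace entry, freeing that too once the last reference is gone.
 */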
int remove_page_info(void *page, unsigned int order)
{
	unsigned int hash = hash_32((unsigned int)page, 16);
	unsigned int slot = hash % OBJECT_TABLE_SIZE;
	PageObjectEntry *bt_entry = NULL;
	PageHashEntry *entry = NULL;
	unsigned long flags;

	/* search page record in hash table */
	if (page_record_lock_init == 0) {
		page_record_lock_init = 1;
		spin_lock_init(&page_record_lock);
	}
	if (bt_record_lock_init == 0) {
		bt_record_lock_init = 1;
		spin_lock_init(&bt_record_lock);
	}

	spin_lock_irqsave(&page_record_lock, flags);
	entry = find_page_entry(page, slot);
	if (entry == NULL) {
		spin_unlock_irqrestore(&page_record_lock, flags);
		/* pr_debug("[remove_page_info]can't find page info 0x%x\n",page); */
		get_kernel_backtrace(NULL, 1);
		return -1;
	}

	/* remove page record from hash table */
	if (entry->prev == NULL) {	/* head of the slot list */
		gPageHashTable.page_hash_table[slot] = entry->next;
		/* not only one entry in the slot */
		if (gPageHashTable.page_hash_table[slot] != NULL)
			gPageHashTable.page_hash_table[slot]->prev = NULL;
	} else if (entry->next == NULL) {	/* tail */
		entry->prev->next = NULL;
	} else {
		entry->next->prev = entry->prev;
		entry->prev->next = entry->next;
	}
	gPageHashTable.count--;
	page_record_total--;
	spin_unlock_irqrestore(&page_record_lock, flags);

	/* clean page entry */
	bt_entry = entry->bt_entry;
	kmem_cache_free(page_cachep, entry);

	/* create alloc bt entry for historical allocation */
	if (bt_entry == NULL)
		return 0;

	spin_lock_irqsave(&bt_record_lock, flags);
	if (bt_entry->reference > 1) {
		(bt_entry->reference)--;
		bt_entry->size = bt_entry->size - (1 << order);
		spin_unlock_irqrestore(&bt_record_lock, flags);
		pr_debug("[remove_page_info] bt_entry->size %d\n",
			 bt_entry->size);
	} else if (bt_entry->reference == 1) {
		unsigned int hash_bt;
		unsigned int slot_bt;

		hash_bt = get_hash(bt_entry->object, bt_entry->numEntries);
		slot_bt = hash_bt % OBJECT_TABLE_SIZE;
		if (bt_entry->prev == NULL) {	/* head */
			gKernelPageBtTable.slots[slot_bt] = bt_entry->next;
			/* not only one entry in the slot */
			if (gKernelPageBtTable.slots[slot_bt] != NULL)
				gKernelPageBtTable.slots[slot_bt]->prev = NULL;
		} else if (bt_entry->next == NULL) {	/* tail */
			bt_entry->prev->next = NULL;
		} else {
			bt_entry->next->prev = bt_entry->prev;
			bt_entry->prev->next = bt_entry->next;
		}
		gKernelPageBtTable.count--;
		bt_record_total--;
		spin_unlock_irqrestore(&bt_record_lock, flags);
		kfree(bt_entry);
	} else {
		spin_unlock_irqrestore(&bt_record_lock, flags);
		pr_err("ERROR !!!!free page info\n");
	}
	return 0;
}
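/*
 * Allocation hook: capture the current kernel backtrace and file a record
 * for @page under it. Locks are initialized lazily because this can run
 * before the recorder's own init.
 */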
int record_page_record(void *page, unsigned int order)
{
	void *entry, *map_entry = NULL;
	page_record_t record_param;

	if (!page_recorder_debug)
		return 0;
	if (page_record_lock_init == 0) {
		page_record_lock_init = 1;
		spin_lock_init(&page_record_lock);
	}
	if (bt_record_lock_init == 0) {
		bt_record_lock_init = 1;
		spin_lock_init(&bt_record_lock);
	}

	/* get_kernel_backtrace(NULL,1); */
	record_param.page = page;
	record_param.size = order;
	record_param.backtrace_num =
	    (unsigned int)get_kernel_backtrace((unsigned long *)
					       record_param.backtrace,
					       (unsigned int)0);
	entry = get_record(HASH_PAGE_NODE_KERNEL_PAGE_ALLOC_BACKTRACE,
			   &record_param);
	if (entry == NULL) {
		pr_debug("[get_record][KERNEL_PAGE_ALLOC_BACKTRACE] can't get enough memory to create backtrace object\n");
		return -1;
	}
	record_page_info((PageObjectEntry *)entry,
			 (PageObjectEntry *)map_entry, record_param.page,
			 record_param.size, 0);
	return 0;
}
EXPORT_SYMBOL(record_page_record);
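/* Free hook: drop the record filed for @page at allocation time. */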
int remove_page_record(void *page, unsigned int order)
{
	page_record_t record_param;

	record_param.page = page;
	record_param.size = order;

	if (!page_recorder_debug)
		return 0;
	/* record_param.backtrace_num = get_kernel_backtrace(
	   (unsigned long *)record_param.backtrace,(unsigned int)0); */
	/* get_kernel_symbol((unsigned long *)record_param.backtrace,
	   record_param.backtrace_num,&(record_param.kernel_symbol[0])); */
	/* get_kernel_backtrace(NULL,1); */
	remove_page_info(record_param.page, record_param.size);
	return 0;
}
EXPORT_SYMBOL(remove_page_record);
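/*
 * Show handler for the "Usage_rank" debugfs file: walk every backtrace
 * bucket, keep a doubly linked list of the Object_rank_max entries with
 * the largest outstanding size (copies, so the table stays locked only
 * per bucket), then print and free the list.
 */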
static int page_recorder_debug_show(struct seq_file *s, void *unused)
{
	unsigned int index = 0;
	unsigned int *backtrace;
	unsigned int rank_index = 0;
	char symbol[KSYM_SYMBOL_LEN];
	unsigned int i;
	unsigned long flags;
	struct page_object_rank_entry *rank_head = NULL;
	struct page_object_rank_entry *rank_tail = NULL;
	unsigned int Object_rank_count = 0;
	PageObjectEntry *tmp = NULL;

	seq_printf(s, "page_recorder_debug: [%d]\n", page_recorder_debug);
	seq_printf(s, "page_recorder_limit: [%d]\n", page_recorder_limit);
	seq_printf(s, "TOP %d page allocation\n", Object_rank_max);

	for (index = 0; index < OBJECT_TABLE_SIZE; index++) {
		spin_lock_irqsave(&bt_record_lock, flags);
		tmp = gKernelPageBtTable.slots[index];
		while (tmp != NULL) {
			struct page_object_rank_entry *rank_tmp = rank_head;
			struct page_object_rank_entry *rank_tmp_prev = NULL;

			for (rank_index = 0; rank_index < Object_rank_max;
			     rank_index++) {
				struct page_object_rank_entry *new_rank_entry = NULL;
				PageObjectEntry *entry = NULL;

				if ((rank_tmp != NULL)
				    && (rank_tmp->entry->size <= tmp->size)) {
					/* insert current record before rank_tmp */
					new_rank_entry = kmalloc(sizeof(struct page_object_rank_entry), GFP_ATOMIC);
					if (new_rank_entry == NULL) {
						spin_unlock_irqrestore(&bt_record_lock, flags);
						pr_warn("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
						return -ENOMEM;
					}
					entry = kmalloc(sizeof(PageObjectEntry) +
							(20 * sizeof(unsigned int)),
							GFP_ATOMIC);
					if (entry == NULL) {
						kfree(new_rank_entry);
						spin_unlock_irqrestore(&bt_record_lock, flags);
						pr_warn("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
						return -ENOMEM;
					}
					memcpy(entry, tmp,
					       sizeof(PageObjectEntry) +
					       (tmp->numEntries * sizeof(unsigned int)));
					new_rank_entry->entry = entry;
					new_rank_entry->prev = rank_tmp->prev;
					if (rank_tmp->prev != NULL)
						rank_tmp->prev->next = new_rank_entry;
					rank_tmp->prev = new_rank_entry;
					new_rank_entry->next = rank_tmp;
					if (new_rank_entry->prev == NULL)
						rank_head = new_rank_entry;
					if (Object_rank_count < Object_rank_max) {
						Object_rank_count++;
					} else {
						/* list full: free last rank_entry */
						if (rank_tail != NULL) {
							struct page_object_rank_entry *new_tail = rank_tail->prev;

							rank_tail->prev->next = NULL;
							kfree(rank_tail->entry);
							kfree(rank_tail);
							rank_tail = new_tail;
						} else {
							pr_err("ERROR!!! rank_tail is NULL\n");
						}
					}
					break;
				} else if ((rank_tmp == NULL)
					   && (Object_rank_count < Object_rank_max)) {
					/* if rank entry is less than object_entry_max,
					 * create new rank entry and append it to rank list
					 */
					new_rank_entry = kmalloc(sizeof(struct page_object_rank_entry), GFP_ATOMIC);
					if (new_rank_entry == NULL) {
						spin_unlock_irqrestore(&bt_record_lock, flags);
						pr_warn("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
						return -ENOMEM;
					}
					entry = kmalloc(sizeof(PageObjectEntry) +
							(20 * sizeof(unsigned int)),
							GFP_ATOMIC);
					if (entry == NULL) {
						kfree(new_rank_entry);
						spin_unlock_irqrestore(&bt_record_lock, flags);
						pr_warn("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
						return -ENOMEM;
					}
					memcpy(entry, tmp,
					       sizeof(PageObjectEntry) +
					       (tmp->numEntries * sizeof(unsigned int)));
					new_rank_entry->entry = entry;
					new_rank_entry->next = NULL;
					new_rank_entry->prev = rank_tmp_prev;
					if (rank_tmp_prev != NULL)
						rank_tmp_prev->next = new_rank_entry;
					if (new_rank_entry->prev == NULL)
						rank_head = new_rank_entry;
					rank_tail = new_rank_entry;
					Object_rank_count++;
					break;
				}
				rank_tmp_prev = rank_tmp;
				rank_tmp = rank_tmp->next;
			}
			tmp = tmp->next;
		}
		spin_unlock_irqrestore(&bt_record_lock, flags);
	}

	/* print top object_rank_max record */
	{
		struct page_object_rank_entry *rank_tmp = rank_head;
		struct page_object_rank_entry *tmp_record = NULL;

		rank_index = 0;
		while (rank_tmp != NULL) {
			backtrace = (unsigned int *)rank_tmp->entry->object;
			seq_printf(s, "[%d]%s %d %s\n", rank_index, "allocate",
				   rank_tmp->entry->size * 4096, "bytes");
			for (i = 0; i < rank_tmp->entry->numEntries; i++) {
				sprint_symbol(symbol, *(backtrace + i));
				seq_printf(s,
					   " KERNEL[%d] 0x%x :: symbol %s\n",
					   i, backtrace[i], symbol);
			}
			rank_index++;
			tmp_record = rank_tmp;
			rank_tmp = rank_tmp->next;
			kfree(tmp_record->entry);
			kfree(tmp_record);
		}
	}
	return 0;
}
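/*
 * Parse the "page_recorder_debug" boot parameter. The syntax follows the
 * slub_debug style: no value switches full debugging on, '-' switches it
 * off, and ",<char>" restricts debugging to a matching function.
 */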
static int __init setup_page_recorder_debug(char *str)
{
	page_recorder_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/* No options specified. Switch on full debugging. */
		goto out;
	if (*str == ',')
		/*
		 * No options but restriction on page recorder. This means full
		 * debugging for page recorder matching a pattern.
		 */
		goto check_page_recorder;
	page_recorder_debug = 0;
	if (*str == '-')
		/* Switch off all debugging measures. */
		goto out;
check_page_recorder:
	if (*str == ',')
		page_recorder_debug_function = *(str + 1);
out:
	return 1;
}

__setup("page_recorder_debug", setup_page_recorder_debug);
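/* Expose the recorder's controls and statistics under debugfs. */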
static int __init page_recorder_init(void)
{
	/* create debugfs entries for the page recorder */
	debug_root = debugfs_create_dir("page_recorder", NULL);
	debugfs_create_file("Usage_rank", 0444, debug_root, NULL,
			    &debug_page_recorder_fops);
	debugfs_create_u32("Rank_number", 0644, debug_root, &Object_rank_max);
	debugfs_create_file("query_page", 0644, debug_root, NULL,
			    &query_page_ios_fops);
	debugfs_create_u32("page_virtual_address", 0644, debug_root,
			   &queried_address);
	debugfs_create_u32("debug_log", 0644, debug_root, &debug_log);
	debugfs_create_u32("page_recorder_debug", 0644, debug_root,
			   &page_recorder_debug);
	debugfs_create_u32("page_recorder_memory_usage", 0644, debug_root,
			   &page_recorder_memory_usage);
	debugfs_create_u32("page_recorder_limit", 0644, debug_root,
			   &page_recorder_limit);
	return 0;
}

late_initcall(page_recorder_init);