packaging: install license for rpm package instead of license package
[profile/mobile/platform/kernel/linux-3.10-sc7730.git] / mm / kmempagerecorder.c
1 #include <linux/init.h>
2 #include <linux/kmempagerecorder.h>
3 #include <linux/seq_file.h>
4 #include <linux/uaccess.h>
5 #include <linux/mm.h>
6 #include <asm/fixmap.h>
7 #include <linux/highmem.h>
8 #include <linux/vmalloc.h>
9 #include <linux/irqflags.h>
10 #include <linux/spinlock.h>
11
12 #include <linux/device.h>
13 #include <linux/file.h>
14 #include <linux/freezer.h>
15 #include <linux/fs.h>
16 #include <linux/anon_inodes.h>
17 #include <linux/kthread.h>
18 #include <linux/list.h>
19 #include <linux/memblock.h>
20 #include <linux/miscdevice.h>
21 #include <linux/export.h>
22 #include <linux/rbtree.h>
23 #include <linux/rtmutex.h>
24 #include <linux/sched.h>
25 #include <linux/slab.h>
26 #include <linux/debugfs.h>
27 #include <linux/dma-buf.h>
28 #include <linux/kallsyms.h>
29 #include <linux/module.h>
30 #include <linux/stacktrace.h>
31 #define BACKTRACE_LEVEL 10
32 #define DEBUG_DEFAULT_FLAGS 1
33 /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
34 #define GOLDEN_RATIO_PRIME_32 0x9e370001UL
35
36 extern void *high_memory;
37 PageHashTable gPageHashTable;
38 PageObjectTable gKernelPageSymbolTable;
39 PageObjectTable gKernelPageBtTable;
40 static struct kmem_cache *page_cachep = NULL;
41 static unsigned int page_cache_created = false;
42
43 static unsigned int Object_rank_max = 10;
44 static unsigned int queried_address = 0;
45 static unsigned int debug_log = 0;
46 static struct dentry *debug_root;
47 static unsigned int page_record_total = 0;
48 static unsigned int page_record_max = 0;
49 static unsigned int page_record_count = 0;
50 static unsigned int bt_record_total = 0;
51 static unsigned int bt_record_max = 0;
52
53 /* init hash table mutex        */
54 unsigned int page_record_lock_init = 0;
55 unsigned int bt_record_lock_init = 0;
56 unsigned int symbol_record_lock_init = 0;
57 spinlock_t page_record_lock;
58 spinlock_t bt_record_lock;
59 spinlock_t symbol_record_lock;
60 int page_recorder_debug = DEBUG_DEFAULT_FLAGS;
61 unsigned int page_recorder_memory_usage = 0;
62 unsigned int page_recorder_limit = 524288;
63 static char page_recorder_debug_function;
64
65 static int page_recorder_debug_show(struct seq_file *s, void *unused);
66 static inline unsigned int hash_32(unsigned int val, unsigned int bits);
67 static inline PageHashEntry *find_page_entry(void *page, int slot);
68 static char page_recorder_debug_function;
69
/* Turn off page-allocation tracing at runtime (record/remove become no-ops). */
void disable_page_alloc_tracer(void)
{
        page_recorder_debug = 0;
}
74
/* debugfs open callback: bind the stats show routine through seq_file. */
static int page_recorder_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, page_recorder_debug_show, inode->i_private);
}
79
/* read-only debugfs file ops for the page-recorder statistics node */
static const struct file_operations debug_page_recorder_fops = {
        .open = page_recorder_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
86
87 static int query_page_backtrace(struct seq_file *s, unsigned int *page)
88 {
89         char symbol[KSYM_SYMBOL_LEN];
90         unsigned long flags;
91         unsigned int *backtrace;
92         unsigned int i;
93         unsigned int hash = hash_32((unsigned int)page, 16);
94         unsigned int slot = hash % OBJECT_TABLE_SIZE;
95
96         PageObjectEntry *bt_entry = NULL;
97         PageHashEntry *entry = NULL;
98         seq_printf(s, "%s %x\n", "query Page address:", (unsigned int)page);
99
100         /* search page record in hash table */
101         spin_lock_irqsave(&page_record_lock, flags);
102         entry = find_page_entry(page, slot);
103         if (entry != NULL) {
104                 bt_entry = entry->bt_entry;
105                 backtrace = (unsigned int *)bt_entry->object;
106                 seq_printf(s, "%x allocate %d %s\n", (unsigned int)entry->page,
107                            entry->size * 4096, "bytes and backtrace is ");
108                 for (i = 0; i < bt_entry->numEntries; i++) {
109                         sprint_symbol(symbol, *(backtrace + i));
110                         seq_printf(s, "  KERNEL[%d] 0x%x :: symbol %s\n", i,
111                                    backtrace[i], symbol);
112                 }
113         } else {
114                 seq_printf(s, "can't get page(0x%x) backtrace information\n",
115                            (unsigned int)page);
116         }
117         spin_unlock_irqrestore(&page_record_lock, flags);
118         return 0;
119 }
120
/*
 * Walk the kernel page tables to resolve a fixmap/pkmap virtual address
 * to its struct page.  Returns NULL when any table level is empty or
 * the PTE is not present.
 */
static struct page *fixmap_virt_to_page(const void *fixmap_addr)
{
        unsigned long addr = (unsigned long)fixmap_addr;
        struct page *page = NULL;
        pgd_t *pgd = pgd_offset_k(addr);

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd)) {
                                pte_t *ptep, pte;
                                /* map the PTE, sample it, then unmap again */
                                ptep = pte_offset_map(pmd, addr);
                                pte = *ptep;
                                if (pte_present(pte))
                                        page = pte_page(pte);
                                pte_unmap(ptep);
                        }
                }
        }
        return page;
}
143
/*
 * seq_file show callback for the page-query debugfs node.
 * NOTE: the whole address-classification + backtrace lookup below is
 * currently compiled out, so this handler prints nothing and returns 0.
 */
static int query_page_bt_open(struct seq_file *s, void *data)
{
/*
        unsigned int *page_address = NULL;
        seq_printf(s, "queried_page is  : %x\n", queried_address);

        if(is_vmalloc_or_module_addr((const void *)queried_address)) //vmalloc
        {
                seq_printf(s, "[vmalloc or module]queried_page is  : %x\n", queried_address);
                page_address = (unsigned int *)vmalloc_to_page((unsigned int *)(queried_address&0xfffff000));
        }
  else if((queried_address >= 0xC0000000) && (queried_address <= (unsigned int)high_memory ))//lowmem
  {
                seq_printf(s, "[lowmem]queried_page is  : %x\n", queried_address);
                page_address = (unsigned int *)virt_to_page((void*)(queried_address&0xfffff000 ));
  }
        else if((queried_address >= FIXADDR_START) && (queried_address <= FIXADDR_TOP)) //fixmap
        {
                seq_printf(s, "[fixmap]queried_page is  : %x\n", queried_address);
                page_address = (unsigned int *)fixmap_virt_to_page((const void *)(queried_address&0xfffff000));
        }
        else if((queried_address >= PKMAP_BASE) && (queried_address)<= PKMAP_ADDR(LAST_PKMAP))//pkmap
        {
                seq_printf(s, "[pkmap]queried_page is  : %x\n", queried_address);
                page_address = (unsigned int *)fixmap_virt_to_page((const void *)(queried_address&0xfffff000));
        }
        else
        {
                seq_printf(s, "[ERROR!!]queried_page is  : %x can't find address in memory map\n", queried_address);
        }
        query_page_backtrace(s,(unsigned int*)page_address);
*/
        return 0;
}
178
/* debugfs open callback: bind the (disabled) query show routine via seq_file. */
static int query_page_single_open(struct inode *inode, struct file *file)
{
        return single_open(file, query_page_bt_open, inode->i_private);
}
183
/* read-only debugfs file ops for the page-address query node */
static const struct file_operations query_page_ios_fops = {
        .open = query_page_single_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
190
191 static unsigned int get_kernel_backtrace(unsigned long *backtrace,
192                                          unsigned int debug)
193 {
194         unsigned long stack_entries[BACKTRACE_LEVEL];
195         unsigned int i = 0;
196         char tmp[KSYM_SYMBOL_LEN];
197         struct stack_trace trace = {
198                 .nr_entries = 0,
199                 .entries = &stack_entries[0],
200                 .max_entries = BACKTRACE_LEVEL,
201                 .skip = 1
202         };
203         save_stack_trace(&trace);
204         if (trace.nr_entries > 0) {
205                 if (debug) {
206                         for (i = 0; i < trace.nr_entries; i++) {
207                                 sprint_symbol(tmp, trace.entries[i]);
208                                 pr_debug("[%d] 0x%x %s\n", i,
209                                          (unsigned int)trace.entries[i], tmp);
210                         }
211                 } else {
212                         memcpy(backtrace, (unsigned long *)trace.entries,
213                                sizeof(unsigned int) * trace.nr_entries);
214                 }
215         } else {
216                 pr_err
217                     ("[ERROR]can't get backtrace [get_kernel_backtrace] backtrace num: [%d]\n",
218                      trace.nr_entries);
219         }
220         return trace.nr_entries;
221 }
222
/*
 * Multiplicative (Fibonacci) hash: mix @val and keep its top @bits bits.
 * 0x9e370001 is GOLDEN_RATIO_PRIME_32 (2^31 + 2^29 - 2^25 + 2^22 - 2^19
 * - 2^16 + 1); on some cpus the multiply is fast, on others gcc lowers
 * it to shifts and adds.
 */
static inline unsigned int hash_32(unsigned int val, unsigned int bits)
{
	unsigned int mixed = val * 0x9e370001UL;

	/* the high-order bits of the product are the best mixed ones */
	return mixed >> (32 - bits);
}
231
/*
 * Hash a sequence of @numEntries 32-bit words (a backtrace) into a
 * single value.  djb2-style: hash*33 plus each word shifted right by 2
 * to drop instruction-alignment bits.  Returns 0 for a NULL @object.
 */
static uint32_t get_hash(void *object, size_t numEntries)
{
	const unsigned int *words = (const unsigned int *)object;
	uint32_t hash = 0;
	size_t i;

	if (words == NULL)
		return 0;

	for (i = 0; i < numEntries; i++)
		hash = (hash * 33) + (words[i] >> 2);

	return hash;
}
247
248 PageObjectEntry *find_entry(PageObjectTable * table, unsigned int slot,
249                             void *object, unsigned int numEntries,
250                             unsigned int size)
251 {
252         PageObjectEntry *entry = table->slots[slot];
253         while (entry != NULL) {
254                 if (entry->numEntries == numEntries &&
255                     !memcmp(object, entry->object,
256                             numEntries * sizeof(unsigned int))) {
257                         return entry;
258                 }
259                 entry = entry->next;
260         }
261         return NULL;
262 }
263
264 static void *allocate_record(unsigned int type)
265 {
266         switch (type) {
267         case NODE_PAGE_RECORD:
268                 {
269                         if (!page_cache_created) {
270                                 /* page_cachep = kmem_cache_create("page_record",
271                                    sizeof(PageHashEntry),0,SLAB_NO_DEBUG,NULL); */
272                                 page_cachep =
273                                     kmem_cache_create("page_record",
274                                                       sizeof(PageHashEntry), 0,
275                                                       0, NULL);
276                                 page_cache_created = true;
277                         }
278                         /* if system ram < 2G, page_record_total should less than 524288 */
279                         if ((page_cachep != NULL)
280                             && (page_record_total < page_recorder_limit)) {
281                                 void *tmp = NULL;
282                                 tmp =
283                                     (void *)kmem_cache_alloc(page_cachep,
284                                                              GFP_KERNEL);
285                                 if (tmp == 0) {
286                                         return NULL;
287                                 }
288                                 return tmp;
289                         }
290                         return NULL;
291                         break;
292                 }
293         }
294         return NULL;
295 }
296
/* get record from hash table or create new node from slab allocator */
/*
 * For HASH_PAGE_NODE_KERNEL_PAGE_ALLOC_BACKTRACE: look up @param's
 * backtrace in gKernelPageBtTable; bump reference/size on a hit, or
 * kmalloc and insert a new record on a miss.  Returns the (shared)
 * PageObjectEntry, or NULL on failure.
 * For HASH_PAGE_NODE_KERNEL_SYMBOL: same scheme against
 * gKernelPageSymbolTable, but this path always falls through to the
 * common NULL return — callers only need the table side effect.
 */
static void *get_record(unsigned int type, page_record_t * param)
{
        page_record_t *tmp = param;
        PageObjectEntry *entry = NULL;
        unsigned int hash;
        unsigned int slot;
        unsigned long flags;

        if (tmp != NULL) {
                switch (type) {
                case HASH_PAGE_NODE_KERNEL_PAGE_ALLOC_BACKTRACE:
                        {
                                hash = get_hash(param->backtrace,
                                                param->backtrace_num);
                                slot = hash % OBJECT_TABLE_SIZE;
                                spin_lock_irqsave(&bt_record_lock, flags);
                                entry =
                                    find_entry(&gKernelPageBtTable, slot,
                                               (void *)param->backtrace,
                                               param->backtrace_num,
                                               param->size);
                                if (entry != NULL) {
                                        /* existing backtrace: one more user,
                                         * account 2^size more pages */
                                        entry->reference++;
                                        entry->size =
                                            entry->size + (1 << param->size);
                                        spin_unlock_irqrestore(&bt_record_lock,
                                                               flags);
                                } else {
                                        /* NOTE(review): the lock is dropped
                                         * before the insert below, so two CPUs
                                         * missing the same backtrace can each
                                         * insert a duplicate record — confirm
                                         * whether that is acceptable here */
                                        spin_unlock_irqrestore(&bt_record_lock,
                                                               flags);

                                        /* total bt reocrd size should less than 5MB */
                                        if (bt_record_total < 50412) {
                                                /* 20 words reserved even though
                                                 * a trace holds at most
                                                 * BACKTRACE_LEVEL (10) frames */
                                                entry =
                                                    kmalloc(sizeof
                                                            (PageObjectEntry) +
                                                            (20 *
                                                             sizeof(unsigned
                                                                    int)),
                                                            GFP_KERNEL);
                                                if (entry == NULL) {
                                                        pr_err
                                                            ("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
                                                        return NULL;
                                                }
                                        } else {
                                                return NULL;
                                        }

                                        /* kmalloc can't get right memory space when booting */
                                        /* NOTE(review): this early-boot guard
                                         * returns without kfree()ing entry, so
                                         * the allocation is deliberately
                                         * dropped (leaked) — confirm intent */
                                        if ((unsigned int)entry < 0xC0000000) {
                                                pr_debug
                                                    ("[BAKCTRACEINFO][allocate bt mem] entry (0x%x) drop address \n",
                                                     (unsigned int)entry);
                                                return NULL;
                                        }
                                        entry->reference = 1;
                                        entry->prev = NULL;
                                        entry->slot = slot;
                                        entry->numEntries =
                                            param->backtrace_num;
                                        entry->size = 1 << param->size;
                                        memcpy(entry->object, param->backtrace,
                                               entry->numEntries *
                                               sizeof(unsigned int));
                                        spin_lock_irqsave(&bt_record_lock,
                                                          flags);
                                        /* push at the head of the slot list */
                                        entry->next =
                                            gKernelPageBtTable.slots[slot];
                                        gKernelPageBtTable.slots[slot] = entry;
                                        if (entry->next != NULL) {
                                                entry->next->prev = entry;
                                        }
                                        gKernelPageBtTable.count++;
                                        bt_record_total++;
                                        if (bt_record_total > bt_record_max) {
                                                bt_record_max = bt_record_total;
                                        }
                                        spin_unlock_irqrestore(&bt_record_lock,
                                                               flags);
                                }
                                return entry;
                        }
                case HASH_PAGE_NODE_KERNEL_SYMBOL:
                        {
                                hash =
                                    get_hash(param->kernel_symbol,
                                             param->backtrace_num);
                                slot = hash % OBJECT_TABLE_SIZE;
                                spin_lock_irqsave(&symbol_record_lock, flags);
                                entry =
                                    find_entry(&gKernelPageSymbolTable, slot,
                                               (void *)param->kernel_symbol,
                                               param->backtrace_num,
                                               param->size);
                                if (entry != NULL) {
                                        entry->reference++;
                                        spin_unlock_irqrestore
                                            (&symbol_record_lock, flags);
                                        return NULL;
                                } else {
                                        spin_unlock_irqrestore
                                            (&symbol_record_lock, flags);
                                        entry =
                                            kmalloc(sizeof(PageObjectEntry) +
                                                    (param->backtrace_num *
                                                     sizeof(unsigned int)),
                                                    GFP_KERNEL);
                                        if (entry == NULL) {
                                                pr_err
                                                    ("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
                                                return NULL;
                                        }
                                        entry->reference = 1;
                                        entry->prev = NULL;
                                        entry->slot = slot;
                                        entry->numEntries =
                                            param->backtrace_num;
                                        memcpy(entry->object,
                                               param->kernel_symbol,
                                               entry->numEntries *
                                               sizeof(unsigned int));
                                        spin_lock_irqsave(&symbol_record_lock,
                                                          flags);
                                        entry->next =
                                            gKernelPageSymbolTable.slots[slot];
                                        gKernelPageSymbolTable.slots[slot] =
                                            entry;
                                        if (entry->next != NULL) {
                                                entry->next->prev = entry;
                                        }
                                        gKernelPageSymbolTable.count++;
                                        spin_unlock_irqrestore
                                            (&symbol_record_lock, flags);
                                }
                        }
                        /* falls through to the common NULL return */
                }
        }
        return NULL;
}
438
439 static inline PageHashEntry *find_page_entry(void *page, int slot)
440 {
441         PageHashEntry *entry = gPageHashTable.page_hash_table[slot];
442         while (entry != NULL) {
443                 if (entry->page == page) {
444                         return entry;
445                 }
446                 entry = entry->next;
447         }
448         return NULL;
449 }
450
/*
 * Create a PageHashEntry for @page (an allocation of 2^@order pages),
 * link it to its shared backtrace record @bt_entry, and insert it at
 * the head of its hash slot.  @map_entry is stored as-is (NULL at the
 * current call site).  Returns the new entry, or NULL when the record
 * budget is hit or the slab allocation fails.
 */
PageHashEntry *record_page_info(PageObjectEntry * bt_entry,
                                PageObjectEntry * map_entry, void *page,
                                unsigned int order, unsigned int flag)
{
        /* calculate the hash value */
        unsigned int hash = hash_32((unsigned int)page, 16);
        unsigned int slot = hash % OBJECT_TABLE_SIZE;
        unsigned long flags;

        PPageHashEntry entry =
            (PPageHashEntry) allocate_record(NODE_PAGE_RECORD);
        if (!entry) {
                pr_debug
                    ("[get_record][KERNEL_PAGE_ALLOC_BACKTRACE]can't get enough memory to create page entry\n");
                return NULL;
        }
        /* initialize page entry */
        entry->page = page;
        entry->size = 1 << order;       /* size in pages */
        entry->allocate_map_entry = map_entry;
        entry->bt_entry = bt_entry;
        entry->flag = (2 | flag);
        entry->prev = NULL;
        spin_lock_irqsave(&page_record_lock, flags);

        /* insert the entry to the head of slot list */
        if (gPageHashTable.page_hash_table[slot] == NULL) {
                entry->next = NULL;
        } else {
                (gPageHashTable.page_hash_table[slot])->prev = entry;
                entry->next = gPageHashTable.page_hash_table[slot];
        }
        gPageHashTable.page_hash_table[slot] = entry;
        gPageHashTable.count++;
        page_record_total++;
        if (page_record_total > page_record_max) {
                page_record_max = page_record_total;
        }
        page_record_count++;
        /* every 1000 insertions, refresh the usage figure and dump stats */
        if (page_record_count >= 1000) {
                page_recorder_memory_usage =
                    page_record_total * sizeof(PageHashEntry) +
                    bt_record_total * (sizeof(PageObjectEntry) +
                                       (20 * sizeof(unsigned int)));
                pr_debug
                    ("[TOTAL PAGE RECORD !!!] page record size is %d max page record size is %d\n",
                     page_record_total * sizeof(PageHashEntry),
                     page_record_max * sizeof(PageHashEntry));
                pr_debug
                    ("[TOTAL BACKTRACE RECORD !!!] bt record size is %d max bt record size is %d\n",
                     bt_record_total * (sizeof(PageObjectEntry) +
                                        (20 * sizeof(unsigned int))),
                     bt_record_max * (sizeof(PageObjectEntry) +
                                      (20 * sizeof(unsigned int))));
                page_record_count = 0;
        }

        spin_unlock_irqrestore(&page_record_lock, flags);
        return entry;
}
511
/*
 * Remove the tracking record for @page (an allocation of 2^@order pages)
 * and release its reference on the shared backtrace record, freeing the
 * backtrace record when this was its last user.
 * Returns 0 on success, 1 when the page was never recorded, -1 when the
 * record had no backtrace entry attached.
 */
int remove_page_info(void *page, unsigned int order)
{
        unsigned int hash = hash_32((unsigned int)page, 16);
        unsigned int slot = hash % OBJECT_TABLE_SIZE;
        PageObjectEntry *bt_entry = NULL;
        PageHashEntry *entry = NULL;
        unsigned long flags;

        /* search page record in hash table */
        /* lazy lock init; NOTE(review): the flag check + init pair is not
         * atomic — assumes the first alloc/free happens before concurrency
         * starts, confirm */
        if (page_record_lock_init == 0) {
                page_record_lock_init = 1;
                spin_lock_init(&page_record_lock);
        }
        if (bt_record_lock_init == 0) {
                bt_record_lock_init = 1;
                spin_lock_init(&bt_record_lock);
        }

        spin_lock_irqsave(&page_record_lock, flags);
        entry = find_page_entry(page, slot);
        if (entry == NULL) {
                spin_unlock_irqrestore(&page_record_lock, flags);
                /* pr_debug("[remove_page_info]can't find page info 0x%x\n",page); */
                if (debug_log) {
                        get_kernel_backtrace(NULL, 1);
                }
                return 1;
        } else {
                /* remove page record from hash table */
                /* head */
                if (entry->prev == NULL) {
                        gPageHashTable.page_hash_table[slot] = entry->next;
                        /* not only one entry in the slot */
                        if (gPageHashTable.page_hash_table[slot] != NULL)
                                gPageHashTable.page_hash_table[slot]->prev =
                                    NULL;
                } else if (entry->next == NULL) {       /* tail */
                        entry->prev->next = NULL;
                } else {        /* middle of the list */
                        entry->next->prev = entry->prev;
                        entry->prev->next = entry->next;
                }

                gPageHashTable.count--;
                page_record_total--;
                spin_unlock_irqrestore(&page_record_lock, flags);

                /* clean page entry */
                entry->next = NULL;
                entry->prev = NULL;
                bt_entry = entry->bt_entry;
                kmem_cache_free(page_cachep, entry);

                /* create alloc bt entry for historical allocation */
                if (bt_entry == NULL) {
                        return -1;
                } else {
                        spin_lock_irqsave(&bt_record_lock, flags);
                        if (bt_entry->reference > 1) {
                                /* shared backtrace: drop one reference and
                                 * subtract this allocation's pages */
                                (bt_entry->reference)--;
                                bt_entry->size = bt_entry->size - (1 << order);
                                spin_unlock_irqrestore(&bt_record_lock, flags);
                                pr_debug
                                    ("[remove_page_info] bt_entry->size %d\n",
                                     bt_entry->size);
                        } else if (bt_entry->reference == 1) {
                                /* last reference: unlink the backtrace record
                                 * from its slot and free it */
                                unsigned int hash_bt;
                                unsigned int slot_bt;
                                hash_bt =
                                    get_hash(bt_entry->object,
                                             bt_entry->numEntries);
                                slot_bt = hash_bt % OBJECT_TABLE_SIZE;

                                if (bt_entry->prev == NULL) {   /* head */
                                        gKernelPageBtTable.slots[slot_bt] =
                                            bt_entry->next;
                                        /* not only one entry in the slot */
                                        if (gKernelPageBtTable.slots[slot_bt] !=
                                            NULL)
                                                gKernelPageBtTable.slots
                                                    [slot_bt]->prev = NULL;
                                } else if (bt_entry->next == NULL) {
                                        bt_entry->prev->next = NULL;
                                } else {
                                        bt_entry->next->prev = bt_entry->prev;
                                        bt_entry->prev->next = bt_entry->next;
                                }
                                spin_unlock_irqrestore(&bt_record_lock, flags);
                                /* NOTE(review): bt_record_total is decremented
                                 * after bt_record_lock is dropped — racy vs
                                 * the increment in get_record; confirm */
                                bt_record_total--;
                                kfree(bt_entry);
                        } else {
                                spin_unlock_irqrestore(&bt_record_lock, flags);
                                pr_err("ERROR !!!!free page info\n");
                        }
                }
        }
        return 0;
}
610
611 int record_page_record(void *page, unsigned int order)
612 {
613         void *entry, *map_entry = NULL;
614         page_record_t record_param;
615         if (!page_recorder_debug) {
616                 return 0;
617         }
618         if (page_record_lock_init == 0) {
619                 page_record_lock_init = 1;
620                 spin_lock_init(&page_record_lock);
621         }
622         if (bt_record_lock_init == 0) {
623                 bt_record_lock_init = 1;
624                 spin_lock_init(&bt_record_lock);
625         }
626         if (debug_log & 1) {
627                 /* get_kernel_backtrace(NULL,1); */
628         }
629         record_param.page = page;
630         record_param.size = order;
631         record_param.backtrace_num =
632             (unsigned int)get_kernel_backtrace((unsigned long *)
633                                                record_param.backtrace,
634                                                (unsigned int)0);
635
636         entry =
637             get_record(HASH_PAGE_NODE_KERNEL_PAGE_ALLOC_BACKTRACE,
638                        &record_param);
639         if (entry == NULL) {
640                 pr_debug("[get_record][KERNEL_PAGE_ALLOC_BACKTRACE]");
641                 pr_debug
642                     ("can't get enough memory to create backtrace object\n");
643                 return 0;
644         }
645         record_page_info((PageObjectEntry *) entry,
646                          (PageObjectEntry *) map_entry, record_param.page,
647                          record_param.size, 0);
648         return 1;
649 }
650
651 EXPORT_SYMBOL(record_page_record);
652
653 int remove_page_record(void *page, unsigned int order)
654 {
655         page_record_t record_param;
656         record_param.page = page;
657         record_param.size = order;
658
659         if (!page_recorder_debug) {
660                 return 0;
661         }
662         /* record_param.backtrace_num = get_kernel_backtrace(
663            (unsigned long *)record_param.backtrace,(unsigned int)0); */
664         /* get_kernel_symbol((unsigned long *)record_param.backtrace,
665            record_param.backtrace_num,&(record_param.kernel_symbol[0])); */
666         if (debug_log & 2) {
667                 /* get_kernel_backtrace(NULL,1); */
668         }
669
670         remove_page_info(record_param.page, record_param.size);
671         return 1;
672 }
673
674 EXPORT_SYMBOL(remove_page_record);
675
676 static int page_recorder_debug_show(struct seq_file *s, void *unused)
677 {
678         unsigned int index = 0;
679         unsigned int *backtrace;
680         unsigned int rank_index = 0;
681         char symbol[KSYM_SYMBOL_LEN];
682         unsigned int i = 0;
683         struct page_object_rank_entry *rank_head = NULL;
684         struct page_object_rank_entry *rank_tail = NULL;
685         unsigned int Object_rank_count = 0;
686         PageObjectEntry *tmp = NULL;
687         unsigned long flags;
688
689         seq_printf(s, "page_recorder_debug: [%d]\n", page_recorder_debug);
690         seq_printf(s, "page_recorder_limit: [%d]\n", page_recorder_limit);
691         seq_printf(s, "TOP %d page allocation \n", Object_rank_max);
692         for (index = 0; index < OBJECT_TABLE_SIZE; index++) {
693                 tmp = NULL;
694                 spin_lock_irqsave(&bt_record_lock, flags);
695                 tmp = gKernelPageBtTable.slots[index];
696                 while (tmp != NULL) {
697                         struct page_object_rank_entry *rank_tmp = rank_head;
698                         struct page_object_rank_entry *rank_tmp_prev =
699                             rank_head;
700                         for (rank_index = 0; rank_index < Object_rank_max;
701                              rank_index++) {
702                                 struct page_object_rank_entry *new_rank_entry =
703                                     NULL;
704                                 PageObjectEntry *entry = NULL;
705                                 if ((rank_tmp != NULL)
706                                     && (rank_tmp->entry->size <= tmp->size)) {
707                                         /* insert current record into list */
708                                         PageObjectEntry *entry = NULL;
709                                         new_rank_entry =
710                                             (struct page_object_rank_entry *)
711                                             kmalloc(sizeof
712                                                     (struct
713                                                      page_object_rank_entry),
714                                                     GFP_ATOMIC);
715                                         if (new_rank_entry == NULL) {
716                                                 spin_unlock_irqrestore
717                                                     (&bt_record_lock, flags);
718                                                 pr_err
719                                                     ("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
720                                                 return NULL;
721                                         }
722                                         entry =
723                                             kmalloc(sizeof(PageObjectEntry) +
724                                                     (20 * sizeof(unsigned int)),
725                                                     GFP_ATOMIC);
726                                         if (entry == NULL) {
727                                                 spin_unlock_irqrestore
728                                                     (&bt_record_lock, flags);
729                                                 pr_err
730                                                     ("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
731                                                 return NULL;
732                                         }
733                                         memcpy(entry, tmp,
734                                                sizeof(PageObjectEntry) +
735                                                (20 * sizeof(unsigned int)));
736                                         new_rank_entry->entry = entry;
737                                         new_rank_entry->prev = rank_tmp->prev;
738                                         if (rank_tmp->prev != NULL) {
739                                                 rank_tmp->prev->next =
740                                                     new_rank_entry;
741                                         }
742                                         rank_tmp->prev = new_rank_entry;
743                                         new_rank_entry->next = rank_tmp;
744                                         if (new_rank_entry->prev == NULL) {
745                                                 rank_head = new_rank_entry;
746                                         }
747                                         if (Object_rank_count <
748                                             (Object_rank_max)) {
749                                                 Object_rank_count++;
750                                         } else {
751                                                 /* free last rank_entry */
752                                                 if (rank_tail != NULL) {
753                                                         struct
754                                                             page_object_rank_entry
755                                                         *new_tail = NULL;
756                                                         new_tail =
757                                                             rank_tail->prev;
758                                                         rank_tail->prev->next =
759                                                             NULL;
760                                                         kfree(rank_tail->entry);
761                                                         kfree(rank_tail);
762                                                         rank_tail = new_tail;
763                                                 } else {
764                                                         pr_err
765                                                             ("ERROR!!! rank_tail is NULL\n");
766                                                 }
767                                         }
768                                         break;
769                                 } else if ((rank_tmp == NULL)
770                                            && (Object_rank_count <
771                                                Object_rank_max)) {
772                                         /* if rank entry is less than object_entry_max,
773                                            create new rank entry and insert it in rank list */
774                                         new_rank_entry =
775                                             (struct page_object_rank_entry *)
776                                             kmalloc(sizeof
777                                                     (struct
778                                                      page_object_rank_entry),
779                                                     GFP_ATOMIC);
780                                         if (new_rank_entry == NULL) {
781                                                 spin_unlock_irqrestore
782                                                     (&bt_record_lock, flags);
783                                                 pr_err
784                                                     ("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
785                                                 return NULL;
786                                         }
787                                         entry =
788                                             kmalloc(sizeof(PageObjectEntry) +
789                                                     (20 * sizeof(unsigned int)),
790                                                     GFP_ATOMIC);
791                                         if (entry == NULL) {
792                                                 spin_unlock_irqrestore
793                                                     (&bt_record_lock, flags);
794                                                 pr_err
795                                                     ("[PAGE_RECORDER]Error!!! can't get memory from kmalloc\n");
796                                                 return NULL;
797                                         }
798                                         memcpy(entry, tmp,
799                                                sizeof(PageObjectEntry) +
800                                                (20 * sizeof(unsigned int)));
801                                         new_rank_entry->entry = entry;
802                                         new_rank_entry->next = NULL;
803                                         new_rank_entry->prev = rank_tmp_prev;
804                                         if (rank_tmp_prev != NULL) {
805                                                 rank_tmp_prev->next =
806                                                     new_rank_entry;
807                                         }
808                                         if (new_rank_entry->prev == NULL) {
809                                                 rank_head = new_rank_entry;
810                                         }
811                                         rank_tail = new_rank_entry;
812                                         Object_rank_count++;
813                                         break;
814                                 }
815                                 rank_tmp_prev = rank_tmp;
816                                 rank_tmp = rank_tmp->next;
817                         }
818                         tmp = tmp->next;
819                 }
820                 spin_unlock_irqrestore(&bt_record_lock, flags);
821         }
822
823         /* print top object_rank_max record */
824         {
825                 struct page_object_rank_entry *rank_tmp = rank_head;
826                 struct page_object_rank_entry *tmp_record = NULL;
827                 rank_index = 0;
828                 while (rank_tmp != NULL) {
829                         backtrace = (unsigned int *)rank_tmp->entry->object;
830                         seq_printf(s, "[%d]%s %d %s\n", rank_index,
831                                    "Backtrace pages ",
832                                    rank_tmp->entry->size * 4096, "bytes");
833                         for (i = 0; i < rank_tmp->entry->numEntries; i++) {
834                                 sprint_symbol(symbol, *(backtrace + i));
835                                 seq_printf(s,
836                                            "  KERNEL[%d] 0x%x :: symbol %s\n",
837                                            i, backtrace[i], symbol);
838                         }
839                         rank_index++;
840                         tmp_record = rank_tmp;
841                         rank_tmp = rank_tmp->next;
842                         kfree(tmp_record->entry);
843                         kfree(tmp_record);
844                 }
845         }
846         return 0;
847 }
848
849 static int __init setup_page_recorder_debug(char *str)
850 {
851         page_recorder_debug = DEBUG_DEFAULT_FLAGS;
852         if (*str++ != '=' || !*str)
853                 /*
854                  * No options specified. Switch on full debugging.
855                  */
856                 goto out;
857
858         if (*str == ',')
859                 /*
860                  * No options but restriction on page recorder. This means full
861                  * debugging for page recorder matching a pattern.
862                  */
863                 goto check_page_recorder;
864
865         page_recorder_debug = 0;
866         if (*str == '-')
867                 /*
868                  * Switch off all debugging measures.
869                  */
870                 goto out;
871
872 check_page_recorder:
873         if (*str == ',')
874                 page_recorder_debug_function = *(str + 1);
875 out:
876         return 1;
877 }
878
879 __setup("page_recorder_debug", setup_page_recorder_debug);
880
881 static int __init page_recorder_init(void)
882 {
883         /* Create page allocate */
884         debug_root = debugfs_create_dir("page_recorder", NULL);
885         debugfs_create_file("Usage_rank", 0444, debug_root, NULL,
886                             &debug_page_recorder_fops);
887         debugfs_create_u32("Rank_number", 0644, debug_root, &Object_rank_max);
888         debugfs_create_file("query_page", 0644, debug_root, NULL,
889                             &query_page_ios_fops);
890         debugfs_create_u32("page_virtual_address", 0644, debug_root,
891                            &queried_address);
892         debugfs_create_u32("debug_log", 0644, debug_root, &debug_log);
893         debugfs_create_u32("page_recorder_debug", 0644, debug_root,
894                            &page_recorder_debug);
895         debugfs_create_u32("page_recorder_memory_usage", 0644, debug_root,
896                            &page_recorder_memory_usage);
897         debugfs_create_u32("page_recorder_limit", 0644, debug_root,
898                            &page_recorder_limit);
899         return 0;
900 }
901
902 late_initcall(page_recorder_init);