// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"

#include "util/dso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/config.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/callchain.h"
#include "util/time-utils.h"
#include <linux/err.h>

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"

#include "util/debug.h"
#include "util/string2.h"

#include <linux/kernel.h>
#include <linux/numa.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <regex.h>

#include <linux/ctype.h>

static int      kmem_slab;
static int      kmem_page;

static long     kmem_page_size;
static enum {
        KMEM_SLAB,
        KMEM_PAGE,
} kmem_default = KMEM_SLAB;  /* for backward compatibility */

struct alloc_stat;
typedef int (*sort_fn_t)(void *, void *);

static int                      alloc_flag;
static int                      caller_flag;

static int                      alloc_lines = -1;
static int                      caller_lines = -1;

static bool                     raw_ip;

struct alloc_stat {
        u64     call_site;
        u64     ptr;
        u64     bytes_req;
        u64     bytes_alloc;
        u64     last_alloc;
        u32     hit;
        u32     pingpong;

        short   alloc_cpu;

        struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated, total_freed;
static unsigned long nr_allocs, nr_cross_allocs;

/* filters for controlling start and stop of time of analysis */
static struct perf_time_interval ptime;
const char *time_str;

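/*
 * Insert or update a per-pointer allocation record in root_alloc_stat.
 * The rbtree is keyed by the allocated pointer; repeated allocations of
 * the same pointer accumulate hit counts and byte totals.
 */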
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
                             int bytes_req, int bytes_alloc, int cpu)
{
        struct rb_node **node = &root_alloc_stat.rb_node;
        struct rb_node *parent = NULL;
        struct alloc_stat *data = NULL;

        while (*node) {
                parent = *node;
                data = rb_entry(*node, struct alloc_stat, node);

                if (ptr > data->ptr)
                        node = &(*node)->rb_right;
                else if (ptr < data->ptr)
                        node = &(*node)->rb_left;
                else
                        break;
        }

        if (data && data->ptr == ptr) {
                data->hit++;
                data->bytes_req += bytes_req;
                data->bytes_alloc += bytes_alloc;
        } else {
                data = malloc(sizeof(*data));
                if (!data) {
                        pr_err("%s: malloc failed\n", __func__);
                        return -1;
                }
                data->ptr = ptr;
                data->pingpong = 0;
                data->hit = 1;
                data->bytes_req = bytes_req;
                data->bytes_alloc = bytes_alloc;

                rb_link_node(&data->node, parent, node);
                rb_insert_color(&data->node, &root_alloc_stat);
        }
        data->call_site = call_site;
        data->alloc_cpu = cpu;
        data->last_alloc = bytes_alloc;

        return 0;
}

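/*
 * Insert or update a per-callsite allocation record in root_caller_stat,
 * keyed by call_site rather than by the allocated pointer.
 */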
static int insert_caller_stat(unsigned long call_site,
                              int bytes_req, int bytes_alloc)
{
        struct rb_node **node = &root_caller_stat.rb_node;
        struct rb_node *parent = NULL;
        struct alloc_stat *data = NULL;

        while (*node) {
                parent = *node;
                data = rb_entry(*node, struct alloc_stat, node);

                if (call_site > data->call_site)
                        node = &(*node)->rb_right;
                else if (call_site < data->call_site)
                        node = &(*node)->rb_left;
                else
                        break;
        }

        if (data && data->call_site == call_site) {
                data->hit++;
                data->bytes_req += bytes_req;
                data->bytes_alloc += bytes_alloc;
        } else {
                data = malloc(sizeof(*data));
                if (!data) {
                        pr_err("%s: malloc failed\n", __func__);
                        return -1;
                }
                data->call_site = call_site;
                data->pingpong = 0;
                data->hit = 1;
                data->bytes_req = bytes_req;
                data->bytes_alloc = bytes_alloc;

                rb_link_node(&data->node, parent, node);
                rb_insert_color(&data->node, &root_caller_stat);
        }

        return 0;
}

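/* Handler for the kmem:kmalloc/kmem_cache_alloc (and legacy *_node) tracepoints. */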
static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
        unsigned long ptr = evsel__intval(evsel, sample, "ptr"),
                      call_site = evsel__intval(evsel, sample, "call_site");
        int bytes_req = evsel__intval(evsel, sample, "bytes_req"),
            bytes_alloc = evsel__intval(evsel, sample, "bytes_alloc");

        if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
            insert_caller_stat(call_site, bytes_req, bytes_alloc))
                return -1;

        total_requested += bytes_req;
        total_allocated += bytes_alloc;

        nr_allocs++;

        /*
         * Commit 11e9734bcb6a ("mm/slab_common: unify NUMA and UMA
         * version of tracepoints") added the field "node" to the
         * 'kmalloc' and 'kmem_cache_alloc' tracepoints.
         *
         * The legacy tracepoints 'kmalloc_node' and 'kmem_cache_alloc_node'
         * also contain the field "node".
         *
         * If the tracepoint contains the field "node", the tool collects
         * cross-node allocation statistics.
         */
        if (evsel__field(evsel, "node")) {
                int node1, node2;

                node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
                node2 = evsel__intval(evsel, sample, "node");

                /*
                 * If the field "node" is NUMA_NO_NODE (-1), we don't take it
                 * as a cross allocation.
                 */
                if ((node2 != NUMA_NO_NODE) && (node1 != node2))
                        nr_cross_allocs++;
        }

        return 0;
}

static int ptr_cmp(void *, void *);
static int slab_callsite_cmp(void *, void *);

static struct alloc_stat *search_alloc_stat(unsigned long ptr,
                                            unsigned long call_site,
                                            struct rb_root *root,
                                            sort_fn_t sort_fn)
{
        struct rb_node *node = root->rb_node;
        struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

        while (node) {
                struct alloc_stat *data;
                int cmp;

                data = rb_entry(node, struct alloc_stat, node);

                cmp = sort_fn(&key, data);
                if (cmp < 0)
                        node = node->rb_left;
                else if (cmp > 0)
                        node = node->rb_right;
                else
                        return data;
        }
        return NULL;
}

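/*
 * Handler for the kmem:kfree/kmem_cache_free tracepoints: account freed
 * bytes and detect "ping-pong" frees that happen on a different CPU than
 * the matching allocation.
 */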
static int evsel__process_free_event(struct evsel *evsel, struct perf_sample *sample)
{
        unsigned long ptr = evsel__intval(evsel, sample, "ptr");
        struct alloc_stat *s_alloc, *s_caller;

        s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
        if (!s_alloc)
                return 0;

        total_freed += s_alloc->last_alloc;

        if ((short)sample->cpu != s_alloc->alloc_cpu) {
                s_alloc->pingpong++;

                s_caller = search_alloc_stat(0, s_alloc->call_site,
                                             &root_caller_stat,
                                             slab_callsite_cmp);
                if (!s_caller)
                        return -1;
                s_caller->pingpong++;
        }
        s_alloc->alloc_cpu = -1;

        return 0;
}

static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;
static u64 total_page_fail_bytes;
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;

static bool use_pfn;
static bool live_page;
static struct perf_session *kmem_session;

#define MAX_MIGRATE_TYPES  6
#define MAX_PAGE_ORDER     11

static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];

struct page_stat {
        struct rb_node  node;
        u64             page;
        u64             callsite;
        int             order;
        unsigned        gfp_flags;
        unsigned        migrate_type;
        u64             alloc_bytes;
        u64             free_bytes;
        int             nr_alloc;
        int             nr_free;
};

static struct rb_root page_live_tree;
static struct rb_root page_alloc_tree;
static struct rb_root page_alloc_sorted;
static struct rb_root page_caller_tree;
static struct rb_root page_caller_sorted;

struct alloc_func {
        u64 start;
        u64 end;
        char *name;
};

static int nr_alloc_funcs;
static struct alloc_func *alloc_func_list;

static int funcmp(const void *a, const void *b)
{
        const struct alloc_func *fa = a;
        const struct alloc_func *fb = b;

        if (fa->start > fb->start)
                return 1;
        else
                return -1;
}

static int callcmp(const void *a, const void *b)
{
        const struct alloc_func *fa = a;
        const struct alloc_func *fb = b;

        if (fb->start <= fa->start && fa->end < fb->end)
                return 0;

        if (fa->start > fb->start)
                return 1;
        else
                return -1;
}

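/*
 * Collect all kernel symbols matching the page allocator entry points
 * (__alloc_pages, __get_free_pages, get_zeroed_page, ...) into a sorted
 * array so callchain entries inside the allocator can be skipped.
 */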
static int build_alloc_func_list(void)
{
        int ret;
        struct map *kernel_map;
        struct symbol *sym;
        struct rb_node *node;
        struct alloc_func *func;
        struct machine *machine = &kmem_session->machines.host;
        regex_t alloc_func_regex;
        static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";

        ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
        if (ret) {
                char err[BUFSIZ];

                regerror(ret, &alloc_func_regex, err, sizeof(err));
                pr_err("Invalid regex: %s\n%s", pattern, err);
                return -EINVAL;
        }

        kernel_map = machine__kernel_map(machine);
        if (map__load(kernel_map) < 0) {
                pr_err("cannot load kernel map\n");
                return -ENOENT;
        }

        map__for_each_symbol(kernel_map, sym, node) {
                if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
                        continue;

                func = realloc(alloc_func_list,
                               (nr_alloc_funcs + 1) * sizeof(*func));
                if (func == NULL)
                        return -ENOMEM;

                pr_debug("alloc func: %s\n", sym->name);
                func[nr_alloc_funcs].start = sym->start;
                func[nr_alloc_funcs].end   = sym->end;
                func[nr_alloc_funcs].name  = sym->name;

                alloc_func_list = func;
                nr_alloc_funcs++;
        }

        qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);

        regfree(&alloc_func_regex);
        return 0;
}

/*
 * Find first non-memory allocation function from callchain.
 * The allocation functions are in the 'alloc_func_list'.
 */
static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
{
        struct addr_location al;
        struct machine *machine = &kmem_session->machines.host;
        struct callchain_cursor_node *node;

        if (alloc_func_list == NULL) {
                if (build_alloc_func_list() < 0)
                        goto out;
        }

        al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
        sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);

        callchain_cursor_commit(&callchain_cursor);
        while (true) {
                struct alloc_func key, *caller;
                u64 addr;

                node = callchain_cursor_current(&callchain_cursor);
                if (node == NULL)
                        break;

                key.start = key.end = node->ip;
                caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
                                 sizeof(key), callcmp);
                if (!caller) {
                        /* found */
                        if (node->ms.map)
                                addr = map__unmap_ip(node->ms.map, node->ip);
                        else
                                addr = node->ip;

                        return addr;
                } else
                        pr_debug3("skipping alloc function: %s\n", caller->name);

                callchain_cursor_advance(&callchain_cursor);
        }

out:
        pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
        return sample->ip;
}

struct sort_dimension {
        const char              name[20];
        sort_fn_t               cmp;
        struct list_head        list;
};

static LIST_HEAD(page_alloc_sort_input);
static LIST_HEAD(page_caller_sort_input);

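/*
 * The page_live_tree tracks currently-live pages, keyed by pfn/page, so
 * that a later free event can recover the gfp flags, migrate type and
 * callsite recorded at allocation time.
 */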
static struct page_stat *
__page_stat__findnew_page(struct page_stat *pstat, bool create)
{
        struct rb_node **node = &page_live_tree.rb_node;
        struct rb_node *parent = NULL;
        struct page_stat *data;

        while (*node) {
                s64 cmp;

                parent = *node;
                data = rb_entry(*node, struct page_stat, node);

                cmp = data->page - pstat->page;
                if (cmp < 0)
                        node = &parent->rb_left;
                else if (cmp > 0)
                        node = &parent->rb_right;
                else
                        return data;
        }

        if (!create)
                return NULL;

        data = zalloc(sizeof(*data));
        if (data != NULL) {
                data->page = pstat->page;
                data->order = pstat->order;
                data->gfp_flags = pstat->gfp_flags;
                data->migrate_type = pstat->migrate_type;

                rb_link_node(&data->node, parent, node);
                rb_insert_color(&data->node, &page_live_tree);
        }

        return data;
}

static struct page_stat *page_stat__find_page(struct page_stat *pstat)
{
        return __page_stat__findnew_page(pstat, false);
}

static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
{
        return __page_stat__findnew_page(pstat, true);
}

static struct page_stat *
__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
{
        struct rb_node **node = &page_alloc_tree.rb_node;
        struct rb_node *parent = NULL;
        struct page_stat *data;
        struct sort_dimension *sort;

        while (*node) {
                int cmp = 0;

                parent = *node;
                data = rb_entry(*node, struct page_stat, node);

                list_for_each_entry(sort, &page_alloc_sort_input, list) {
                        cmp = sort->cmp(pstat, data);
                        if (cmp)
                                break;
                }

                if (cmp < 0)
                        node = &parent->rb_left;
                else if (cmp > 0)
                        node = &parent->rb_right;
                else
                        return data;
        }

        if (!create)
                return NULL;

        data = zalloc(sizeof(*data));
        if (data != NULL) {
                data->page = pstat->page;
                data->order = pstat->order;
                data->gfp_flags = pstat->gfp_flags;
                data->migrate_type = pstat->migrate_type;

                rb_link_node(&data->node, parent, node);
                rb_insert_color(&data->node, &page_alloc_tree);
        }

        return data;
}

static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
{
        return __page_stat__findnew_alloc(pstat, false);
}

static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
{
        return __page_stat__findnew_alloc(pstat, true);
}

static struct page_stat *
__page_stat__findnew_caller(struct page_stat *pstat, bool create)
{
        struct rb_node **node = &page_caller_tree.rb_node;
        struct rb_node *parent = NULL;
        struct page_stat *data;
        struct sort_dimension *sort;

        while (*node) {
                int cmp = 0;

                parent = *node;
                data = rb_entry(*node, struct page_stat, node);

                list_for_each_entry(sort, &page_caller_sort_input, list) {
                        cmp = sort->cmp(pstat, data);
                        if (cmp)
                                break;
                }

                if (cmp < 0)
                        node = &parent->rb_left;
                else if (cmp > 0)
                        node = &parent->rb_right;
                else
                        return data;
        }

        if (!create)
                return NULL;

        data = zalloc(sizeof(*data));
        if (data != NULL) {
                data->callsite = pstat->callsite;
                data->order = pstat->order;
                data->gfp_flags = pstat->gfp_flags;
                data->migrate_type = pstat->migrate_type;

                rb_link_node(&data->node, parent, node);
                rb_insert_color(&data->node, &page_caller_tree);
        }

        return data;
}

static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
{
        return __page_stat__findnew_caller(pstat, false);
}

static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
{
        return __page_stat__findnew_caller(pstat, true);
}

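/*
 * A failed allocation shows up as pfn == -1, or as page == NULL when the
 * tracepoint has no "pfn" field.
 */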
static bool valid_page(u64 pfn_or_page)
{
        if (use_pfn && pfn_or_page == -1UL)
                return false;
        if (!use_pfn && pfn_or_page == 0)
                return false;
        return true;
}

struct gfp_flag {
        unsigned int flags;
        char *compact_str;
        char *human_readable;
};

static struct gfp_flag *gfps;
static int nr_gfps;

static int gfpcmp(const void *a, const void *b)
{
        const struct gfp_flag *fa = a;
        const struct gfp_flag *fb = b;

        return fa->flags - fb->flags;
}

/* see include/trace/events/mmflags.h */
static const struct {
        const char *original;
        const char *compact;
} gfp_compact_table[] = {
        { "GFP_TRANSHUGE",              "THP" },
        { "GFP_TRANSHUGE_LIGHT",        "THL" },
        { "GFP_HIGHUSER_MOVABLE",       "HUM" },
        { "GFP_HIGHUSER",               "HU" },
        { "GFP_USER",                   "U" },
        { "GFP_KERNEL_ACCOUNT",         "KAC" },
        { "GFP_KERNEL",                 "K" },
        { "GFP_NOFS",                   "NF" },
        { "GFP_ATOMIC",                 "A" },
        { "GFP_NOIO",                   "NI" },
        { "GFP_NOWAIT",                 "NW" },
        { "GFP_DMA",                    "D" },
        { "__GFP_HIGHMEM",              "HM" },
        { "GFP_DMA32",                  "D32" },
        { "__GFP_HIGH",                 "H" },
        { "__GFP_ATOMIC",               "_A" },
        { "__GFP_IO",                   "I" },
        { "__GFP_FS",                   "F" },
        { "__GFP_NOWARN",               "NWR" },
        { "__GFP_RETRY_MAYFAIL",        "R" },
        { "__GFP_NOFAIL",               "NF" },
        { "__GFP_NORETRY",              "NR" },
        { "__GFP_COMP",                 "C" },
        { "__GFP_ZERO",                 "Z" },
        { "__GFP_NOMEMALLOC",           "NMA" },
        { "__GFP_MEMALLOC",             "MA" },
        { "__GFP_HARDWALL",             "HW" },
        { "__GFP_THISNODE",             "TN" },
        { "__GFP_RECLAIMABLE",          "RC" },
        { "__GFP_MOVABLE",              "M" },
        { "__GFP_ACCOUNT",              "AC" },
        { "__GFP_WRITE",                "WR" },
        { "__GFP_RECLAIM",              "R" },
        { "__GFP_DIRECT_RECLAIM",       "DR" },
        { "__GFP_KSWAPD_RECLAIM",       "KR" },
};

static size_t max_gfp_len;

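/*
 * Convert a "GFP_KERNEL|__GFP_ZERO"-style string into its compact form
 * ("K|Z") using gfp_compact_table, tracking the longest result in
 * max_gfp_len for column alignment.
 */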
static char *compact_gfp_flags(char *gfp_flags)
{
        char *orig_flags = strdup(gfp_flags);
        char *new_flags = NULL;
        char *str, *pos = NULL;
        size_t len = 0;

        if (orig_flags == NULL)
                return NULL;

        str = strtok_r(orig_flags, "|", &pos);
        while (str) {
                size_t i;
                char *new;
                const char *cpt;

                for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
                        if (strcmp(gfp_compact_table[i].original, str))
                                continue;

                        cpt = gfp_compact_table[i].compact;
                        new = realloc(new_flags, len + strlen(cpt) + 2);
                        if (new == NULL) {
                                free(new_flags);
                                free(orig_flags);
                                return NULL;
                        }

                        new_flags = new;

                        if (!len) {
                                strcpy(new_flags, cpt);
                        } else {
                                strcat(new_flags, "|");
                                strcat(new_flags, cpt);
                                len++;
                        }

                        len += strlen(cpt);
                }

                str = strtok_r(NULL, "|", &pos);
        }

        if (max_gfp_len < len)
                max_gfp_len = len;

        free(orig_flags);
        return new_flags;
}

static char *compact_gfp_string(unsigned long gfp_flags)
{
        struct gfp_flag key = {
                .flags = gfp_flags,
        };
        struct gfp_flag *gfp;

        gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
        if (gfp)
                return gfp->compact_str;

        return NULL;
}

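/*
 * Cache the textual gfp_flags of a sample the first time a flag
 * combination is seen, keeping both the human-readable and the compacted
 * representation in an array sorted by flag value for bsearch().
 */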
static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
                           unsigned int gfp_flags)
{
        struct tep_record record = {
                .cpu = sample->cpu,
                .data = sample->raw_data,
                .size = sample->raw_size,
        };
        struct trace_seq seq;
        char *str, *pos = NULL;

        if (nr_gfps) {
                struct gfp_flag key = {
                        .flags = gfp_flags,
                };

                if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
                        return 0;
        }

        trace_seq_init(&seq);
        tep_print_event(evsel->tp_format->tep,
                        &seq, &record, "%s", TEP_PRINT_INFO);

        str = strtok_r(seq.buffer, " ", &pos);
        while (str) {
                if (!strncmp(str, "gfp_flags=", 10)) {
                        struct gfp_flag *new;

                        new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
                        if (new == NULL)
                                return -ENOMEM;

                        gfps = new;
                        new += nr_gfps++;

                        new->flags = gfp_flags;
                        new->human_readable = strdup(str + 10);
                        new->compact_str = compact_gfp_flags(str + 10);
                        if (!new->human_readable || !new->compact_str)
                                return -ENOMEM;

                        qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
                }

                str = strtok_r(NULL, " ", &pos);
        }

        trace_seq_destroy(&seq);
        return 0;
}

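/* Handler for the kmem:mm_page_alloc tracepoint. */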
static int evsel__process_page_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
        u64 page;
        unsigned int order = evsel__intval(evsel, sample, "order");
        unsigned int gfp_flags = evsel__intval(evsel, sample, "gfp_flags");
        unsigned int migrate_type = evsel__intval(evsel, sample,
                                                       "migratetype");
        u64 bytes = kmem_page_size << order;
        u64 callsite;
        struct page_stat *pstat;
        struct page_stat this = {
                .order = order,
                .gfp_flags = gfp_flags,
                .migrate_type = migrate_type,
        };

        if (use_pfn)
                page = evsel__intval(evsel, sample, "pfn");
        else
                page = evsel__intval(evsel, sample, "page");

        nr_page_allocs++;
        total_page_alloc_bytes += bytes;

        if (!valid_page(page)) {
                nr_page_fails++;
                total_page_fail_bytes += bytes;

                return 0;
        }

        if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
                return -1;

        callsite = find_callsite(evsel, sample);

        /*
         * This is to find the current page (with correct gfp flags and
         * migrate type) at free event.
         */
        this.page = page;
        pstat = page_stat__findnew_page(&this);
        if (pstat == NULL)
                return -ENOMEM;

        pstat->nr_alloc++;
        pstat->alloc_bytes += bytes;
        pstat->callsite = callsite;

        if (!live_page) {
                pstat = page_stat__findnew_alloc(&this);
                if (pstat == NULL)
                        return -ENOMEM;

                pstat->nr_alloc++;
                pstat->alloc_bytes += bytes;
                pstat->callsite = callsite;
        }

        this.callsite = callsite;
        pstat = page_stat__findnew_caller(&this);
        if (pstat == NULL)
                return -ENOMEM;

        pstat->nr_alloc++;
        pstat->alloc_bytes += bytes;

        order_stats[order][migrate_type]++;

        return 0;
}

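/* Handler for the kmem:mm_page_free tracepoint. */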
static int evsel__process_page_free_event(struct evsel *evsel, struct perf_sample *sample)
{
        u64 page;
        unsigned int order = evsel__intval(evsel, sample, "order");
        u64 bytes = kmem_page_size << order;
        struct page_stat *pstat;
        struct page_stat this = {
                .order = order,
        };

        if (use_pfn)
                page = evsel__intval(evsel, sample, "pfn");
        else
                page = evsel__intval(evsel, sample, "page");

        nr_page_frees++;
        total_page_free_bytes += bytes;

        this.page = page;
        pstat = page_stat__find_page(&this);
        if (pstat == NULL) {
                pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
                          page, order);

                nr_page_nomatch++;
                total_page_nomatch_bytes += bytes;

                return 0;
        }

        this.gfp_flags = pstat->gfp_flags;
        this.migrate_type = pstat->migrate_type;
        this.callsite = pstat->callsite;

        rb_erase(&pstat->node, &page_live_tree);
        free(pstat);

        if (live_page) {
                order_stats[this.order][this.migrate_type]--;
        } else {
                pstat = page_stat__find_alloc(&this);
                if (pstat == NULL)
                        return -ENOMEM;

                pstat->nr_free++;
                pstat->free_bytes += bytes;
        }

        pstat = page_stat__find_caller(&this);
        if (pstat == NULL)
                return -ENOENT;

        pstat->nr_free++;
        pstat->free_bytes += bytes;

        if (live_page) {
                pstat->nr_alloc--;
                pstat->alloc_bytes -= bytes;

                if (pstat->nr_alloc == 0) {
                        rb_erase(&pstat->node, &page_caller_tree);
                        free(pstat);
                }
        }

        return 0;
}

static bool perf_kmem__skip_sample(struct perf_sample *sample)
{
        /* skip sample based on time? */
        if (perf_time__skip_sample(&ptime, sample->time))
                return true;

        return false;
}

typedef int (*tracepoint_handler)(struct evsel *evsel,
                                  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                union perf_event *event,
                                struct perf_sample *sample,
                                struct evsel *evsel,
                                struct machine *machine)
{
        int err = 0;
        struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                        sample->tid);

        if (thread == NULL) {
                pr_debug("problem processing %d event, skipping it.\n",
                         event->header.type);
                return -1;
        }

        if (perf_kmem__skip_sample(sample))
                return 0;

        dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

        if (evsel->handler != NULL) {
                tracepoint_handler f = evsel->handler;
                err = f(evsel, sample);
        }

        thread__put(thread);

        return err;
}

static struct perf_tool perf_kmem = {
        .sample          = process_sample_event,
        .comm            = perf_event__process_comm,
        .mmap            = perf_event__process_mmap,
        .mmap2           = perf_event__process_mmap2,
        .namespaces      = perf_event__process_namespaces,
        .ordered_events  = true,
};

static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
        if (n_alloc == 0)
                return 0.0;
        else
                return 100.0 - (100.0 * n_req / n_alloc);
}

static void __print_slab_result(struct rb_root *root,
                                struct perf_session *session,
                                int n_lines, int is_caller)
{
        struct rb_node *next;
        struct machine *machine = &session->machines.host;

        printf("%.105s\n", graph_dotted_line);
        printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
        printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
        printf("%.105s\n", graph_dotted_line);

        next = rb_first(root);

        while (next && n_lines--) {
                struct alloc_stat *data = rb_entry(next, struct alloc_stat,
                                                   node);
                struct symbol *sym = NULL;
                struct map *map;
                char buf[BUFSIZ];
                u64 addr;

                if (is_caller) {
                        addr = data->call_site;
                        if (!raw_ip)
                                sym = machine__find_kernel_symbol(machine, addr, &map);
                } else
                        addr = data->ptr;

                if (sym != NULL)
                        snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
                                 addr - map->unmap_ip(map, sym->start));
                else
                        snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
                printf(" %-34s |", buf);

                printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
                       (unsigned long long)data->bytes_alloc,
                       (unsigned long)data->bytes_alloc / data->hit,
                       (unsigned long long)data->bytes_req,
                       (unsigned long)data->bytes_req / data->hit,
                       (unsigned long)data->hit,
                       (unsigned long)data->pingpong,
                       fragmentation(data->bytes_req, data->bytes_alloc));

                next = rb_next(next);
        }

        if (n_lines == -1)
                printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");

        printf("%.105s\n", graph_dotted_line);
}

static const char * const migrate_type_str[] = {
        "UNMOVABL",
        "RECLAIM",
        "MOVABLE",
        "RESERVED",
        "CMA/ISLT",
        "UNKNOWN",
};

static void __print_page_alloc_result(struct perf_session *session, int n_lines)
{
        struct rb_node *next = rb_first(&page_alloc_sorted);
        struct machine *machine = &session->machines.host;
        const char *format;
        int gfp_len = max(strlen("GFP flags"), max_gfp_len);

        printf("\n%.105s\n", graph_dotted_line);
        printf(" %-16s | %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
               use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
               gfp_len, "GFP flags");
        printf("%.105s\n", graph_dotted_line);

        if (use_pfn)
                format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
        else
                format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";

        while (next && n_lines--) {
                struct page_stat *data;
                struct symbol *sym;
                struct map *map;
                char buf[32];
                char *caller = buf;

                data = rb_entry(next, struct page_stat, node);
                sym = machine__find_kernel_symbol(machine, data->callsite, &map);
                if (sym)
                        caller = sym->name;
                else
                        scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

                printf(format, (unsigned long long)data->page,
                       (unsigned long long)data->alloc_bytes / 1024,
                       data->nr_alloc, data->order,
                       migrate_type_str[data->migrate_type],
                       gfp_len, compact_gfp_string(data->gfp_flags), caller);

                next = rb_next(next);
        }

        if (n_lines == -1) {
                printf(" ...              | ...              | ...       | ...   | ...      | %-*s | ...\n",
                       gfp_len, "...");
        }

        printf("%.105s\n", graph_dotted_line);
}

static void __print_page_caller_result(struct perf_session *session, int n_lines)
{
        struct rb_node *next = rb_first(&page_caller_sorted);
        struct machine *machine = &session->machines.host;
        int gfp_len = max(strlen("GFP flags"), max_gfp_len);

        printf("\n%.105s\n", graph_dotted_line);
        printf(" %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
               live_page ? "Live" : "Total", gfp_len, "GFP flags");
        printf("%.105s\n", graph_dotted_line);

        while (next && n_lines--) {
                struct page_stat *data;
                struct symbol *sym;
                struct map *map;
                char buf[32];
                char *caller = buf;

                data = rb_entry(next, struct page_stat, node);
                sym = machine__find_kernel_symbol(machine, data->callsite, &map);
                if (sym)
                        caller = sym->name;
                else
                        scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

                printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
                       (unsigned long long)data->alloc_bytes / 1024,
                       data->nr_alloc, data->order,
                       migrate_type_str[data->migrate_type],
                       gfp_len, compact_gfp_string(data->gfp_flags), caller);

                next = rb_next(next);
        }

        if (n_lines == -1) {
                printf(" ...              | ...       | ...   | ...      | %-*s | ...\n",
                       gfp_len, "...");
        }

        printf("%.105s\n", graph_dotted_line);
}

static void print_gfp_flags(void)
{
        int i;

        printf("#\n");
        printf("# GFP flags\n");
        printf("# ---------\n");
        for (i = 0; i < nr_gfps; i++) {
                printf("# %08x: %*s: %s\n", gfps[i].flags,
                       (int) max_gfp_len, gfps[i].compact_str,
                       gfps[i].human_readable);
        }
}

static void print_slab_summary(void)
{
        printf("\nSUMMARY (SLAB allocator)");
        printf("\n========================\n");
        printf("Total bytes requested: %'lu\n", total_requested);
        printf("Total bytes allocated: %'lu\n", total_allocated);
        printf("Total bytes freed:     %'lu\n", total_freed);
        if (total_allocated > total_freed) {
                printf("Net total bytes allocated: %'lu\n",
                total_allocated - total_freed);
        }
        printf("Total bytes wasted on internal fragmentation: %'lu\n",
               total_allocated - total_requested);
        printf("Internal fragmentation: %f%%\n",
               fragmentation(total_requested, total_allocated));
        printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}

static void print_page_summary(void)
{
        int o, m;
        u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
        u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;

        printf("\nSUMMARY (page allocator)");
        printf("\n========================\n");
        printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation requests",
               nr_page_allocs, total_page_alloc_bytes / 1024);
        printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free requests",
               nr_page_frees, total_page_free_bytes / 1024);
        printf("\n");

        printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
               nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
        printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
               nr_page_allocs - nr_alloc_freed,
               (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
        printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free-only requests",
               nr_page_nomatch, total_page_nomatch_bytes / 1024);
        printf("\n");

        printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation failures",
               nr_page_fails, total_page_fail_bytes / 1024);
        printf("\n");

        printf("%5s  %12s  %12s  %12s  %12s  %12s\n", "Order",  "Unmovable",
               "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
        printf("%.5s  %.12s  %.12s  %.12s  %.12s  %.12s\n", graph_dotted_line,
               graph_dotted_line, graph_dotted_line, graph_dotted_line,
               graph_dotted_line, graph_dotted_line);

        for (o = 0; o < MAX_PAGE_ORDER; o++) {
                printf("%5d", o);
                for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
                        if (order_stats[o][m])
                                printf("  %'12d", order_stats[o][m]);
                        else
                                printf("  %12c", '.');
                }
                printf("\n");
        }
}

static void print_slab_result(struct perf_session *session)
{
        if (caller_flag)
                __print_slab_result(&root_caller_sorted, session, caller_lines, 1);
        if (alloc_flag)
                __print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
        print_slab_summary();
}

static void print_page_result(struct perf_session *session)
{
        if (caller_flag || alloc_flag)
                print_gfp_flags();
        if (caller_flag)
                __print_page_caller_result(session, caller_lines);
        if (alloc_flag)
                __print_page_alloc_result(session, alloc_lines);
        print_page_summary();
}

static void print_result(struct perf_session *session)
{
        if (kmem_slab)
                print_slab_result(session);
        if (kmem_page)
                print_page_result(session);
}

static LIST_HEAD(slab_caller_sort);
static LIST_HEAD(slab_alloc_sort);
static LIST_HEAD(page_caller_sort);
static LIST_HEAD(page_alloc_sort);

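/*
 * Re-insert an entry into the sorted tree, comparing with each sort key
 * in the configured order until one of them differentiates the entries.
 */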
static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
                             struct list_head *sort_list)
{
        struct rb_node **new = &(root->rb_node);
        struct rb_node *parent = NULL;
        struct sort_dimension *sort;

        while (*new) {
                struct alloc_stat *this;
                int cmp = 0;

                this = rb_entry(*new, struct alloc_stat, node);
                parent = *new;

                list_for_each_entry(sort, sort_list, list) {
                        cmp = sort->cmp(data, this);
                        if (cmp)
                                break;
                }

                if (cmp > 0)
                        new = &((*new)->rb_left);
                else
                        new = &((*new)->rb_right);
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);
}

static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
                               struct list_head *sort_list)
{
        struct rb_node *node;
        struct alloc_stat *data;

        for (;;) {
                node = rb_first(root);
                if (!node)
                        break;

                rb_erase(node, root);
                data = rb_entry(node, struct alloc_stat, node);
                sort_slab_insert(root_sorted, data, sort_list);
        }
}

static void sort_page_insert(struct rb_root *root, struct page_stat *data,
                             struct list_head *sort_list)
{
        struct rb_node **new = &root->rb_node;
        struct rb_node *parent = NULL;
        struct sort_dimension *sort;

        while (*new) {
                struct page_stat *this;
                int cmp = 0;

                this = rb_entry(*new, struct page_stat, node);
                parent = *new;

                list_for_each_entry(sort, sort_list, list) {
                        cmp = sort->cmp(data, this);
                        if (cmp)
                                break;
                }

                if (cmp > 0)
                        new = &parent->rb_left;
                else
                        new = &parent->rb_right;
        }

        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);
}

static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
                               struct list_head *sort_list)
{
        struct rb_node *node;
        struct page_stat *data;

        for (;;) {
                node = rb_first(root);
                if (!node)
                        break;

                rb_erase(node, root);
                data = rb_entry(node, struct page_stat, node);
                sort_page_insert(root_sorted, data, sort_list);
        }
}

static void sort_result(void)
{
        if (kmem_slab) {
                __sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
                                   &slab_alloc_sort);
                __sort_slab_result(&root_caller_stat, &root_caller_sorted,
                                   &slab_caller_sort);
        }
        if (kmem_page) {
                if (live_page)
                        __sort_page_result(&page_live_tree, &page_alloc_sorted,
                                           &page_alloc_sort);
                else
                        __sort_page_result(&page_alloc_tree, &page_alloc_sorted,
                                           &page_alloc_sort);

                __sort_page_result(&page_caller_tree, &page_caller_sorted,
                                   &page_caller_sort);
        }
}

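/*
 * Register the tracepoint handlers, detect whether mm_page_alloc carries
 * a "pfn" field, then process the recorded events and print the sorted
 * result.
 */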
1375 static int __cmd_kmem(struct perf_session *session)
1376 {
1377         int err = -EINVAL;
1378         struct evsel *evsel;
1379         const struct evsel_str_handler kmem_tracepoints[] = {
1380                 /* slab allocator */
1381                 { "kmem:kmalloc",               evsel__process_alloc_event, },
1382                 { "kmem:kmem_cache_alloc",      evsel__process_alloc_event, },
1383                 { "kmem:kmalloc_node",          evsel__process_alloc_event, },
1384                 { "kmem:kmem_cache_alloc_node", evsel__process_alloc_event, },
1385                 { "kmem:kfree",                 evsel__process_free_event, },
1386                 { "kmem:kmem_cache_free",       evsel__process_free_event, },
1387                 /* page allocator */
1388                 { "kmem:mm_page_alloc",         evsel__process_page_alloc_event, },
1389                 { "kmem:mm_page_free",          evsel__process_page_free_event, },
1390         };
1391
1392         if (!perf_session__has_traces(session, "kmem record"))
1393                 goto out;
1394
1395         if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
1396                 pr_err("Initializing perf session tracepoint handlers failed\n");
1397                 goto out;
1398         }
1399
1400         evlist__for_each_entry(session->evlist, evsel) {
1401                 if (!strcmp(evsel__name(evsel), "kmem:mm_page_alloc") &&
1402                     evsel__field(evsel, "pfn")) {
1403                         use_pfn = true;
1404                         break;
1405                 }
1406         }
1407
1408         setup_pager();
1409         err = perf_session__process_events(session);
1410         if (err != 0) {
1411                 pr_err("error during process events: %d\n", err);
1412                 goto out;
1413         }
1414         sort_result();
1415         print_result(session);
1416 out:
1417         return err;
1418 }
1419
1420 /* slab sort keys */
1421 static int ptr_cmp(void *a, void *b)
1422 {
1423         struct alloc_stat *l = a;
1424         struct alloc_stat *r = b;
1425
1426         if (l->ptr < r->ptr)
1427                 return -1;
1428         else if (l->ptr > r->ptr)
1429                 return 1;
1430         return 0;
1431 }
1432
1433 static struct sort_dimension ptr_sort_dimension = {
1434         .name   = "ptr",
1435         .cmp    = ptr_cmp,
1436 };
1437
1438 static int slab_callsite_cmp(void *a, void *b)
1439 {
1440         struct alloc_stat *l = a;
1441         struct alloc_stat *r = b;
1442
1443         if (l->call_site < r->call_site)
1444                 return -1;
1445         else if (l->call_site > r->call_site)
1446                 return 1;
1447         return 0;
1448 }
1449
1450 static struct sort_dimension callsite_sort_dimension = {
1451         .name   = "callsite",
1452         .cmp    = slab_callsite_cmp,
1453 };
1454
1455 static int hit_cmp(void *a, void *b)
1456 {
1457         struct alloc_stat *l = a;
1458         struct alloc_stat *r = b;
1459
1460         if (l->hit < r->hit)
1461                 return -1;
1462         else if (l->hit > r->hit)
1463                 return 1;
1464         return 0;
1465 }
1466
1467 static struct sort_dimension hit_sort_dimension = {
1468         .name   = "hit",
1469         .cmp    = hit_cmp,
1470 };
1471
1472 static int bytes_cmp(void *a, void *b)
1473 {
1474         struct alloc_stat *l = a;
1475         struct alloc_stat *r = b;
1476
1477         if (l->bytes_alloc < r->bytes_alloc)
1478                 return -1;
1479         else if (l->bytes_alloc > r->bytes_alloc)
1480                 return 1;
1481         return 0;
1482 }
1483
1484 static struct sort_dimension bytes_sort_dimension = {
1485         .name   = "bytes",
1486         .cmp    = bytes_cmp,
1487 };
1488
1489 static int frag_cmp(void *a, void *b)
1490 {
1491         double x, y;
1492         struct alloc_stat *l = a;
1493         struct alloc_stat *r = b;
1494
1495         x = fragmentation(l->bytes_req, l->bytes_alloc);
1496         y = fragmentation(r->bytes_req, r->bytes_alloc);
1497
1498         if (x < y)
1499                 return -1;
1500         else if (x > y)
1501                 return 1;
1502         return 0;
1503 }
1504
1505 static struct sort_dimension frag_sort_dimension = {
1506         .name   = "frag",
1507         .cmp    = frag_cmp,
1508 };
1509
1510 static int pingpong_cmp(void *a, void *b)
1511 {
1512         struct alloc_stat *l = a;
1513         struct alloc_stat *r = b;
1514
1515         if (l->pingpong < r->pingpong)
1516                 return -1;
1517         else if (l->pingpong > r->pingpong)
1518                 return 1;
1519         return 0;
1520 }
1521
1522 static struct sort_dimension pingpong_sort_dimension = {
1523         .name   = "pingpong",
1524         .cmp    = pingpong_cmp,
1525 };
1526
1527 /* page sort keys */
1528 static int page_cmp(void *a, void *b)
1529 {
1530         struct page_stat *l = a;
1531         struct page_stat *r = b;
1532
1533         if (l->page < r->page)
1534                 return -1;
1535         else if (l->page > r->page)
1536                 return 1;
1537         return 0;
1538 }
1539
1540 static struct sort_dimension page_sort_dimension = {
1541         .name   = "page",
1542         .cmp    = page_cmp,
1543 };
1544
1545 static int page_callsite_cmp(void *a, void *b)
1546 {
1547         struct page_stat *l = a;
1548         struct page_stat *r = b;
1549
1550         if (l->callsite < r->callsite)
1551                 return -1;
1552         else if (l->callsite > r->callsite)
1553                 return 1;
1554         return 0;
1555 }
1556
1557 static struct sort_dimension page_callsite_sort_dimension = {
1558         .name   = "callsite",
1559         .cmp    = page_callsite_cmp,
1560 };
1561
1562 static int page_hit_cmp(void *a, void *b)
1563 {
1564         struct page_stat *l = a;
1565         struct page_stat *r = b;
1566
1567         if (l->nr_alloc < r->nr_alloc)
1568                 return -1;
1569         else if (l->nr_alloc > r->nr_alloc)
1570                 return 1;
1571         return 0;
1572 }
1573
1574 static struct sort_dimension page_hit_sort_dimension = {
1575         .name   = "hit",
1576         .cmp    = page_hit_cmp,
1577 };
1578
1579 static int page_bytes_cmp(void *a, void *b)
1580 {
1581         struct page_stat *l = a;
1582         struct page_stat *r = b;
1583
1584         if (l->alloc_bytes < r->alloc_bytes)
1585                 return -1;
1586         else if (l->alloc_bytes > r->alloc_bytes)
1587                 return 1;
1588         return 0;
1589 }
1590
1591 static struct sort_dimension page_bytes_sort_dimension = {
1592         .name   = "bytes",
1593         .cmp    = page_bytes_cmp,
1594 };
1595
1596 static int page_order_cmp(void *a, void *b)
1597 {
1598         struct page_stat *l = a;
1599         struct page_stat *r = b;
1600
1601         if (l->order < r->order)
1602                 return -1;
1603         else if (l->order > r->order)
1604                 return 1;
1605         return 0;
1606 }
1607
1608 static struct sort_dimension page_order_sort_dimension = {
1609         .name   = "order",
1610         .cmp    = page_order_cmp,
1611 };
1612
static int migrate_type_cmp(void *a, void *b)
{
        struct page_stat *l = a;
        struct page_stat *r = b;

        /* for internal use to find a freed page */
        if (l->migrate_type == -1U)
                return 0;

        if (l->migrate_type < r->migrate_type)
                return -1;
        else if (l->migrate_type > r->migrate_type)
                return 1;
        return 0;
}

static struct sort_dimension migrate_type_sort_dimension = {
        .name   = "migtype",
        .cmp    = migrate_type_cmp,
};

static int gfp_flags_cmp(void *a, void *b)
{
        struct page_stat *l = a;
        struct page_stat *r = b;

        /* for internal use to find a freed page */
        if (l->gfp_flags == -1U)
                return 0;

        if (l->gfp_flags < r->gfp_flags)
                return -1;
        else if (l->gfp_flags > r->gfp_flags)
                return 1;
        return 0;
}

static struct sort_dimension gfp_flags_sort_dimension = {
        .name   = "gfp",
        .cmp    = gfp_flags_cmp,
};

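/* tables mapping --sort key names to their comparators */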
static struct sort_dimension *slab_sorts[] = {
        &ptr_sort_dimension,
        &callsite_sort_dimension,
        &hit_sort_dimension,
        &bytes_sort_dimension,
        &frag_sort_dimension,
        &pingpong_sort_dimension,
};

static struct sort_dimension *page_sorts[] = {
        &page_sort_dimension,
        &page_callsite_sort_dimension,
        &page_hit_sort_dimension,
        &page_bytes_sort_dimension,
        &page_order_sort_dimension,
        &migrate_type_sort_dimension,
        &gfp_flags_sort_dimension,
};

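/*
 * Append a private copy of the matching dimension to the given sort
 * list; memdup() is needed because the same key may sit on both the
 * caller and the alloc sort lists, and each copy carries its own
 * list node.
 */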
static int slab_sort_dimension__add(const char *tok, struct list_head *list)
{
        struct sort_dimension *sort;
        int i;

        for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
                if (!strcmp(slab_sorts[i]->name, tok)) {
                        sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
                        if (!sort) {
                                pr_err("%s: memdup failed\n", __func__);
                                return -1;
                        }
                        list_add_tail(&sort->list, list);
                        return 0;
                }
        }

        return -1;
}

static int page_sort_dimension__add(const char *tok, struct list_head *list)
{
        struct sort_dimension *sort;
        int i;

        for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
                if (!strcmp(page_sorts[i]->name, tok)) {
                        sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
                        if (!sort) {
                                pr_err("%s: memdup failed\n", __func__);
                                return -1;
                        }
                        list_add_tail(&sort->list, list);
                        return 0;
                }
        }

        return -1;
}

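/*
 * Split a comma-separated --sort argument (e.g. the default
 * "frag,hit,bytes") and add each key in order; the first unknown key
 * aborts the whole setup.
 */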
static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
{
        char *tok;
        char *str = strdup(arg);
        char *pos = str;

        if (!str) {
                pr_err("%s: strdup failed\n", __func__);
                return -1;
        }

        while (true) {
                tok = strsep(&pos, ",");
                if (!tok)
                        break;
                if (slab_sort_dimension__add(tok, sort_list) < 0) {
                        pr_err("Unknown slab --sort key: '%s'\n", tok);
                        free(str);
                        return -1;
                }
        }

        free(str);
        return 0;
}

static int setup_page_sorting(struct list_head *sort_list, const char *arg)
{
        char *tok;
        char *str = strdup(arg);
        char *pos = str;

        if (!str) {
                pr_err("%s: strdup failed\n", __func__);
                return -1;
        }

        while (true) {
                tok = strsep(&pos, ",");
                if (!tok)
                        break;
                if (page_sort_dimension__add(tok, sort_list) < 0) {
                        pr_err("Unknown page --sort key: '%s'\n", tok);
                        free(str);
                        return -1;
                }
        }

        free(str);
        return 0;
}

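/*
 * Route the --sort keys to the right list: page vs. slab mode and
 * caller vs. alloc view are each decided by comparing their flags,
 * falling back to kmem_default when neither --slab nor --page was
 * given.
 */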
static int parse_sort_opt(const struct option *opt __maybe_unused,
                          const char *arg, int unset __maybe_unused)
{
        if (!arg)
                return -1;

        if (kmem_page > kmem_slab ||
            (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
                if (caller_flag > alloc_flag)
                        return setup_page_sorting(&page_caller_sort, arg);
                else
                        return setup_page_sorting(&page_alloc_sort, arg);
        } else {
                if (caller_flag > alloc_flag)
                        return setup_slab_sorting(&slab_caller_sort, arg);
                else
                        return setup_slab_sorting(&slab_alloc_sort, arg);
        }
}

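/*
 * The four callbacks below implement "last option wins": each flag is
 * set to its rival plus one, so whichever option appears later on the
 * command line ends up with the larger value.
 */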
static int parse_caller_opt(const struct option *opt __maybe_unused,
                            const char *arg __maybe_unused,
                            int unset __maybe_unused)
{
        caller_flag = (alloc_flag + 1);
        return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
                           const char *arg __maybe_unused,
                           int unset __maybe_unused)
{
        alloc_flag = (caller_flag + 1);
        return 0;
}

static int parse_slab_opt(const struct option *opt __maybe_unused,
                          const char *arg __maybe_unused,
                          int unset __maybe_unused)
{
        kmem_slab = (kmem_page + 1);
        return 0;
}

static int parse_page_opt(const struct option *opt __maybe_unused,
                          const char *arg __maybe_unused,
                          int unset __maybe_unused)
{
        kmem_page = (kmem_slab + 1);
        return 0;
}

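/*
 * --line/-l limits how many lines of results are printed; like
 * --sort, it applies to whichever of --caller/--alloc came last.
 */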
static int parse_line_opt(const struct option *opt __maybe_unused,
                          const char *arg, int unset __maybe_unused)
{
        int lines;

        if (!arg)
                return -1;

        lines = strtoul(arg, NULL, 10);

        if (caller_flag > alloc_flag)
                caller_lines = lines;
        else
                alloc_lines = lines;

        return 0;
}

static bool slab_legacy_tp_is_exposed(void)
{
        /*
         * The tracepoints "kmem:kmalloc_node" and
         * "kmem:kmem_cache_alloc_node" have been removed from recent
         * kernels. If "kmem:kmalloc_node" still exists, the tool is
         * running on an older kernel and must fall back to these
         * legacy tracepoints.
         */
        return !IS_ERR(trace_event__tp_format("kmem", "kmalloc_node"));
}

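/*
 * Build an argv for 'perf record' with the tracepoints of the
 * selected allocator(s) and hand it to cmd_record(). In the default
 * slab mode on a recent kernel this amounts to something like:
 *
 *   perf record -a -R -c 1 -e kmem:kmalloc -e kmem:kfree \
 *       -e kmem:kmem_cache_alloc -e kmem:kmem_cache_free <user args>
 *
 * Page mode additionally inserts -g, since call sites are resolved
 * from the callchain.
 */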
static int __cmd_record(int argc, const char **argv)
{
        const char * const record_args[] = {
        "record", "-a", "-R", "-c", "1",
        };
        const char * const slab_events[] = {
        "-e", "kmem:kmalloc",
        "-e", "kmem:kfree",
        "-e", "kmem:kmem_cache_alloc",
        "-e", "kmem:kmem_cache_free",
        };
        const char * const slab_legacy_events[] = {
        "-e", "kmem:kmalloc_node",
        "-e", "kmem:kmem_cache_alloc_node",
        };
        const char * const page_events[] = {
        "-e", "kmem:mm_page_alloc",
        "-e", "kmem:mm_page_free",
        };
        unsigned int rec_argc, i, j;
        const char **rec_argv;
        unsigned int slab_legacy_tp_exposed = slab_legacy_tp_is_exposed();

        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
        if (kmem_slab) {
                rec_argc += ARRAY_SIZE(slab_events);
                if (slab_legacy_tp_exposed)
                        rec_argc += ARRAY_SIZE(slab_legacy_events);
        }
        if (kmem_page)
                rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */

        rec_argv = calloc(rec_argc + 1, sizeof(char *));

        if (rec_argv == NULL)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = strdup(record_args[i]);

        if (kmem_slab) {
                for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
                        rec_argv[i] = strdup(slab_events[j]);
                if (slab_legacy_tp_exposed) {
                        for (j = 0; j < ARRAY_SIZE(slab_legacy_events); j++, i++)
                                rec_argv[i] = strdup(slab_legacy_events[j]);
                }
        }
        if (kmem_page) {
                rec_argv[i++] = strdup("-g");

                for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
                        rec_argv[i] = strdup(page_events[j]);
        }

        for (j = 1; j < (unsigned int)argc; j++, i++)
                rec_argv[i] = argv[j];

        return cmd_record(i, rec_argv);
}

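/*
 * Handle the "kmem.default" config key, which selects the analysis
 * mode used when neither --slab nor --page is given, e.g. in
 * ~/.perfconfig:
 *
 *   [kmem]
 *           default = page
 */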
static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
{
        if (!strcmp(var, "kmem.default")) {
                if (!strcmp(value, "slab"))
                        kmem_default = KMEM_SLAB;
                else if (!strcmp(value, "page"))
                        kmem_default = KMEM_PAGE;
                else
                        pr_err("invalid default value ('slab' or 'page' required): %s\n",
                               value);
                return 0;
        }

        return 0;
}

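/*
 * 'perf kmem' entry point: the "record" subcommand wraps perf-record
 * with the allocator tracepoints enabled (see __cmd_record() above),
 * while "stat" reads the recorded data file back and prints the
 * sorted allocation statistics.
 */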
int cmd_kmem(int argc, const char **argv)
{
        const char * const default_slab_sort = "frag,hit,bytes";
        const char * const default_page_sort = "bytes,hit";
        struct perf_data data = {
                .mode = PERF_DATA_MODE_READ,
        };
        const struct option kmem_options[] = {
        OPT_STRING('i', "input", &input_name, "file", "input file name"),
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show symbol address, etc)"),
        OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
                           "show per-callsite statistics", parse_caller_opt),
        OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
                           "show per-allocation statistics", parse_alloc_opt),
        OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
                     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
                     "page, order, migtype, gfp", parse_sort_opt),
        OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
        OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
        OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
        OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
                           parse_slab_opt),
        OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
                           parse_page_opt),
        OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
        OPT_STRING(0, "time", &time_str, "str",
                   "Time span of interest (start,stop)"),
        OPT_END()
        };
        const char *const kmem_subcommands[] = { "record", "stat", NULL };
        const char *kmem_usage[] = {
                NULL,
                NULL
        };
        struct perf_session *session;
        static const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
        int ret = perf_config(kmem_config, NULL);

        if (ret)
                return ret;

        argc = parse_options_subcommand(argc, argv, kmem_options,
                                        kmem_subcommands, kmem_usage,
                                        PARSE_OPT_STOP_AT_NON_OPTION);

        if (!argc)
                usage_with_options(kmem_usage, kmem_options);

        if (kmem_slab == 0 && kmem_page == 0) {
                if (kmem_default == KMEM_SLAB)
                        kmem_slab = 1;
                else
                        kmem_page = 1;
        }

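        /* accept any prefix of "record" of three or more chars, e.g. "rec" */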
        if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
                symbol__init(NULL);
                return __cmd_record(argc, argv);
        }

        data.path = input_name;

        kmem_session = session = perf_session__new(&data, &perf_kmem);
        if (IS_ERR(session))
                return PTR_ERR(session);

        ret = -1;

        if (kmem_slab) {
                if (!evlist__find_tracepoint_by_name(session->evlist, "kmem:kmalloc")) {
                        pr_err(errmsg, "slab", "slab");
                        goto out_delete;
                }
        }

        if (kmem_page) {
                struct evsel *evsel = evlist__find_tracepoint_by_name(session->evlist, "kmem:mm_page_alloc");

                if (evsel == NULL) {
                        pr_err(errmsg, "page", "page");
                        goto out_delete;
                }

                kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
                symbol_conf.use_callchain = true;
        }

        symbol__init(&session->header.env);

        if (perf_time__parse_str(&ptime, time_str) != 0) {
                pr_err("Invalid time string\n");
                ret = -EINVAL;
                goto out_delete;
        }

        if (!strcmp(argv[0], "stat")) {
                setlocale(LC_ALL, "");

                if (cpu__setup_cpunode_map())
                        goto out_delete;

                if (list_empty(&slab_caller_sort))
                        setup_slab_sorting(&slab_caller_sort, default_slab_sort);
                if (list_empty(&slab_alloc_sort))
                        setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
                if (list_empty(&page_caller_sort))
                        setup_page_sorting(&page_caller_sort, default_page_sort);
                if (list_empty(&page_alloc_sort))
                        setup_page_sorting(&page_alloc_sort, default_page_sort);

                if (kmem_page) {
                        setup_page_sorting(&page_alloc_sort_input,
                                           "page,order,migtype,gfp");
                        setup_page_sorting(&page_caller_sort_input,
                                           "callsite,order,migtype,gfp");
                }
                ret = __cmd_kmem(session);
        } else
                usage_with_options(kmem_usage, kmem_options);

out_delete:
        perf_session__delete(session);

        return ret;
}