// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"

#include "util/dso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/config.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/callchain.h"
#include "util/time-utils.h"
#include <linux/err.h>

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"

#include "util/debug.h"
#include "util/string2.h"

#include <linux/kernel.h>
#include <linux/numa.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <regex.h>

#include <linux/ctype.h>

static int	kmem_slab;
static int	kmem_page;

static long	kmem_page_size;
static enum {
	KMEM_SLAB,
	KMEM_PAGE,
} kmem_default = KMEM_SLAB; /* for backward compatibility */

typedef int (*sort_fn_t)(void *, void *);

static int			alloc_flag;
static int			caller_flag;

static int			alloc_lines = -1;
static int			caller_lines = -1;

static bool			raw_ip;

struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;
	u64	last_alloc;
	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated, total_freed;
static unsigned long nr_allocs, nr_cross_allocs;

/* filters controlling the start and end time of the analysis */
static struct perf_time_interval ptime;
const char *time_str;

static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	data->last_alloc = bytes_alloc;

	return 0;
}
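
/*
 * Note: the tree above is keyed by pointer value, so each node tracks
 * the most recent allocation that returned that address.  call_site,
 * alloc_cpu and last_alloc are refreshed on every hit so that a later
 * free of the same pointer can be matched back to the allocating CPU
 * and callsite (see evsel__process_free_event() below).
 */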

static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}
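
/*
 * Unlike the per-pointer tree, this tree aggregates by call_site: one
 * node accumulates the hit count and byte totals for every allocation
 * made from the same kernel callsite.
 */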

static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
	unsigned long ptr = evsel__intval(evsel, sample, "ptr"),
		      call_site = evsel__intval(evsel, sample, "call_site");
	int bytes_req = evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;
	nr_allocs++;

	/*
	 * Commit 11e9734bcb6a ("mm/slab_common: unify NUMA and UMA
	 * version of tracepoints") adds the field "node" into the
	 * tracepoints 'kmalloc' and 'kmem_cache_alloc'.
	 *
	 * The legacy tracepoints 'kmalloc_node' and 'kmem_cache_alloc_node'
	 * also contain the field "node".
	 *
	 * If the tracepoint contains the field "node", the tool counts
	 * cross-node allocations.
	 */
	if (evsel__field(evsel, "node")) {
		int node1, node2;

		node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
		node2 = evsel__intval(evsel, sample, "node");

		/*
		 * If the field "node" is NUMA_NO_NODE (-1), we don't take it
		 * as a cross allocation.
		 */
		if ((node2 != NUMA_NO_NODE) && (node1 != node2))
			nr_cross_allocs++;
	}

	return 0;
}
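
/*
 * Worked example (hypothetical numbers): a kmalloc sampled on a CPU
 * belonging to node 0 while the tracepoint reports node=1 counts as
 * one cross allocation; node=-1 (NUMA_NO_NODE) is never counted.
 */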

static int ptr_cmp(void *, void *);
static int slab_callsite_cmp(void *, void *);

static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

static int evsel__process_free_event(struct evsel *evsel, struct perf_sample *sample)
{
	unsigned long ptr = evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	total_freed += s_alloc->last_alloc;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat,
					     slab_callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}
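
/*
 * A free observed on a different CPU than the matching allocation is a
 * "ping-pong": the object bounced between CPUs between alloc and free.
 * Both the per-pointer and the per-callsite statistics are bumped, and
 * alloc_cpu is reset so the same object is not counted twice.
 */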

static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;
static u64 total_page_fail_bytes;
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;

static bool use_pfn;
static bool live_page;
static struct perf_session *kmem_session;

#define MAX_MIGRATE_TYPES 6
#define MAX_PAGE_ORDER 11

static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];

struct page_stat {
	struct rb_node	node;
	u64		page;
	u64		callsite;
	int		order;
	unsigned	gfp_flags;
	unsigned	migrate_type;
	u64		alloc_bytes;
	u64		free_bytes;
	int		nr_alloc;
	int		nr_free;
};

static struct rb_root page_live_tree;
static struct rb_root page_alloc_tree;
static struct rb_root page_alloc_sorted;
static struct rb_root page_caller_tree;
static struct rb_root page_caller_sorted;

struct alloc_func {
	u64 start;
	u64 end;
	char *name;
};

static int nr_alloc_funcs;
static struct alloc_func *alloc_func_list;

static int funcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}

static int callcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	if (fb->start <= fa->start && fa->end < fb->end)
		return 0;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}

static int build_alloc_func_list(void)
{
	int ret;
	struct map *kernel_map;
	struct symbol *sym;
	struct rb_node *node;
	struct alloc_func *func;
	struct machine *machine = &kmem_session->machines.host;
	regex_t alloc_func_regex;
	static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";

	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
	if (ret) {
		char err[BUFSIZ];

		regerror(ret, &alloc_func_regex, err, sizeof(err));
		pr_err("Invalid regex: %s\n%s", pattern, err);
		return -EINVAL;
	}

	kernel_map = machine__kernel_map(machine);
	if (map__load(kernel_map) < 0) {
		pr_err("cannot load kernel map\n");
		return -ENOENT;
	}

	map__for_each_symbol(kernel_map, sym, node) {
		if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
			continue;

		func = realloc(alloc_func_list,
			       (nr_alloc_funcs + 1) * sizeof(*func));
		if (func == NULL)
			return -ENOMEM;

		pr_debug("alloc func: %s\n", sym->name);
		func[nr_alloc_funcs].start = sym->start;
		func[nr_alloc_funcs].end = sym->end;
		func[nr_alloc_funcs].name = sym->name;

		alloc_func_list = func;
		nr_alloc_funcs++;
	}

	qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);

	regfree(&alloc_func_regex);
	return 0;
}
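
/*
 * The pattern above matches e.g. alloc_page, alloc_pages,
 * __alloc_pages, __get_free_pages and get_zeroed_page, so every
 * page-allocator entry point found in the kernel map lands in the
 * sorted alloc_func_list used by find_callsite() below.
 */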

/*
 * Find the first non-memory-allocation function from the callchain.
 * The allocation functions are listed in 'alloc_func_list'.
 */
static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
{
	struct addr_location al;
	struct machine *machine = &kmem_session->machines.host;
	struct callchain_cursor_node *node;

	if (alloc_func_list == NULL) {
		if (build_alloc_func_list() < 0)
			goto out;
	}

	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);

	callchain_cursor_commit(&callchain_cursor);
	while (true) {
		struct alloc_func key, *caller;
		u64 addr;

		node = callchain_cursor_current(&callchain_cursor);
		if (node == NULL)
			break;

		key.start = key.end = node->ip;
		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
				 sizeof(key), callcmp);
		if (!caller) {
			/* found */
			if (node->ms.map)
				addr = map__unmap_ip(node->ms.map, node->ip);
			else
				addr = node->ip;

			return addr;
		} else
			pr_debug3("skipping alloc function: %s\n", caller->name);

		callchain_cursor_advance(&callchain_cursor);
	}

out:
	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
	return sample->ip;
}
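
/*
 * I.e. the callchain is walked outwards from the tracepoint: every
 * frame that bsearch() locates in alloc_func_list is an allocator
 * wrapper and is skipped, and the first frame that is not an
 * allocation function is reported as the callsite.
 */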

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(page_alloc_sort_input);
static LIST_HEAD(page_caller_sort_input);

static struct page_stat *
__page_stat__findnew_page(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_live_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;

	while (*node) {
		s64 cmp;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		cmp = data->page - pstat->page;
		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_live_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_page(struct page_stat *pstat)
{
	return __page_stat__findnew_page(pstat, false);
}

static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
{
	return __page_stat__findnew_page(pstat, true);
}

static struct page_stat *
__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_alloc_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		list_for_each_entry(sort, &page_alloc_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_alloc_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, false);
}

static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, true);
}

static struct page_stat *
__page_stat__findnew_caller(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_caller_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		list_for_each_entry(sort, &page_caller_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->callsite = pstat->callsite;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_caller_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, false);
}

static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, true);
}
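
/*
 * Summary of the three page trees: page_live_tree holds currently
 * live pages keyed by page/pfn alone, while page_alloc_tree and
 * page_caller_tree are keyed through the *_sort_input lists
 * ("page,order,migtype,gfp" and "callsite,order,migtype,gfp"
 * respectively, see cmd_kmem()), so one page can land in different
 * aggregate buckets depending on its attributes.
 */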

static bool valid_page(u64 pfn_or_page)
{
	if (use_pfn && pfn_or_page == -1UL)
		return false;
	if (!use_pfn && pfn_or_page == 0)
		return false;
	return true;
}

struct gfp_flag {
	unsigned int flags;
	char *compact_str;
	char *human_readable;
};

static struct gfp_flag *gfps;
static int nr_gfps;

static int gfpcmp(const void *a, const void *b)
{
	const struct gfp_flag *fa = a;
	const struct gfp_flag *fb = b;

	return fa->flags - fb->flags;
}

/* see include/trace/events/mmflags.h */
static const struct {
	const char *original;
	const char *compact;
} gfp_compact_table[] = {
	{ "GFP_TRANSHUGE",		"THP" },
	{ "GFP_TRANSHUGE_LIGHT",	"THL" },
	{ "GFP_HIGHUSER_MOVABLE",	"HUM" },
	{ "GFP_HIGHUSER",		"HU" },
	{ "GFP_USER",			"U" },
	{ "GFP_KERNEL_ACCOUNT",		"KAC" },
	{ "GFP_KERNEL",			"K" },
	{ "GFP_NOFS",			"NF" },
	{ "GFP_ATOMIC",			"A" },
	{ "GFP_NOIO",			"NI" },
	{ "GFP_NOWAIT",			"NW" },
	{ "GFP_DMA",			"D" },
	{ "__GFP_HIGHMEM",		"HM" },
	{ "GFP_DMA32",			"D32" },
	{ "__GFP_HIGH",			"H" },
	{ "__GFP_ATOMIC",		"_A" },
	{ "__GFP_IO",			"I" },
	{ "__GFP_FS",			"F" },
	{ "__GFP_NOWARN",		"NWR" },
	{ "__GFP_RETRY_MAYFAIL",	"R" },
	{ "__GFP_NOFAIL",		"NF" },
	{ "__GFP_NORETRY",		"NR" },
	{ "__GFP_COMP",			"C" },
	{ "__GFP_ZERO",			"Z" },
	{ "__GFP_NOMEMALLOC",		"NMA" },
	{ "__GFP_MEMALLOC",		"MA" },
	{ "__GFP_HARDWALL",		"HW" },
	{ "__GFP_THISNODE",		"TN" },
	{ "__GFP_RECLAIMABLE",		"RC" },
	{ "__GFP_MOVABLE",		"M" },
	{ "__GFP_ACCOUNT",		"AC" },
	{ "__GFP_WRITE",		"WR" },
	{ "__GFP_RECLAIM",		"R" },
	{ "__GFP_DIRECT_RECLAIM",	"DR" },
	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
};

static size_t max_gfp_len;

static char *compact_gfp_flags(char *gfp_flags)
{
	char *orig_flags = strdup(gfp_flags);
	char *new_flags = NULL;
	char *str, *pos = NULL;
	size_t len = 0;

	if (orig_flags == NULL)
		return NULL;

	str = strtok_r(orig_flags, "|", &pos);
	while (str) {
		size_t i;
		char *new;
		const char *cpt;

		for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
			if (strcmp(gfp_compact_table[i].original, str))
				continue;

			cpt = gfp_compact_table[i].compact;
			new = realloc(new_flags, len + strlen(cpt) + 2);
			if (new == NULL) {
				free(new_flags);
				free(orig_flags);
				return NULL;
			}

			new_flags = new;

			if (!len) {
				strcpy(new_flags, cpt);
			} else {
				strcat(new_flags, "|");
				strcat(new_flags, cpt);
				len++;
			}

			len += strlen(cpt);
		}

		str = strtok_r(NULL, "|", &pos);
	}

	if (max_gfp_len < len)
		max_gfp_len = len;

	free(orig_flags);
	return new_flags;
}
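
/*
 * Example: the string "GFP_KERNEL|__GFP_ZERO" is compacted to "K|Z"
 * via gfp_compact_table, keeping the output columns narrow;
 * max_gfp_len tracks the widest compacted string actually seen so the
 * printout can size the GFP column.
 */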

static char *compact_gfp_string(unsigned long gfp_flags)
{
	struct gfp_flag key = {
		.flags = gfp_flags,
	};
	struct gfp_flag *gfp;

	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
	if (gfp)
		return gfp->compact_str;

	return NULL;
}

static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
			   unsigned int gfp_flags)
{
	struct tep_record record = {
		.cpu = sample->cpu,
		.data = sample->raw_data,
		.size = sample->raw_size,
	};
	struct trace_seq seq;
	char *str, *pos = NULL;

	if (nr_gfps) {
		struct gfp_flag key = {
			.flags = gfp_flags,
		};

		if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
			return 0;
	}

	trace_seq_init(&seq);
	tep_print_event(evsel->tp_format->tep,
			&seq, &record, "%s", TEP_PRINT_INFO);

	str = strtok_r(seq.buffer, " ", &pos);
	while (str) {
		if (!strncmp(str, "gfp_flags=", 10)) {
			struct gfp_flag *new;

			new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
			if (new == NULL)
				return -ENOMEM;

			gfps = new;
			new += nr_gfps++;

			new->flags = gfp_flags;
			new->human_readable = strdup(str + 10);
			new->compact_str = compact_gfp_flags(str + 10);
			if (!new->human_readable || !new->compact_str)
				return -ENOMEM;

			qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
		}

		str = strtok_r(NULL, " ", &pos);
	}

	trace_seq_destroy(&seq);
	return 0;
}
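
/*
 * The human-readable gfp string is recovered by pretty-printing the
 * raw trace record once per unique flags value; results are cached in
 * the sorted 'gfps' array so the relatively expensive
 * tep_print_event() call runs only the first time a given flags
 * combination shows up.
 */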

static int evsel__process_page_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
	u64 page;
	unsigned int order = evsel__intval(evsel, sample, "order");
	unsigned int gfp_flags = evsel__intval(evsel, sample, "gfp_flags");
	unsigned int migrate_type = evsel__intval(evsel, sample,
						  "migratetype");
	u64 bytes = kmem_page_size << order;
	u64 callsite;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
		.gfp_flags = gfp_flags,
		.migrate_type = migrate_type,
	};

	if (use_pfn)
		page = evsel__intval(evsel, sample, "pfn");
	else
		page = evsel__intval(evsel, sample, "page");

	nr_page_allocs++;
	total_page_alloc_bytes += bytes;

	if (!valid_page(page)) {
		nr_page_fails++;
		total_page_fail_bytes += bytes;

		return 0;
	}

	if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
		return -1;

	callsite = find_callsite(evsel, sample);

	/*
	 * This is to find the current page (with correct gfp flags and
	 * migrate type) at free event.
	 */
	this.page = page;
	pstat = page_stat__findnew_page(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;
	pstat->callsite = callsite;

	if (!live_page) {
		pstat = page_stat__findnew_alloc(&this);
		if (pstat == NULL)
			return -ENOMEM;

		pstat->nr_alloc++;
		pstat->alloc_bytes += bytes;
		pstat->callsite = callsite;
	}

	this.callsite = callsite;
	pstat = page_stat__findnew_caller(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;

	order_stats[order][migrate_type]++;

	return 0;
}
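
/*
 * Worked example: with 4 KiB pages, an order-3 allocation accounts
 * bytes = 4096 << 3 = 32 KiB to the live, alloc and caller statistics
 * above.
 */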

static int evsel__process_page_free_event(struct evsel *evsel, struct perf_sample *sample)
{
	u64 page;
	unsigned int order = evsel__intval(evsel, sample, "order");
	u64 bytes = kmem_page_size << order;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
	};

	if (use_pfn)
		page = evsel__intval(evsel, sample, "pfn");
	else
		page = evsel__intval(evsel, sample, "page");

	nr_page_frees++;
	total_page_free_bytes += bytes;

	this.page = page;
	pstat = page_stat__find_page(&this);
	if (pstat == NULL) {
		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
			  page, order);

		nr_page_nomatch++;
		total_page_nomatch_bytes += bytes;

		return 0;
	}

	this.gfp_flags = pstat->gfp_flags;
	this.migrate_type = pstat->migrate_type;
	this.callsite = pstat->callsite;

	rb_erase(&pstat->node, &page_live_tree);
	free(pstat);

	if (live_page) {
		order_stats[this.order][this.migrate_type]--;
	} else {
		pstat = page_stat__find_alloc(&this);
		if (pstat == NULL)
			return -ENOMEM;

		pstat->nr_free++;
		pstat->free_bytes += bytes;
	}

	pstat = page_stat__find_caller(&this);
	if (pstat == NULL)
		return -ENOENT;

	pstat->nr_free++;
	pstat->free_bytes += bytes;

	if (live_page) {
		pstat->nr_alloc--;
		pstat->alloc_bytes -= bytes;

		if (pstat->nr_alloc == 0) {
			rb_erase(&pstat->node, &page_caller_tree);
			free(pstat);
		}
	}

	return 0;
}
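
/*
 * A free event carries no gfp flags or migrate type, so they are
 * recovered from the live-tree entry created at allocation time.  In
 * --live mode the caller statistics shrink on free, leaving only pages
 * still allocated; otherwise alloc and free totals accumulate side by
 * side.
 */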

static bool perf_kmem__skip_sample(struct perf_sample *sample)
{
	/* skip sample based on time? */
	if (perf_time__skip_sample(&ptime, sample->time))
		return true;

	return false;
}

typedef int (*tracepoint_handler)(struct evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (perf_kmem__skip_sample(sample))
		return 0;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(evsel, sample);
	}

	thread__put(thread);

	return err;
}

static struct perf_tool perf_kmem = {
	.sample		 = process_sample_event,
	.comm		 = perf_event__process_comm,
	.mmap		 = perf_event__process_mmap,
	.mmap2		 = perf_event__process_mmap2,
	.namespaces	 = perf_event__process_namespaces,
	.ordered_events	 = true,
};

static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

static void __print_slab_result(struct rb_root *root,
				struct perf_session *session,
				int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.105s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.105s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_symbol(machine, addr, &map);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.105s\n", graph_dotted_line);
}

static const char * const migrate_type_str[] = {
	"UNMOVABL",
	"RECLAIM",
	"MOVABLE",
	"RESERVED",
	"CMA/ISLT",
	"UNKNOWN",
};

static void __print_page_alloc_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_alloc_sorted);
	struct machine *machine = &session->machines.host;
	const char *format;
	int gfp_len = max(strlen("GFP flags"), max_gfp_len);

	printf("\n%.105s\n", graph_dotted_line);
	printf(" %-16s | %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
	       use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
	       gfp_len, "GFP flags");
	printf("%.105s\n", graph_dotted_line);

	if (use_pfn)
		format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
	else
		format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
		if (sym)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(format, (unsigned long long)data->page,
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       gfp_len, compact_gfp_string(data->gfp_flags), caller);

		next = rb_next(next);
	}

	if (n_lines == -1) {
		printf(" ... | ... | ... | ... | ... | %-*s | ...\n",
		       gfp_len, "...");
	}

	printf("%.105s\n", graph_dotted_line);
}

static void __print_page_caller_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_caller_sorted);
	struct machine *machine = &session->machines.host;
	int gfp_len = max(strlen("GFP flags"), max_gfp_len);

	printf("\n%.105s\n", graph_dotted_line);
	printf(" %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
	       live_page ? "Live" : "Total", gfp_len, "GFP flags");
	printf("%.105s\n", graph_dotted_line);

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
		if (sym)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       gfp_len, compact_gfp_string(data->gfp_flags), caller);

		next = rb_next(next);
	}

	if (n_lines == -1) {
		printf(" ... | ... | ... | ... | %-*s | ...\n",
		       gfp_len, "...");
	}

	printf("%.105s\n", graph_dotted_line);
}

static void print_gfp_flags(void)
{
	int i;

	printf("#\n");
	printf("# GFP flags\n");
	printf("# ---------\n");
	for (i = 0; i < nr_gfps; i++) {
		printf("# %08x: %*s: %s\n", gfps[i].flags,
		       (int) max_gfp_len, gfps[i].compact_str,
		       gfps[i].human_readable);
	}
}

static void print_slab_summary(void)
{
	printf("\nSUMMARY (SLAB allocator)");
	printf("\n========================\n");
	printf("Total bytes requested: %'lu\n", total_requested);
	printf("Total bytes allocated: %'lu\n", total_allocated);
	printf("Total bytes freed:     %'lu\n", total_freed);
	if (total_allocated > total_freed) {
		printf("Net total bytes allocated: %'lu\n",
		       total_allocated - total_freed);
	}
	printf("Total bytes wasted on internal fragmentation: %'lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}

static void print_page_summary(void)
{
	int o, m;
	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;

	printf("\nSUMMARY (page allocator)");
	printf("\n========================\n");
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
	       nr_page_allocs, total_page_alloc_bytes / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
	       nr_page_frees, total_page_free_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
	       nr_page_allocs - nr_alloc_freed,
	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
	       nr_page_fails, total_page_fail_bytes / 1024);
	printf("\n");

	printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
	printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
	       graph_dotted_line, graph_dotted_line);

	for (o = 0; o < MAX_PAGE_ORDER; o++) {
		printf("%5d", o);
		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
			if (order_stats[o][m])
				printf(" %'12d", order_stats[o][m]);
			else
				printf(" %12c", '.');
		}
		printf("\n");
	}
}

static void print_slab_result(struct perf_session *session)
{
	if (caller_flag)
		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_slab_summary();
}

static void print_page_result(struct perf_session *session)
{
	if (caller_flag || alloc_flag)
		print_gfp_flags();
	if (caller_flag)
		__print_page_caller_result(session, caller_lines);
	if (alloc_flag)
		__print_page_alloc_result(session, alloc_lines);
	print_page_summary();
}

static void print_result(struct perf_session *session)
{
	if (kmem_slab)
		print_slab_result(session);
	if (kmem_page)
		print_page_result(session);
}

static LIST_HEAD(slab_caller_sort);
static LIST_HEAD(slab_alloc_sort);
static LIST_HEAD(page_caller_sort);
static LIST_HEAD(page_alloc_sort);

static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
			       struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_slab_insert(root_sorted, data, sort_list);
	}
}

static void sort_page_insert(struct rb_root *root, struct page_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct page_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct page_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
			       struct list_head *sort_list)
{
	struct rb_node *node;
	struct page_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct page_stat, node);
		sort_page_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	if (kmem_slab) {
		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
				   &slab_alloc_sort);
		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
				   &slab_caller_sort);
	}
	if (kmem_page) {
		if (live_page)
			__sort_page_result(&page_live_tree, &page_alloc_sorted,
					   &page_alloc_sort);
		else
			__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
					   &page_alloc_sort);

		__sort_page_result(&page_caller_tree, &page_caller_sorted,
				   &page_caller_sort);
	}
}
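
/*
 * Sorting is two-stage: statistics are first accumulated in the keyed
 * trees, then each node is re-inserted into a second tree ordered by
 * the user-selected sort keys (-s/--sort), falling back to the next
 * key on ties.
 */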

static int __cmd_kmem(struct perf_session *session)
{
	int err = -EINVAL;
	struct evsel *evsel;
	const struct evsel_str_handler kmem_tracepoints[] = {
		/* slab allocator */
		{ "kmem:kmalloc",		evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc",	evsel__process_alloc_event, },
		{ "kmem:kmalloc_node",		evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc_node", evsel__process_alloc_event, },
		{ "kmem:kfree",			evsel__process_free_event, },
		{ "kmem:kmem_cache_free",	evsel__process_free_event, },
		/* page allocator */
		{ "kmem:mm_page_alloc",		evsel__process_page_alloc_event, },
		{ "kmem:mm_page_free",		evsel__process_page_free_event, },
	};

	if (!perf_session__has_traces(session, "kmem record"))
		goto out;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (!strcmp(evsel__name(evsel), "kmem:mm_page_alloc") &&
		    evsel__field(evsel, "pfn")) {
			use_pfn = true;
			break;
		}
	}

	setup_pager();
	err = perf_session__process_events(session);
	if (err != 0) {
		pr_err("error during process events: %d\n", err);
		goto out;
	}
	sort_result();
	print_result(session);
out:
	return err;
}

/* slab sort keys */
static int ptr_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int slab_callsite_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= slab_callsite_cmp,
};

static int hit_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(void *a, void *b)
{
	double x, y;
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

/* page sort keys */
static int page_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->page < r->page)
		return -1;
	else if (l->page > r->page)
		return 1;
	return 0;
}

static struct sort_dimension page_sort_dimension = {
	.name	= "page",
	.cmp	= page_cmp,
};

static int page_callsite_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->callsite < r->callsite)
		return -1;
	else if (l->callsite > r->callsite)
		return 1;
	return 0;
}

static struct sort_dimension page_callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= page_callsite_cmp,
};

static int page_hit_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->nr_alloc < r->nr_alloc)
		return -1;
	else if (l->nr_alloc > r->nr_alloc)
		return 1;
	return 0;
}

static struct sort_dimension page_hit_sort_dimension = {
	.name	= "hit",
	.cmp	= page_hit_cmp,
};

static int page_bytes_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->alloc_bytes < r->alloc_bytes)
		return -1;
	else if (l->alloc_bytes > r->alloc_bytes)
		return 1;
	return 0;
}

static struct sort_dimension page_bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= page_bytes_cmp,
};

static int page_order_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->order < r->order)
		return -1;
	else if (l->order > r->order)
		return 1;
	return 0;
}

static struct sort_dimension page_order_sort_dimension = {
	.name	= "order",
	.cmp	= page_order_cmp,
};

static int migrate_type_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	/* for internal use to find freed page */
	if (l->migrate_type == -1U)
		return 0;

	if (l->migrate_type < r->migrate_type)
		return -1;
	else if (l->migrate_type > r->migrate_type)
		return 1;
	return 0;
}

static struct sort_dimension migrate_type_sort_dimension = {
	.name	= "migtype",
	.cmp	= migrate_type_cmp,
};

static int gfp_flags_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	/* for internal use to find freed page */
	if (l->gfp_flags == -1U)
		return 0;

	if (l->gfp_flags < r->gfp_flags)
		return -1;
	else if (l->gfp_flags > r->gfp_flags)
		return 1;
	return 0;
}

static struct sort_dimension gfp_flags_sort_dimension = {
	.name	= "gfp",
	.cmp	= gfp_flags_cmp,
};

static struct sort_dimension *slab_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

static struct sort_dimension *page_sorts[] = {
	&page_sort_dimension,
	&page_callsite_sort_dimension,
	&page_hit_sort_dimension,
	&page_bytes_sort_dimension,
	&page_order_sort_dimension,
	&migrate_type_sort_dimension,
	&gfp_flags_sort_dimension,
};

static int slab_sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
		if (!strcmp(slab_sorts[i]->name, tok)) {
			sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int page_sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
		if (!strcmp(page_sorts[i]->name, tok)) {
			sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (slab_sort_dimension__add(tok, sort_list) < 0) {
			pr_err("Unknown slab --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int setup_page_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (page_sort_dimension__add(tok, sort_list) < 0) {
			pr_err("Unknown page --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	if (kmem_page > kmem_slab ||
	    (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
		if (caller_flag > alloc_flag)
			return setup_page_sorting(&page_caller_sort, arg);
		else
			return setup_page_sorting(&page_alloc_sort, arg);
	} else {
		if (caller_flag > alloc_flag)
			return setup_slab_sorting(&slab_caller_sort, arg);
		else
			return setup_slab_sorting(&slab_alloc_sort, arg);
	}

	return 0;
}

static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_slab_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_slab = (kmem_page + 1);
	return 0;
}

static int parse_page_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_page = (kmem_slab + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

static bool slab_legacy_tp_is_exposed(void)
{
	/*
	 * The tracepoints "kmem:kmalloc_node" and
	 * "kmem:kmem_cache_alloc_node" were removed in recent kernels;
	 * if the tracepoint "kmem:kmalloc_node" still exists, the tool
	 * is running on an older kernel and must fall back to these
	 * legacy tracepoints.
	 */
	return IS_ERR(trace_event__tp_format("kmem", "kmalloc_node")) ?
		false : true;
}

static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
	"record", "-a", "-R", "-c", "1",
	};
	const char * const slab_events[] = {
	"-e", "kmem:kmalloc",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_free",
	};
	const char * const slab_legacy_events[] = {
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kmem_cache_alloc_node",
	};
	const char * const page_events[] = {
	"-e", "kmem:mm_page_alloc",
	"-e", "kmem:mm_page_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	unsigned int slab_legacy_tp_exposed = slab_legacy_tp_is_exposed();

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	if (kmem_slab) {
		rec_argc += ARRAY_SIZE(slab_events);
		if (slab_legacy_tp_exposed)
			rec_argc += ARRAY_SIZE(slab_legacy_events);
	}
	if (kmem_page)
		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */

	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	if (kmem_slab) {
		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
			rec_argv[i] = strdup(slab_events[j]);
		if (slab_legacy_tp_exposed) {
			for (j = 0; j < ARRAY_SIZE(slab_legacy_events); j++, i++)
				rec_argv[i] = strdup(slab_legacy_events[j]);
		}
	}
	if (kmem_page) {
		rec_argv[i++] = strdup("-g");

		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
			rec_argv[i] = strdup(page_events[j]);
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv);
}
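
/*
 * E.g. 'perf kmem record --page sleep 1' (hypothetical invocation)
 * expands to roughly:
 *
 *   perf record -a -R -c 1 -g -e kmem:mm_page_alloc \
 *               -e kmem:mm_page_free sleep 1
 */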

static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "kmem.default")) {
		if (!strcmp(value, "slab"))
			kmem_default = KMEM_SLAB;
		else if (!strcmp(value, "page"))
			kmem_default = KMEM_PAGE;
		else
			pr_err("invalid default value ('slab' or 'page' required): %s\n",
			       value);
		return 0;
	}

	return 0;
}

int cmd_kmem(int argc, const char **argv)
{
	const char * const default_slab_sort = "frag,hit,bytes";
	const char * const default_page_sort = "bytes,hit";
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
	};
	const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics", parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics", parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
		     "page, order, migtype, gfp", parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
			   parse_slab_opt),
	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
			   parse_page_opt),
	OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
	OPT_STRING(0, "time", &time_str, "str",
		   "Time span of interest (start,stop)"),
	OPT_END()
	};
	const char *const kmem_subcommands[] = { "record", "stat", NULL };
	const char *kmem_usage[] = {
		NULL,
		NULL
	};
	struct perf_session *session;
	static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
	int ret = perf_config(kmem_config, NULL);

	if (ret)
		return ret;

	argc = parse_options_subcommand(argc, argv, kmem_options,
					kmem_subcommands, kmem_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	if (kmem_slab == 0 && kmem_page == 0) {
		if (kmem_default == KMEM_SLAB)
			kmem_slab = 1;
		else
			kmem_page = 1;
	}

	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		symbol__init(NULL);
		return __cmd_record(argc, argv);
	}

	data.path = input_name;

	kmem_session = session = perf_session__new(&data, &perf_kmem);
	if (IS_ERR(session))
		return PTR_ERR(session);

	ret = -1;

	if (kmem_slab) {
		if (!evlist__find_tracepoint_by_name(session->evlist, "kmem:kmalloc")) {
			pr_err(errmsg, "slab", "slab");
			goto out_delete;
		}
	}

	if (kmem_page) {
		struct evsel *evsel = evlist__find_tracepoint_by_name(session->evlist, "kmem:mm_page_alloc");

		if (evsel == NULL) {
			pr_err(errmsg, "page", "page");
			goto out_delete;
		}

		kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
		symbol_conf.use_callchain = true;
	}

	symbol__init(&session->header.env);

	if (perf_time__parse_str(&ptime, time_str) != 0) {
		pr_err("Invalid time string\n");
		ret = -EINVAL;
		goto out_delete;
	}

	if (!strcmp(argv[0], "stat")) {
		setlocale(LC_ALL, "");

		if (cpu__setup_cpunode_map())
			goto out_delete;

		if (list_empty(&slab_caller_sort))
			setup_slab_sorting(&slab_caller_sort, default_slab_sort);
		if (list_empty(&slab_alloc_sort))
			setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
		if (list_empty(&page_caller_sort))
			setup_page_sorting(&page_caller_sort, default_page_sort);
		if (list_empty(&page_alloc_sort))
			setup_page_sorting(&page_alloc_sort, default_page_sort);

		if (kmem_page) {
			setup_page_sorting(&page_alloc_sort_input,
					   "page,order,migtype,gfp");
			setup_page_sorting(&page_caller_sort_input,
					   "callsite,order,migtype,gfp");
		}
		ret = __cmd_kmem(session);
	} else
		usage_with_options(kmem_usage, kmem_options);

out_delete:
	perf_session__delete(session);

	return ret;
}