// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"

#include "util/evlist.h"
#include "util/evsel.h"
#include "util/config.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/callchain.h"
#include "util/time-utils.h"
#include <linux/err.h>

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"

#include "util/debug.h"
#include "util/string2.h"
#include "util/util.h"

#include <linux/kernel.h>
#include <linux/numa.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <regex.h>

#include <linux/ctype.h>
#include <traceevent/event-parse.h>

static int kmem_slab;
static int kmem_page;

static long kmem_page_size;
static enum {
	KMEM_SLAB,
	KMEM_PAGE,
} kmem_default = KMEM_SLAB;	/* for backward compatibility */

typedef int (*sort_fn_t)(void *, void *);

static int alloc_flag;
static int caller_flag;

static int alloc_lines = -1;
static int caller_lines = -1;

static bool raw_ip;

struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;
	u64	last_alloc;
	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated, total_freed;
static unsigned long nr_allocs, nr_cross_allocs;

/* filters for controlling the start and stop time of the analysis */
static struct perf_time_interval ptime;
const char *time_str;

static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	data->last_alloc = bytes_alloc;

	return 0;
}

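/*
 * Allocations are tracked in two rb-trees: root_alloc_stat is keyed by
 * the returned pointer so that a later kfree event can be matched to
 * its allocation, while root_caller_stat below is keyed by call site
 * for the per-callsite report.
 */
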
static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}

static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
	unsigned long ptr = evsel__intval(evsel, sample, "ptr"),
		      call_site = evsel__intval(evsel, sample, "call_site");
	int bytes_req = evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;
	nr_allocs++;

	/*
	 * Commit 11e9734bcb6a ("mm/slab_common: unify NUMA and UMA
	 * version of tracepoints") adds the field "node" to the
	 * tracepoints 'kmalloc' and 'kmem_cache_alloc'.
	 *
	 * The legacy tracepoints 'kmalloc_node' and 'kmem_cache_alloc_node'
	 * also contain the field "node".
	 *
	 * If the tracepoint contains the field "node", the tool counts
	 * cross-node allocations.
	 */
	if (evsel__field(evsel, "node")) {
		int node1, node2;

		node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
		node2 = evsel__intval(evsel, sample, "node");

		/*
		 * If the field "node" is NUMA_NO_NODE (-1), we don't take it
		 * as a cross allocation.
		 */
		if ((node2 != NUMA_NO_NODE) && (node1 != node2))
			nr_cross_allocs++;
	}

	return 0;
}

static int ptr_cmp(void *, void *);
static int slab_callsite_cmp(void *, void *);

static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

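/*
 * A "ping-pong" is an object allocated on one CPU and freed on another;
 * both the allocation and its call site are charged below, since such
 * traffic tends to bounce cache lines between CPUs.
 */
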
static int evsel__process_free_event(struct evsel *evsel, struct perf_sample *sample)
{
	unsigned long ptr = evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	total_freed += s_alloc->last_alloc;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat,
					     slab_callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}

static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;
static u64 total_page_fail_bytes;
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;

static bool use_pfn;
static bool live_page;
static struct perf_session *kmem_session;

#define MAX_MIGRATE_TYPES  6
#define MAX_PAGE_ORDER     11

static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];

struct page_stat {
	struct rb_node	node;
	u64		page;
	u64		callsite;
	int		order;
	unsigned	gfp_flags;
	unsigned	migrate_type;
	u64		alloc_bytes;
	u64		free_bytes;
	int		nr_alloc;
	int		nr_free;
};

static struct rb_root page_live_tree;
static struct rb_root page_alloc_tree;
static struct rb_root page_alloc_sorted;
static struct rb_root page_caller_tree;
static struct rb_root page_caller_sorted;

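/*
 * Page events use three trees: page_live_tree holds currently allocated
 * pages keyed by page/pfn, so that a free event can recover the gfp
 * flags, migrate type and callsite recorded at allocation time, while
 * page_alloc_tree and page_caller_tree accumulate per-page and
 * per-callsite totals for the report.
 */
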
struct alloc_func {
	u64 start;
	u64 end;
	char *name;
};

static int nr_alloc_funcs;
static struct alloc_func *alloc_func_list;

static int funcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}

static int callcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	/* the key has start == end == ip; match any ip inside a function */
	if (fb->start <= fa->start && fa->end < fb->end)
		return 0;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}

static int build_alloc_func_list(void)
{
	int ret;
	struct map *kernel_map;
	struct symbol *sym;
	struct rb_node *node;
	struct alloc_func *func;
	struct machine *machine = &kmem_session->machines.host;
	regex_t alloc_func_regex;
	static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";

	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
	if (ret) {
		char err[BUFSIZ];

		regerror(ret, &alloc_func_regex, err, sizeof(err));
		pr_err("Invalid regex: %s\n%s", pattern, err);
		return -EINVAL;
	}

	kernel_map = machine__kernel_map(machine);
	if (map__load(kernel_map) < 0) {
		pr_err("cannot load kernel map\n");
		return -ENOENT;
	}

	map__for_each_symbol(kernel_map, sym, node) {
		if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
			continue;

		func = realloc(alloc_func_list,
			       (nr_alloc_funcs + 1) * sizeof(*func));
		if (func == NULL)
			return -ENOMEM;

		pr_debug("alloc func: %s\n", sym->name);
		func[nr_alloc_funcs].start = sym->start;
		func[nr_alloc_funcs].end   = sym->end;
		func[nr_alloc_funcs].name  = sym->name;

		alloc_func_list = func;
		nr_alloc_funcs++;
	}

	qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);

	regfree(&alloc_func_regex);
	return 0;
}

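/*
 * The regex above matches the kernel's page allocation entry points,
 * e.g. __alloc_pages, __get_free_pages and get_zeroed_page, so that
 * find_callsite() below can skip over them to the real caller.
 */
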
/*
 * Find the first non-memory-allocation function from the callchain.
 * The allocation functions are listed in 'alloc_func_list'.
 */
static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
{
	struct addr_location al;
	struct machine *machine = &kmem_session->machines.host;
	struct callchain_cursor_node *node;
	struct callchain_cursor *cursor;
	u64 result = sample->ip;

	addr_location__init(&al);
	if (alloc_func_list == NULL) {
		if (build_alloc_func_list() < 0)
			goto out;
	}

	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);

	cursor = get_tls_callchain_cursor();
	if (cursor == NULL)
		goto out;

	sample__resolve_callchain(sample, cursor, NULL, evsel, &al, 16);

	callchain_cursor_commit(cursor);
	while (true) {
		struct alloc_func key, *caller;
		u64 addr;

		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		key.start = key.end = node->ip;
		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
				 sizeof(key), callcmp);
		if (!caller) {
			/* found */
			if (node->ms.map)
				addr = map__dso_unmap_ip(node->ms.map, node->ip);
			else
				addr = node->ip;

			result = addr;
			goto out;
		} else
			pr_debug3("skipping alloc function: %s\n", caller->name);

		callchain_cursor_advance(cursor);
	}

	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
out:
	addr_location__exit(&al);
	return result;
}

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(page_alloc_sort_input);
static LIST_HEAD(page_caller_sort_input);

static struct page_stat *
__page_stat__findnew_page(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_live_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;

	while (*node) {
		s64 cmp;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		cmp = data->page - pstat->page;
		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_live_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_page(struct page_stat *pstat)
{
	return __page_stat__findnew_page(pstat, false);
}

static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
{
	return __page_stat__findnew_page(pstat, true);
}

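/*
 * The __page_stat__findnew_*() helpers follow the usual find/findnew
 * split: the find variants only look up an existing node, while the
 * findnew variants insert a zero-initialized node on a miss.
 */
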
static struct page_stat *
__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_alloc_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		list_for_each_entry(sort, &page_alloc_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_alloc_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, false);
}

static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, true);
}

static struct page_stat *
__page_stat__findnew_caller(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_caller_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		list_for_each_entry(sort, &page_caller_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->callsite = pstat->callsite;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_caller_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, false);
}

static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, true);
}

static bool valid_page(u64 pfn_or_page)
{
	if (use_pfn && pfn_or_page == -1UL)
		return false;
	if (!use_pfn && pfn_or_page == 0)
		return false;
	return true;
}

struct gfp_flag {
	unsigned int flags;
	char *compact_str;
	char *human_readable;
};

static struct gfp_flag *gfps;
static int nr_gfps;

static int gfpcmp(const void *a, const void *b)
{
	const struct gfp_flag *fa = a;
	const struct gfp_flag *fb = b;

	return fa->flags - fb->flags;
}

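/*
 * 'gfps' is kept sorted by flag value (parse_gfp_flags() qsorts after
 * each insertion) so that compact_gfp_string() can use bsearch().
 */
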
/* see include/trace/events/mmflags.h */
static const struct {
	const char *original;
	const char *compact;
} gfp_compact_table[] = {
	{ "GFP_TRANSHUGE",		"THP" },
	{ "GFP_TRANSHUGE_LIGHT",	"THL" },
	{ "GFP_HIGHUSER_MOVABLE",	"HUM" },
	{ "GFP_HIGHUSER",		"HU" },
	{ "GFP_USER",			"U" },
	{ "GFP_KERNEL_ACCOUNT",		"KAC" },
	{ "GFP_KERNEL",			"K" },
	{ "GFP_NOFS",			"NF" },
	{ "GFP_ATOMIC",			"A" },
	{ "GFP_NOIO",			"NI" },
	{ "GFP_NOWAIT",			"NW" },
	{ "GFP_DMA",			"D" },
	{ "__GFP_HIGHMEM",		"HM" },
	{ "GFP_DMA32",			"D32" },
	{ "__GFP_HIGH",			"H" },
	{ "__GFP_IO",			"I" },
	{ "__GFP_FS",			"F" },
	{ "__GFP_NOWARN",		"NWR" },
	{ "__GFP_RETRY_MAYFAIL",	"R" },
	{ "__GFP_NOFAIL",		"NF" },
	{ "__GFP_NORETRY",		"NR" },
	{ "__GFP_COMP",			"C" },
	{ "__GFP_ZERO",			"Z" },
	{ "__GFP_NOMEMALLOC",		"NMA" },
	{ "__GFP_MEMALLOC",		"MA" },
	{ "__GFP_HARDWALL",		"HW" },
	{ "__GFP_THISNODE",		"TN" },
	{ "__GFP_RECLAIMABLE",		"RC" },
	{ "__GFP_MOVABLE",		"M" },
	{ "__GFP_ACCOUNT",		"AC" },
	{ "__GFP_WRITE",		"WR" },
	{ "__GFP_RECLAIM",		"R" },
	{ "__GFP_DIRECT_RECLAIM",	"DR" },
	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
};

static size_t max_gfp_len;

static char *compact_gfp_flags(char *gfp_flags)
{
	char *orig_flags = strdup(gfp_flags);
	char *new_flags = NULL;
	char *str, *pos = NULL;
	size_t len = 0;

	if (orig_flags == NULL)
		return NULL;

	str = strtok_r(orig_flags, "|", &pos);
	while (str) {
		size_t i;
		char *new;
		const char *cpt;

		for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
			if (strcmp(gfp_compact_table[i].original, str))
				continue;

			cpt = gfp_compact_table[i].compact;
			new = realloc(new_flags, len + strlen(cpt) + 2);
			if (new == NULL) {
				free(new_flags);
				free(orig_flags);
				return NULL;
			}

			new_flags = new;

			if (!len) {
				strcpy(new_flags, cpt);
			} else {
				strcat(new_flags, "|");
				strcat(new_flags, cpt);
				len++;
			}

			len += strlen(cpt);
		}

		str = strtok_r(NULL, "|", &pos);
	}

	if (max_gfp_len < len)
		max_gfp_len = len;

	free(orig_flags);
	return new_flags;
}

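/*
 * Example (illustrative): per the table above, a gfp_flags string of
 * "GFP_KERNEL|__GFP_ZERO" is compacted to "K|Z", keeping the GFP
 * column of the report narrow.
 */
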
static char *compact_gfp_string(unsigned long gfp_flags)
{
	struct gfp_flag key = {
		.flags = gfp_flags,
	};
	struct gfp_flag *gfp;

	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
	if (gfp)
		return gfp->compact_str;

	return NULL;
}

static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
			   unsigned int gfp_flags)
{
	struct tep_record record = {
		.cpu = sample->cpu,
		.data = sample->raw_data,
		.size = sample->raw_size,
	};
	struct trace_seq seq;
	char *str, *pos = NULL;

	if (nr_gfps) {
		struct gfp_flag key = {
			.flags = gfp_flags,
		};

		if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
			return 0;
	}

	trace_seq_init(&seq);
	tep_print_event(evsel->tp_format->tep,
			&seq, &record, "%s", TEP_PRINT_INFO);

	str = strtok_r(seq.buffer, " ", &pos);
	while (str) {
		if (!strncmp(str, "gfp_flags=", 10)) {
			struct gfp_flag *new;

			new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
			if (new == NULL)
				return -ENOMEM;

			gfps = new;
			new += nr_gfps++;

			new->flags = gfp_flags;
			new->human_readable = strdup(str + 10);
			new->compact_str = compact_gfp_flags(str + 10);
			if (!new->human_readable || !new->compact_str)
				return -ENOMEM;

			qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
			break;
		}

		str = strtok_r(NULL, " ", &pos);
	}

	trace_seq_destroy(&seq);
	return 0;
}

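/*
 * Note that GFP strings are learned lazily: the first time a flag
 * combination is seen, its human-readable form is taken from the
 * pretty-printed tracepoint data rather than decoded from flag bits.
 */
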
static int evsel__process_page_alloc_event(struct evsel *evsel, struct perf_sample *sample)
{
	u64 page;
	unsigned int order = evsel__intval(evsel, sample, "order");
	unsigned int gfp_flags = evsel__intval(evsel, sample, "gfp_flags");
	unsigned int migrate_type = evsel__intval(evsel, sample,
						  "migratetype");
	u64 bytes = kmem_page_size << order;
	u64 callsite;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
		.gfp_flags = gfp_flags,
		.migrate_type = migrate_type,
	};

	if (use_pfn)
		page = evsel__intval(evsel, sample, "pfn");
	else
		page = evsel__intval(evsel, sample, "page");

	nr_page_allocs++;
	total_page_alloc_bytes += bytes;

	if (!valid_page(page)) {
		nr_page_fails++;
		total_page_fail_bytes += bytes;
		return 0;
	}

	if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
		return -1;

	callsite = find_callsite(evsel, sample);

	/*
	 * This is to find the current page (with correct gfp flags and
	 * migrate type) at the free event.
	 */
	this.page = page;
	pstat = page_stat__findnew_page(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;
	pstat->callsite = callsite;

	if (!live_page) {
		pstat = page_stat__findnew_alloc(&this);
		if (pstat == NULL)
			return -ENOMEM;

		pstat->nr_alloc++;
		pstat->alloc_bytes += bytes;
		pstat->callsite = callsite;
	}

	this.callsite = callsite;
	pstat = page_stat__findnew_caller(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;

	order_stats[order][migrate_type]++;

	return 0;
}

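/*
 * With --live, the page_alloc_tree update above is skipped and the
 * report is generated from page_live_tree instead, so "alloc" numbers
 * mean pages still outstanding rather than cumulative totals.
 */
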
static int evsel__process_page_free_event(struct evsel *evsel, struct perf_sample *sample)
{
	u64 page;
	unsigned int order = evsel__intval(evsel, sample, "order");
	u64 bytes = kmem_page_size << order;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
	};

	if (use_pfn)
		page = evsel__intval(evsel, sample, "pfn");
	else
		page = evsel__intval(evsel, sample, "page");

	nr_page_frees++;
	total_page_free_bytes += bytes;

	this.page = page;
	pstat = page_stat__find_page(&this);
	if (pstat == NULL) {
		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
			  page, order);

		nr_page_nomatch++;
		total_page_nomatch_bytes += bytes;

		return 0;
	}

	this.gfp_flags = pstat->gfp_flags;
	this.migrate_type = pstat->migrate_type;
	this.callsite = pstat->callsite;

	rb_erase(&pstat->node, &page_live_tree);
	free(pstat);

	if (live_page) {
		order_stats[this.order][this.migrate_type]--;
	} else {
		pstat = page_stat__find_alloc(&this);
		if (pstat == NULL)
			return -ENOMEM;

		pstat->nr_free++;
		pstat->free_bytes += bytes;
	}

	pstat = page_stat__find_caller(&this);
	if (pstat == NULL)
		return -ENOENT;

	pstat->nr_free++;
	pstat->free_bytes += bytes;

	if (live_page) {
		pstat->nr_alloc--;
		pstat->alloc_bytes -= bytes;

		if (pstat->nr_alloc == 0) {
			rb_erase(&pstat->node, &page_caller_tree);
			free(pstat);
		}
	}

	return 0;
}

static bool perf_kmem__skip_sample(struct perf_sample *sample)
{
	/* skip sample based on time? */
	if (perf_time__skip_sample(&ptime, sample->time))
		return true;

	return false;
}

typedef int (*tracepoint_handler)(struct evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (perf_kmem__skip_sample(sample))
		return 0;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread__tid(thread));

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(evsel, sample);
	}

	thread__put(thread);

	return err;
}

static struct perf_tool perf_kmem = {
	.sample		 = process_sample_event,
	.comm		 = perf_event__process_comm,
	.mmap		 = perf_event__process_mmap,
	.mmap2		 = perf_event__process_mmap2,
	.namespaces	 = perf_event__process_namespaces,
	.ordered_events	 = true,
};

static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

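/*
 * Example (illustrative): a 100-byte request served from a 128-byte
 * slab object gives fragmentation(100, 128) = 100 - 100*100/128
 * = 21.875%.
 */
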
static void __print_slab_result(struct rb_root *root,
				struct perf_session *session,
				int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.105s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
	printf("%.105s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_symbol(machine, addr, &map);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map__unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");

	printf("%.105s\n", graph_dotted_line);
}

static const char * const migrate_type_str[] = {
	"UNMOVABL",
	"RECLAIM",
	"MOVABLE",
	"RESERVED",
	"CMA/ISLT",	/* CMA + isolate */
	"UNKNOWN",
};

static void __print_page_alloc_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_alloc_sorted);
	struct machine *machine = &session->machines.host;
	const char *format;
	int gfp_len = max(strlen("GFP flags"), max_gfp_len);

	printf("\n%.105s\n", graph_dotted_line);
	printf(" %-16s | %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
	       use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
	       gfp_len, "GFP flags");
	printf("%.105s\n", graph_dotted_line);

	if (use_pfn)
		format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
	else
		format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
		if (sym)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(format, (unsigned long long)data->page,
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       gfp_len, compact_gfp_string(data->gfp_flags), caller);

		next = rb_next(next);
	}

	if (n_lines == -1) {
		printf(" ...              | ...              | ...       | ...   | ...      | %-*s | ...\n",
		       gfp_len, "...");
	}

	printf("%.105s\n", graph_dotted_line);
}

static void __print_page_caller_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_caller_sorted);
	struct machine *machine = &session->machines.host;
	int gfp_len = max(strlen("GFP flags"), max_gfp_len);

	printf("\n%.105s\n", graph_dotted_line);
	printf(" %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
	       live_page ? "Live" : "Total", gfp_len, "GFP flags");
	printf("%.105s\n", graph_dotted_line);

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
		if (sym)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       gfp_len, compact_gfp_string(data->gfp_flags), caller);

		next = rb_next(next);
	}

	if (n_lines == -1) {
		printf(" ...              | ...       | ...   | ...      | %-*s | ...\n",
		       gfp_len, "...");
	}

	printf("%.105s\n", graph_dotted_line);
}

static void print_gfp_flags(void)
{
	int i;

	printf("#\n");
	printf("# GFP flags\n");
	printf("# ---------\n");
	for (i = 0; i < nr_gfps; i++) {
		printf("# %08x: %*s: %s\n", gfps[i].flags,
		       (int) max_gfp_len, gfps[i].compact_str,
		       gfps[i].human_readable);
	}
}

static void print_slab_summary(void)
{
	printf("\nSUMMARY (SLAB allocator)");
	printf("\n========================\n");
	printf("Total bytes requested: %'lu\n", total_requested);
	printf("Total bytes allocated: %'lu\n", total_allocated);
	printf("Total bytes freed:     %'lu\n", total_freed);
	if (total_allocated > total_freed) {
		printf("Net total bytes allocated: %'lu\n",
		       total_allocated - total_freed);
	}
	printf("Total bytes wasted on internal fragmentation: %'lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}

static void print_page_summary(void)
{
	int o, m;
	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;

	printf("\nSUMMARY (page allocator)");
	printf("\n========================\n");
	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation requests",
	       nr_page_allocs, total_page_alloc_bytes / 1024);
	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free requests",
	       nr_page_frees, total_page_free_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
	       nr_page_allocs - nr_alloc_freed,
	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free-only requests",
	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation failures",
	       nr_page_fails, total_page_fail_bytes / 1024);
	printf("\n");

	printf("%5s  %12s  %12s  %12s  %12s  %12s\n", "Order",  "Unmovable",
	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
	printf("%.5s  %.12s  %.12s  %.12s  %.12s  %.12s\n", graph_dotted_line,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
	       graph_dotted_line, graph_dotted_line);

	for (o = 0; o < MAX_PAGE_ORDER; o++) {
		printf("%5d", o);
		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
			if (order_stats[o][m])
				printf("  %'12d", order_stats[o][m]);
			else
				printf("  %12c", '.');
		}
		printf("\n");
	}
}

static void print_slab_result(struct perf_session *session)
{
	if (caller_flag)
		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_slab_summary();
}

static void print_page_result(struct perf_session *session)
{
	if (caller_flag || alloc_flag)
		print_gfp_flags();
	if (caller_flag)
		__print_page_caller_result(session, caller_lines);
	if (alloc_flag)
		__print_page_alloc_result(session, alloc_lines);
	print_page_summary();
}

static void print_result(struct perf_session *session)
{
	if (kmem_slab)
		print_slab_result(session);
	if (kmem_page)
		print_page_result(session);
}

static LIST_HEAD(slab_caller_sort);
static LIST_HEAD(slab_alloc_sort);
static LIST_HEAD(page_caller_sort);
static LIST_HEAD(page_alloc_sort);

static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
			       struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_slab_insert(root_sorted, data, sort_list);
	}
}

static void sort_page_insert(struct rb_root *root, struct page_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct page_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct page_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
			       struct list_head *sort_list)
{
	struct rb_node *node;
	struct page_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct page_stat, node);
		sort_page_insert(root_sorted, data, sort_list);
	}
}

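/*
 * Statistics are accumulated in trees keyed for event-time lookup (by
 * pointer, page or call site) and re-sorted into the user-selected key
 * order only here, once, after all events have been processed.
 */
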
static void sort_result(void)
{
	if (kmem_slab) {
		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
				   &slab_alloc_sort);
		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
				   &slab_caller_sort);
	}
	if (kmem_page) {
		if (live_page)
			__sort_page_result(&page_live_tree, &page_alloc_sorted,
					   &page_alloc_sort);
		else
			__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
					   &page_alloc_sort);

		__sort_page_result(&page_caller_tree, &page_caller_sorted,
				   &page_caller_sort);
	}
}

static int __cmd_kmem(struct perf_session *session)
{
	int err = -EINVAL;
	struct evsel *evsel;
	const struct evsel_str_handler kmem_tracepoints[] = {
		/* slab allocator */
		{ "kmem:kmalloc",		evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc",	evsel__process_alloc_event, },
		{ "kmem:kmalloc_node",		evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc_node", evsel__process_alloc_event, },
		{ "kmem:kfree",			evsel__process_free_event, },
		{ "kmem:kmem_cache_free",	evsel__process_free_event, },
		/* page allocator */
		{ "kmem:mm_page_alloc",		evsel__process_page_alloc_event, },
		{ "kmem:mm_page_free",		evsel__process_page_free_event, },
	};

	if (!perf_session__has_traces(session, "kmem record"))
		goto out;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (!strcmp(evsel__name(evsel), "kmem:mm_page_alloc") &&
		    evsel__field(evsel, "pfn")) {
			use_pfn = true;
			break;
		}
	}

	setup_pager();
	err = perf_session__process_events(session);
	if (err != 0) {
		pr_err("error during process events: %d\n", err);
		goto out;
	}
	sort_result();
	print_result(session);
out:
	return err;
}

/* slab sort keys */
static int ptr_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int slab_callsite_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= slab_callsite_cmp,
};

static int hit_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(void *a, void *b)
{
	double x, y;
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

/* page sort keys */
static int page_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->page < r->page)
		return -1;
	else if (l->page > r->page)
		return 1;
	return 0;
}

static struct sort_dimension page_sort_dimension = {
	.name	= "page",
	.cmp	= page_cmp,
};

static int page_callsite_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->callsite < r->callsite)
		return -1;
	else if (l->callsite > r->callsite)
		return 1;
	return 0;
}

static struct sort_dimension page_callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= page_callsite_cmp,
};

static int page_hit_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->nr_alloc < r->nr_alloc)
		return -1;
	else if (l->nr_alloc > r->nr_alloc)
		return 1;
	return 0;
}

static struct sort_dimension page_hit_sort_dimension = {
	.name	= "hit",
	.cmp	= page_hit_cmp,
};

static int page_bytes_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->alloc_bytes < r->alloc_bytes)
		return -1;
	else if (l->alloc_bytes > r->alloc_bytes)
		return 1;
	return 0;
}

static struct sort_dimension page_bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= page_bytes_cmp,
};

static int page_order_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->order < r->order)
		return -1;
	else if (l->order > r->order)
		return 1;
	return 0;
}

static struct sort_dimension page_order_sort_dimension = {
	.name	= "order",
	.cmp	= page_order_cmp,
};

static int migrate_type_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	/* for internal use to find free'd page */
	if (l->migrate_type == -1U)
		return 0;

	if (l->migrate_type < r->migrate_type)
		return -1;
	else if (l->migrate_type > r->migrate_type)
		return 1;
	return 0;
}

static struct sort_dimension migrate_type_sort_dimension = {
	.name	= "migtype",
	.cmp	= migrate_type_cmp,
};

static int gfp_flags_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	/* for internal use to find free'd page */
	if (l->gfp_flags == -1U)
		return 0;

	if (l->gfp_flags < r->gfp_flags)
		return -1;
	else if (l->gfp_flags > r->gfp_flags)
		return 1;
	return 0;
}

static struct sort_dimension gfp_flags_sort_dimension = {
	.name	= "gfp",
	.cmp	= gfp_flags_cmp,
};

static struct sort_dimension *slab_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

static struct sort_dimension *page_sorts[] = {
	&page_sort_dimension,
	&page_callsite_sort_dimension,
	&page_hit_sort_dimension,
	&page_bytes_sort_dimension,
	&page_order_sort_dimension,
	&migrate_type_sort_dimension,
	&gfp_flags_sort_dimension,
};

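/*
 * Example (illustrative): 'perf kmem stat --page --sort order,bytes'
 * picks page_order_sort_dimension and then page_bytes_sort_dimension
 * as the comparison chain for the report.
 */
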
static int slab_sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
		if (!strcmp(slab_sorts[i]->name, tok)) {
			sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int page_sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
		if (!strcmp(page_sorts[i]->name, tok)) {
			sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (slab_sort_dimension__add(tok, sort_list) < 0) {
			pr_err("Unknown slab --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int setup_page_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (page_sort_dimension__add(tok, sort_list) < 0) {
			pr_err("Unknown page --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	if (kmem_page > kmem_slab ||
	    (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
		if (caller_flag > alloc_flag)
			return setup_page_sorting(&page_caller_sort, arg);
		else
			return setup_page_sorting(&page_alloc_sort, arg);
	} else {
		if (caller_flag > alloc_flag)
			return setup_slab_sorting(&slab_caller_sort, arg);
		else
			return setup_slab_sorting(&slab_alloc_sort, arg);
	}

	return 0;
}

static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_slab_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_slab = (kmem_page + 1);
	return 0;
}

static int parse_page_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_page = (kmem_slab + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

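/*
 * The "+ 1" in the callbacks above records which of two mutually
 * exclusive options came last on the command line: e.g. if both
 * --caller and --alloc are given, the later one ends up with the
 * larger value and wins the caller_flag > alloc_flag comparisons.
 */
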
static bool slab_legacy_tp_is_exposed(void)
{
	/*
	 * The tracepoints "kmem:kmalloc_node" and
	 * "kmem:kmem_cache_alloc_node" have been removed on the latest
	 * kernels; if the tracepoint "kmem:kmalloc_node" exists, the
	 * tool is running on an older kernel and needs to fall back to
	 * these legacy tracepoints.
	 */
	return IS_ERR(trace_event__tp_format("kmem", "kmalloc_node")) ?
		false : true;
}

static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
		"record", "-a", "-R", "-c", "1",
	};
	const char * const slab_events[] = {
		"-e", "kmem:kmalloc",
		"-e", "kmem:kfree",
		"-e", "kmem:kmem_cache_alloc",
		"-e", "kmem:kmem_cache_free",
	};
	const char * const slab_legacy_events[] = {
		"-e", "kmem:kmalloc_node",
		"-e", "kmem:kmem_cache_alloc_node",
	};
	const char * const page_events[] = {
		"-e", "kmem:mm_page_alloc",
		"-e", "kmem:mm_page_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;
	unsigned int slab_legacy_tp_exposed = slab_legacy_tp_is_exposed();

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	if (kmem_slab) {
		rec_argc += ARRAY_SIZE(slab_events);
		if (slab_legacy_tp_exposed)
			rec_argc += ARRAY_SIZE(slab_legacy_events);
	}
	if (kmem_page)
		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */

	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	if (kmem_slab) {
		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
			rec_argv[i] = strdup(slab_events[j]);
		if (slab_legacy_tp_exposed) {
			for (j = 0; j < ARRAY_SIZE(slab_legacy_events); j++, i++)
				rec_argv[i] = strdup(slab_legacy_events[j]);
		}
	}
	if (kmem_page) {
		rec_argv[i++] = strdup("-g");

		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
			rec_argv[i] = strdup(page_events[j]);
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv);
}

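/*
 * Example (illustrative): 'perf kmem --page record sleep 1' expands to
 * roughly 'perf record -a -R -c 1 -g -e kmem:mm_page_alloc
 * -e kmem:mm_page_free sleep 1'.
 */
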
static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "kmem.default")) {
		if (!strcmp(value, "slab"))
			kmem_default = KMEM_SLAB;
		else if (!strcmp(value, "page"))
			kmem_default = KMEM_PAGE;
		else
			pr_err("invalid default value ('slab' or 'page' required): %s\n",
			       value);
		return 0;
	}

	return 0;
}

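/*
 * Example (illustrative): the "kmem.default" knob read above comes from
 * the perf config file, e.g.:
 *
 *	[kmem]
 *		default = page
 */
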
int cmd_kmem(int argc, const char **argv)
{
	const char * const default_slab_sort = "frag,hit,bytes";
	const char * const default_page_sort = "bytes,hit";
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
	};
	const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics", parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics", parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
		     "page, order, migtype, gfp", parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
			   parse_slab_opt),
	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
			   parse_page_opt),
	OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
	OPT_STRING(0, "time", &time_str, "str",
		   "Time span of interest (start,stop)"),
	OPT_END()
	};
	const char *const kmem_subcommands[] = { "record", "stat", NULL };
	const char *kmem_usage[] = {
		NULL,
		NULL
	};
	struct perf_session *session;
	static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
	int ret = perf_config(kmem_config, NULL);

	if (ret)
		return ret;

	argc = parse_options_subcommand(argc, argv, kmem_options,
					kmem_subcommands, kmem_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	if (kmem_slab == 0 && kmem_page == 0) {
		if (kmem_default == KMEM_SLAB)
			kmem_slab = 1;
		else
			kmem_page = 1;
	}

	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
		symbol__init(NULL);
		return __cmd_record(argc, argv);
	}

	data.path = input_name;

	kmem_session = session = perf_session__new(&data, &perf_kmem);
	if (IS_ERR(session))
		return PTR_ERR(session);

	ret = -1;

	if (kmem_slab) {
		if (!evlist__find_tracepoint_by_name(session->evlist, "kmem:kmalloc")) {
			pr_err(errmsg, "slab", "slab");
			goto out_delete;
		}
	}

	if (kmem_page) {
		struct evsel *evsel = evlist__find_tracepoint_by_name(session->evlist, "kmem:mm_page_alloc");

		if (evsel == NULL) {
			pr_err(errmsg, "page", "page");
			goto out_delete;
		}

		kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
		symbol_conf.use_callchain = true;
	}

	symbol__init(&session->header.env);

	if (perf_time__parse_str(&ptime, time_str) != 0) {
		pr_err("Invalid time string\n");
		ret = -EINVAL;
		goto out_delete;
	}

	if (!strcmp(argv[0], "stat")) {
		setlocale(LC_ALL, "");

		if (cpu__setup_cpunode_map())
			goto out_delete;

		if (list_empty(&slab_caller_sort))
			setup_slab_sorting(&slab_caller_sort, default_slab_sort);
		if (list_empty(&slab_alloc_sort))
			setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
		if (list_empty(&page_caller_sort))
			setup_page_sorting(&page_caller_sort, default_page_sort);
		if (list_empty(&page_alloc_sort))
			setup_page_sorting(&page_alloc_sort, default_page_sort);

		if (kmem_page) {
			setup_page_sorting(&page_alloc_sort_input,
					   "page,order,migtype,gfp");
			setup_page_sorting(&page_caller_sort_input,
					   "callsite,order,migtype,gfp");
		}
		ret = __cmd_kmem(session);
	} else
		usage_with_options(kmem_usage, kmem_options);

out_delete:
	perf_session__delete(session);

	return ret;
}