1 // SPDX-License-Identifier: GPL-2.0
7 #include <linux/capability.h>
8 #include <linux/kernel.h>
9 #include <linux/mman.h>
10 #include <linux/string.h>
11 #include <linux/time64.h>
12 #include <sys/types.h>
14 #include <sys/param.h>
22 #include "util.h" // lsdir()
28 #include "map_symbol.h"
29 #include "mem-events.h"
33 #include "namespaces.h"
36 #include <linux/ctype.h>
37 #include <linux/zalloc.h>
41 #include <symbol/kallsyms.h>
42 #include <sys/utsname.h>
44 static int dso__load_kernel_sym(struct dso *dso, struct map *map);
45 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map);
46 static bool symbol__is_idle(const char *name);
48 int vmlinux_path__nr_entries;
51 struct map_list_node {
52 struct list_head node;
56 struct symbol_conf symbol_conf = {
59 .try_vmlinux_path = true,
61 .demangle_kernel = false,
62 .cumulate_callchain = true,
63 .time_quantum = 100 * NSEC_PER_MSEC, /* 100ms */
64 .show_hist_headers = true,
71 static enum dso_binary_type binary_type_symtab[] = {
72 DSO_BINARY_TYPE__KALLSYMS,
73 DSO_BINARY_TYPE__GUEST_KALLSYMS,
74 DSO_BINARY_TYPE__JAVA_JIT,
75 DSO_BINARY_TYPE__DEBUGLINK,
76 DSO_BINARY_TYPE__BUILD_ID_CACHE,
77 DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO,
78 DSO_BINARY_TYPE__FEDORA_DEBUGINFO,
79 DSO_BINARY_TYPE__UBUNTU_DEBUGINFO,
80 DSO_BINARY_TYPE__BUILDID_DEBUGINFO,
81 DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
82 DSO_BINARY_TYPE__GUEST_KMODULE,
83 DSO_BINARY_TYPE__GUEST_KMODULE_COMP,
84 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
85 DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP,
86 DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO,
87 DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO,
88 DSO_BINARY_TYPE__NOT_FOUND,
91 #define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab)
93 static struct map_list_node *map_list_node__new(void)
95 return malloc(sizeof(struct map_list_node));
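/* Keep only text (T/W) and data (D/B) symbols from kallsyms-style symbol type characters. */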
98 static bool symbol_type__filter(char symbol_type)
100 symbol_type = toupper(symbol_type);
101 return symbol_type == 'T' || symbol_type == 'W' || symbol_type == 'D' || symbol_type == 'B';
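/* Count the number of leading underscores in @str. */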
104 static int prefix_underscores_count(const char *str)
106 const char *tail = str;
114 const char * __weak arch__normalize_symbol_name(const char *name)
119 int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
121 return strcmp(namea, nameb);
124 int __weak arch__compare_symbol_names_n(const char *namea, const char *nameb,
127 return strncmp(namea, nameb, n);
130 int __weak arch__choose_best_symbol(struct symbol *syma,
131 struct symbol *symb __maybe_unused)
133 /* Avoid "SyS" kernel syscall aliases */
134 if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
136 if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
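/*
 * Rank two symbols that share a start address: prefer non-zero size, then
 * non-weak binding, then global binding, then fewer leading underscores,
 * then the longer name, and finally defer to the arch-specific choice.
 */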
142 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
148 /* Prefer a symbol with non zero length */
149 a = syma->end - syma->start;
150 b = symb->end - symb->start;
151 if ((b == 0) && (a > 0))
153 else if ((a == 0) && (b > 0))
156 /* Prefer a non weak symbol over a weak one */
157 a = syma->binding == STB_WEAK;
158 b = symb->binding == STB_WEAK;
164 /* Prefer a global symbol over a non global one */
165 a = syma->binding == STB_GLOBAL;
166 b = symb->binding == STB_GLOBAL;
172 /* Prefer a symbol with less underscores */
173 a = prefix_underscores_count(syma->name);
174 b = prefix_underscores_count(symb->name);
180 /* Choose the symbol with the longest name */
181 na = strlen(syma->name);
182 nb = strlen(symb->name);
188 return arch__choose_best_symbol(syma, symb);
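/*
 * Drop duplicate symbols that share a start address, keeping the one
 * preferred by choose_best_symbol() (unless symbol aliases are allowed).
 */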
191 void symbols__fixup_duplicate(struct rb_root_cached *symbols)
194 struct symbol *curr, *next;
196 if (symbol_conf.allow_aliases)
199 nd = rb_first_cached(symbols);
202 curr = rb_entry(nd, struct symbol, rb_node);
204 nd = rb_next(&curr->rb_node);
205 next = rb_entry(nd, struct symbol, rb_node);
210 if (curr->start != next->start)
213 if (choose_best_symbol(curr, next) == SYMBOL_A) {
214 if (next->type == STT_GNU_IFUNC)
215 curr->ifunc_alias = true;
216 rb_erase_cached(&next->rb_node, symbols);
217 symbol__delete(next);
220 if (curr->type == STT_GNU_IFUNC)
221 next->ifunc_alias = true;
222 nd = rb_next(&curr->rb_node);
223 rb_erase_cached(&curr->rb_node, symbols);
224 symbol__delete(curr);
229 /* Update zero-sized symbols using the address of the next symbol */
230 void symbols__fixup_end(struct rb_root_cached *symbols, bool is_kallsyms)
232 struct rb_node *nd, *prevnd = rb_first_cached(symbols);
233 struct symbol *curr, *prev;
238 curr = rb_entry(prevnd, struct symbol, rb_node);
240 for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
242 curr = rb_entry(nd, struct symbol, rb_node);
245 * On some architectures the kernel text segment starts at a
246 * low memory address, while modules are located at high
247 * memory addresses (or vice versa). The gap between the end of the
248 * kernel text segment and the beginning of the first module's text
249 * segment can be very big. Therefore do not fill this gap and do
250 * not assign it to the kernel dso map (kallsyms).
252 * In kallsyms, module symbols are marked with a '[' character, as in:
254 * ffffffffc1937000 T hdmi_driver_init [snd_hda_codec_hdmi]
256 if (prev->end == prev->start) {
257 /* Last kernel/module symbol mapped to end of page */
258 if (is_kallsyms && (!strchr(prev->name, '[') !=
259 !strchr(curr->name, '[')))
260 prev->end = roundup(prev->end + 4096, 4096);
262 prev->end = curr->start;
264 pr_debug4("%s sym:%s end:%#" PRIx64 "\n",
265 __func__, prev->name, prev->end);
270 if (curr->end == curr->start)
271 curr->end = roundup(curr->start, 4096) + 4096;
274 void maps__fixup_end(struct maps *maps)
276 struct map_rb_node *prev = NULL, *curr;
278 down_write(maps__lock(maps));
280 maps__for_each_entry(maps, curr) {
281 if (prev != NULL && !map__end(prev->map))
282 map__set_end(prev->map, map__start(curr->map));
288 * We still don't have the actual symbols, so guess the
289 * last map's final address.
291 if (curr && !map__end(curr->map))
292 map__set_end(curr->map, ~0ULL);
294 up_write(maps__lock(maps));
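/*
 * Allocate a symbol with symbol_conf.priv_size bytes of private space (e.g.
 * annotation data) in front of it; @name is copied into the trailing array.
 */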
297 struct symbol *symbol__new(u64 start, u64 len, u8 binding, u8 type, const char *name)
299 size_t namelen = strlen(name) + 1;
300 struct symbol *sym = calloc(1, (symbol_conf.priv_size +
301 sizeof(*sym) + namelen));
305 if (symbol_conf.priv_size) {
306 if (symbol_conf.init_annotation) {
307 struct annotation *notes = (void *)sym;
308 annotation__init(notes);
310 sym = ((void *)sym) + symbol_conf.priv_size;
314 sym->end = len ? start + len : start;
316 sym->binding = binding;
317 sym->namelen = namelen - 1;
319 pr_debug4("%s: %s %#" PRIx64 "-%#" PRIx64 "\n",
320 __func__, name, start, sym->end);
321 memcpy(sym->name, name, namelen);
326 void symbol__delete(struct symbol *sym)
328 if (symbol_conf.priv_size) {
329 if (symbol_conf.init_annotation) {
330 struct annotation *notes = symbol__annotation(sym);
332 annotation__exit(notes);
335 free(((void *)sym) - symbol_conf.priv_size);
338 void symbols__delete(struct rb_root_cached *symbols)
341 struct rb_node *next = rb_first_cached(symbols);
344 pos = rb_entry(next, struct symbol, rb_node);
345 next = rb_next(&pos->rb_node);
346 rb_erase_cached(&pos->rb_node, symbols);
351 void __symbols__insert(struct rb_root_cached *symbols,
352 struct symbol *sym, bool kernel)
354 struct rb_node **p = &symbols->rb_root.rb_node;
355 struct rb_node *parent = NULL;
356 const u64 ip = sym->start;
358 bool leftmost = true;
361 const char *name = sym->name;
363 * ppc64 uses function descriptors and appends a '.' to the
364 * start of every instruction address. Remove it.
368 sym->idle = symbol__is_idle(name);
373 s = rb_entry(parent, struct symbol, rb_node);
381 rb_link_node(&sym->rb_node, parent, p);
382 rb_insert_color_cached(&sym->rb_node, symbols, leftmost);
385 void symbols__insert(struct rb_root_cached *symbols, struct symbol *sym)
387 __symbols__insert(symbols, sym, false);
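/* Binary search the rbtree for the symbol whose address range contains @ip. */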
390 static struct symbol *symbols__find(struct rb_root_cached *symbols, u64 ip)
397 n = symbols->rb_root.rb_node;
400 struct symbol *s = rb_entry(n, struct symbol, rb_node);
404 else if (ip > s->end || (ip == s->end && ip != s->start))
413 static struct symbol *symbols__first(struct rb_root_cached *symbols)
415 struct rb_node *n = rb_first_cached(symbols);
418 return rb_entry(n, struct symbol, rb_node);
423 static struct symbol *symbols__last(struct rb_root_cached *symbols)
425 struct rb_node *n = rb_last(&symbols->rb_root);
428 return rb_entry(n, struct symbol, rb_node);
433 static struct symbol *symbols__next(struct symbol *sym)
435 struct rb_node *n = rb_next(&sym->rb_node);
438 return rb_entry(n, struct symbol, rb_node);
443 static int symbols__sort_name_cmp(const void *vlhs, const void *vrhs)
445 const struct symbol *lhs = *((const struct symbol **)vlhs);
446 const struct symbol *rhs = *((const struct symbol **)vrhs);
448 return strcmp(lhs->name, rhs->name);
451 static struct symbol **symbols__sort_by_name(struct rb_root_cached *source, size_t *len)
454 struct symbol **result;
455 size_t i = 0, size = 0;
457 for (nd = rb_first_cached(source); nd; nd = rb_next(nd))
460 result = malloc(sizeof(*result) * size);
464 for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
465 struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
469 qsort(result, size, sizeof(*result), symbols__sort_name_cmp);
474 int symbol__match_symbol_name(const char *name, const char *str,
475 enum symbol_tag_include includes)
477 const char *versioning;
479 if (includes == SYMBOL_TAG_INCLUDE__DEFAULT_ONLY &&
480 (versioning = strstr(name, "@@"))) {
481 int len = strlen(str);
483 if (len < versioning - name)
484 len = versioning - name;
486 return arch__compare_symbol_names_n(name, str, len);
488 return arch__compare_symbol_names(name, str);
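/*
 * Binary search @symbols, which is sorted by name, for @name; on success the
 * matching index may be reported through @found_idx.
 */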
491 static struct symbol *symbols__find_by_name(struct symbol *symbols[],
494 enum symbol_tag_include includes,
497 size_t i, lower = 0, upper = symbols_len;
498 struct symbol *s = NULL;
501 *found_idx = SIZE_MAX;
506 while (lower < upper) {
509 i = (lower + upper) / 2;
510 cmp = symbol__match_symbol_name(symbols[i]->name, name, includes);
523 if (s && includes != SYMBOL_TAG_INCLUDE__DEFAULT_ONLY) {
524 /* return first symbol that has same name (if any) */
526 struct symbol *tmp = symbols[i - 1];
528 if (!arch__compare_symbol_names(tmp->name, s->name)) {
536 assert(!found_idx || !s || s == symbols[*found_idx]);
540 void dso__reset_find_symbol_cache(struct dso *dso)
542 dso->last_find_result.addr = 0;
543 dso->last_find_result.symbol = NULL;
546 void dso__insert_symbol(struct dso *dso, struct symbol *sym)
548 __symbols__insert(&dso->symbols, sym, dso->kernel);
550 /* update the symbol cache if necessary */
551 if (dso->last_find_result.addr >= sym->start &&
552 (dso->last_find_result.addr < sym->end ||
553 sym->start == sym->end)) {
554 dso->last_find_result.symbol = sym;
558 void dso__delete_symbol(struct dso *dso, struct symbol *sym)
560 rb_erase_cached(&sym->rb_node, &dso->symbols);
562 dso__reset_find_symbol_cache(dso);
565 struct symbol *dso__find_symbol(struct dso *dso, u64 addr)
567 if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) {
568 dso->last_find_result.addr = addr;
569 dso->last_find_result.symbol = symbols__find(&dso->symbols, addr);
572 return dso->last_find_result.symbol;
575 struct symbol *dso__find_symbol_nocache(struct dso *dso, u64 addr)
577 return symbols__find(&dso->symbols, addr);
580 struct symbol *dso__first_symbol(struct dso *dso)
582 return symbols__first(&dso->symbols);
585 struct symbol *dso__last_symbol(struct dso *dso)
587 return symbols__last(&dso->symbols);
590 struct symbol *dso__next_symbol(struct symbol *sym)
592 return symbols__next(sym);
595 struct symbol *dso__next_symbol_by_name(struct dso *dso, size_t *idx)
597 if (*idx + 1 >= dso->symbol_names_len)
601 return dso->symbol_names[*idx];
605 * Returns the first symbol whose name matches @name.
607 struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name, size_t *idx)
609 struct symbol *s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
610 name, SYMBOL_TAG_INCLUDE__NONE, idx);
612 s = symbols__find_by_name(dso->symbol_names, dso->symbol_names_len,
613 name, SYMBOL_TAG_INCLUDE__DEFAULT_ONLY, idx);
617 void dso__sort_by_name(struct dso *dso)
619 mutex_lock(&dso->lock);
620 if (!dso__sorted_by_name(dso)) {
623 dso->symbol_names = symbols__sort_by_name(&dso->symbols, &len);
624 if (dso->symbol_names) {
625 dso->symbol_names_len = len;
626 dso__set_sorted_by_name(dso);
629 mutex_unlock(&dso->lock);
633 * While there are valid hex chars, build a long_val.
634 * Return the number of chars processed.
636 static int hex2u64(const char *ptr, u64 *long_val)
640 *long_val = strtoull(ptr, &p, 16);
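/*
 * Parse a /proc/modules-style file at @filename and call @process_module
 * for every module with its name, start address and size.
 */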
646 int modules__parse(const char *filename, void *arg,
647 int (*process_module)(void *arg, const char *name,
648 u64 start, u64 size))
655 file = fopen(filename, "r");
665 line_len = getline(&line, &n, file);
678 line[--line_len] = '\0'; /* \n */
680 sep = strrchr(line, 'x');
684 hex2u64(sep + 1, &start);
686 sep = strchr(line, ' ');
692 scnprintf(name, sizeof(name), "[%s]", line);
694 size = strtoul(sep + 1, &endptr, 0);
695 if (*endptr != ' ' && *endptr != '\t')
698 err = process_module(arg, name, start, size);
709 * These are symbols in the kernel image, so make sure that
710 * sym is from a kernel DSO.
712 static bool symbol__is_idle(const char *name)
714 const char * const idle_symbols[] = {
715 "acpi_idle_do_entry",
716 "acpi_processor_ffh_cstate_enter",
727 "mwait_idle_with_hints",
728 "mwait_idle_with_hints.constprop.0",
730 "ppc64_runlatch_off",
731 "pseries_dedicated_idle_sleep",
737 static struct strlist *idle_symbols_list;
739 if (idle_symbols_list)
740 return strlist__has_entry(idle_symbols_list, name);
742 idle_symbols_list = strlist__new(NULL, NULL);
744 for (i = 0; idle_symbols[i]; i++)
745 strlist__add(idle_symbols_list, idle_symbols[i]);
747 return strlist__has_entry(idle_symbols_list, name);
750 static int map__process_kallsym_symbol(void *arg, const char *name,
751 char type, u64 start)
754 struct dso *dso = arg;
755 struct rb_root_cached *root = &dso->symbols;
757 if (!symbol_type__filter(type))
760 /* Ignore local symbols for ARM modules */
765 * module symbols are not sorted so we add all
766 * symbols, setting length to 0, and rely on
767 * symbols__fixup_end() to fix it up.
769 sym = symbol__new(start, 0, kallsyms2elf_binding(type), kallsyms2elf_type(type), name);
773 * We will pass the symbols to the filter later, in
774 * map__split_kallsyms, when we have split the maps per module
776 __symbols__insert(root, sym, !strchr(name, '['));
782 * Loads the function entries in /proc/kallsyms into kernel_map->dso,
783 * so that we can in the next step set the symbol ->end address and then
784 * call kernel_maps__split_kallsyms.
786 static int dso__load_all_kallsyms(struct dso *dso, const char *filename)
788 return kallsyms__parse(filename, dso, map__process_kallsym_symbol);
791 static int maps__split_kallsyms_for_kcore(struct maps *kmaps, struct dso *dso)
793 struct map *curr_map;
796 struct rb_root_cached old_root = dso->symbols;
797 struct rb_root_cached *root = &dso->symbols;
798 struct rb_node *next = rb_first_cached(root);
803 *root = RB_ROOT_CACHED;
806 struct dso *curr_map_dso;
809 pos = rb_entry(next, struct symbol, rb_node);
810 next = rb_next(&pos->rb_node);
812 rb_erase_cached(&pos->rb_node, &old_root);
813 RB_CLEAR_NODE(&pos->rb_node);
814 module = strchr(pos->name, '\t');
818 curr_map = maps__find(kmaps, pos->start);
824 curr_map_dso = map__dso(curr_map);
825 pos->start -= map__start(curr_map) - map__pgoff(curr_map);
826 if (pos->end > map__end(curr_map))
827 pos->end = map__end(curr_map);
829 pos->end -= map__start(curr_map) - map__pgoff(curr_map);
830 symbols__insert(&curr_map_dso->symbols, pos);
834 /* Symbols have been adjusted */
835 dso->adjust_symbols = 1;
841 * Split the symbols into maps, making sure there are no overlaps, i.e. the
842 * kernel range is broken into several maps, named [kernel].N, as we don't have
843 * the original ELF section names that vmlinux has.
845 static int maps__split_kallsyms(struct maps *kmaps, struct dso *dso, u64 delta,
846 struct map *initial_map)
848 struct machine *machine;
849 struct map *curr_map = initial_map;
851 int count = 0, moved = 0;
852 struct rb_root_cached *root = &dso->symbols;
853 struct rb_node *next = rb_first_cached(root);
854 int kernel_range = 0;
860 machine = maps__machine(kmaps);
862 x86_64 = machine__is(machine, "x86_64");
867 pos = rb_entry(next, struct symbol, rb_node);
868 next = rb_next(&pos->rb_node);
870 module = strchr(pos->name, '\t');
872 struct dso *curr_map_dso;
874 if (!symbol_conf.use_modules)
878 curr_map_dso = map__dso(curr_map);
879 if (strcmp(curr_map_dso->short_name, module)) {
880 if (RC_CHK_ACCESS(curr_map) != RC_CHK_ACCESS(initial_map) &&
881 dso->kernel == DSO_SPACE__KERNEL_GUEST &&
882 machine__is_default_guest(machine)) {
884 * We assume all symbols of a module are
885 * contiguous in kallsyms, so curr_map
886 * points to a module and all its
887 * symbols are in its kmap. Mark it as loaded.
890 dso__set_loaded(curr_map_dso);
893 curr_map = maps__find_by_name(kmaps, module);
894 if (curr_map == NULL) {
895 pr_debug("%s/proc/{kallsyms,modules} "
896 "inconsistency while looking "
897 "for \"%s\" module!\n",
898 machine->root_dir, module);
899 curr_map = initial_map;
902 curr_map_dso = map__dso(curr_map);
903 if (curr_map_dso->loaded &&
904 !machine__is_default_guest(machine))
908 * So that we look just like we get from .ko files,
909 * i.e. not prelinked, relative to initial_map->start.
911 pos->start = map__map_ip(curr_map, pos->start);
912 pos->end = map__map_ip(curr_map, pos->end);
913 } else if (x86_64 && is_entry_trampoline(pos->name)) {
915 * These symbols are not needed anymore since the
916 * trampoline maps refer to the text section and its
917 * symbols instead. Avoid having to deal with
918 * relocations, and the assumption that the first symbol
919 * is the start of kernel text, by simply removing the
920 * symbols at this point.
923 } else if (curr_map != initial_map) {
924 char dso_name[PATH_MAX];
928 /* Kernel was relocated at boot time */
934 curr_map = initial_map;
938 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
939 snprintf(dso_name, sizeof(dso_name),
943 snprintf(dso_name, sizeof(dso_name),
947 ndso = dso__new(dso_name);
951 ndso->kernel = dso->kernel;
953 curr_map = map__new2(pos->start, ndso);
954 if (curr_map == NULL) {
959 map__set_map_ip(curr_map, identity__map_ip);
960 map__set_unmap_ip(curr_map, identity__map_ip);
961 if (maps__insert(kmaps, curr_map)) {
967 /* Kernel was relocated at boot time */
972 if (curr_map != initial_map) {
973 struct dso *curr_map_dso = map__dso(curr_map);
975 rb_erase_cached(&pos->rb_node, root);
976 symbols__insert(&curr_map_dso->symbols, pos);
983 rb_erase_cached(&pos->rb_node, root);
987 if (curr_map != initial_map &&
988 dso->kernel == DSO_SPACE__KERNEL_GUEST &&
989 machine__is_default_guest(maps__machine(kmaps))) {
990 dso__set_loaded(map__dso(curr_map));
993 return count + moved;
996 bool symbol__restricted_filename(const char *filename,
997 const char *restricted_filename)
999 bool restricted = false;
1001 if (symbol_conf.kptr_restrict) {
1002 char *r = realpath(filename, NULL);
1005 restricted = strcmp(r, restricted_filename) == 0;
1014 struct module_info {
1015 struct rb_node rb_node;
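/* Insert @mi into the @modules rbtree, keyed by module name. */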
1020 static void add_module(struct module_info *mi, struct rb_root *modules)
1022 struct rb_node **p = &modules->rb_node;
1023 struct rb_node *parent = NULL;
1024 struct module_info *m;
1026 while (*p != NULL) {
1028 m = rb_entry(parent, struct module_info, rb_node);
1029 if (strcmp(mi->name, m->name) < 0)
1032 p = &(*p)->rb_right;
1034 rb_link_node(&mi->rb_node, parent, p);
1035 rb_insert_color(&mi->rb_node, modules);
1038 static void delete_modules(struct rb_root *modules)
1040 struct module_info *mi;
1041 struct rb_node *next = rb_first(modules);
1044 mi = rb_entry(next, struct module_info, rb_node);
1045 next = rb_next(&mi->rb_node);
1046 rb_erase(&mi->rb_node, modules);
1052 static struct module_info *find_module(const char *name,
1053 struct rb_root *modules)
1055 struct rb_node *n = modules->rb_node;
1058 struct module_info *m;
1061 m = rb_entry(n, struct module_info, rb_node);
1062 cmp = strcmp(name, m->name);
1074 static int __read_proc_modules(void *arg, const char *name, u64 start,
1075 u64 size __maybe_unused)
1077 struct rb_root *modules = arg;
1078 struct module_info *mi;
1080 mi = zalloc(sizeof(struct module_info));
1084 mi->name = strdup(name);
1092 add_module(mi, modules);
1097 static int read_proc_modules(const char *filename, struct rb_root *modules)
1099 if (symbol__restricted_filename(filename, "/proc/modules"))
1102 if (modules__parse(filename, modules, __read_proc_modules)) {
1103 delete_modules(modules);
1110 int compare_proc_modules(const char *from, const char *to)
1112 struct rb_root from_modules = RB_ROOT;
1113 struct rb_root to_modules = RB_ROOT;
1114 struct rb_node *from_node, *to_node;
1115 struct module_info *from_m, *to_m;
1118 if (read_proc_modules(from, &from_modules))
1121 if (read_proc_modules(to, &to_modules))
1122 goto out_delete_from;
1124 from_node = rb_first(&from_modules);
1125 to_node = rb_first(&to_modules);
1130 from_m = rb_entry(from_node, struct module_info, rb_node);
1131 to_m = rb_entry(to_node, struct module_info, rb_node);
1133 if (from_m->start != to_m->start ||
1134 strcmp(from_m->name, to_m->name))
1137 from_node = rb_next(from_node);
1138 to_node = rb_next(to_node);
1141 if (!from_node && !to_node)
1144 delete_modules(&to_modules);
1146 delete_modules(&from_modules);
1151 static int do_validate_kcore_modules(const char *filename, struct maps *kmaps)
1153 struct rb_root modules = RB_ROOT;
1154 struct map_rb_node *old_node;
1157 err = read_proc_modules(filename, &modules);
1161 maps__for_each_entry(kmaps, old_node) {
1162 struct map *old_map = old_node->map;
1163 struct module_info *mi;
1166 if (!__map__is_kmodule(old_map)) {
1169 dso = map__dso(old_map);
1170 /* Module must be in memory at the same address */
1171 mi = find_module(dso->short_name, &modules);
1172 if (!mi || mi->start != map__start(old_map)) {
1178 delete_modules(&modules);
1183 * If kallsyms is referenced by name then we look for filename in the same directory.
1186 static bool filename_from_kallsyms_filename(char *filename,
1187 const char *base_name,
1188 const char *kallsyms_filename)
1192 strcpy(filename, kallsyms_filename);
1193 name = strrchr(filename, '/');
1199 if (!strcmp(name, "kallsyms")) {
1200 strcpy(name, base_name);
1207 static int validate_kcore_modules(const char *kallsyms_filename,
1210 struct maps *kmaps = map__kmaps(map);
1211 char modules_filename[PATH_MAX];
1216 if (!filename_from_kallsyms_filename(modules_filename, "modules",
1220 if (do_validate_kcore_modules(modules_filename, kmaps))
1226 static int validate_kcore_addresses(const char *kallsyms_filename,
1229 struct kmap *kmap = map__kmap(map);
1234 if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
1237 if (kallsyms__get_function_start(kallsyms_filename,
1238 kmap->ref_reloc_sym->name, &start))
1240 if (start != kmap->ref_reloc_sym->addr)
1244 return validate_kcore_modules(kallsyms_filename, map);
1247 struct kcore_mapfn_data {
1249 struct list_head maps;
1252 static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
1254 struct kcore_mapfn_data *md = data;
1255 struct map_list_node *list_node = map_list_node__new();
1260 list_node->map = map__new2(start, md->dso);
1261 if (!list_node->map) {
1266 map__set_end(list_node->map, map__start(list_node->map) + len);
1267 map__set_pgoff(list_node->map, pgoff);
1269 list_add(&list_node->node, &md->maps);
1275 * Merges map into maps by splitting the new map within the existing map regions.
1278 int maps__merge_in(struct maps *kmaps, struct map *new_map)
1280 struct map_rb_node *rb_node;
1284 maps__for_each_entry(kmaps, rb_node) {
1285 struct map *old_map = rb_node->map;
1287 /* no overlap with this one */
1288 if (map__end(new_map) < map__start(old_map) ||
1289 map__start(new_map) >= map__end(old_map))
1292 if (map__start(new_map) < map__start(old_map)) {
1297 if (map__end(new_map) < map__end(old_map)) {
1299 * |new......| -> |new..|
1300 * |old....| -> |old....|
1302 map__set_end(new_map, map__start(old_map));
1305 * |new.............| -> |new..| |new..|
1306 * |old....| -> |old....|
1308 struct map_list_node *m = map_list_node__new();
1315 m->map = map__clone(new_map);
1322 map__set_end(m->map, map__start(old_map));
1323 list_add_tail(&m->node, &merged);
1324 map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
1325 map__set_start(new_map, map__end(old_map));
1332 if (map__end(new_map) < map__end(old_map)) {
1335 * |old.........| -> |old.........|
1342 * |new......| -> |new...|
1343 * |old....| -> |old....|
1345 map__add_pgoff(new_map, map__end(old_map) - map__start(new_map));
1346 map__set_start(new_map, map__end(old_map));
1352 while (!list_empty(&merged)) {
1353 struct map_list_node *old_node;
1355 old_node = list_entry(merged.next, struct map_list_node, node);
1356 list_del_init(&old_node->node);
1358 err = maps__insert(kmaps, old_node->map);
1359 map__put(old_node->map);
1365 err = maps__insert(kmaps, new_map);
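/*
 * Replace the kernel maps built from kallsyms with maps read from kcore, so
 * that kernel object code can be read at the addresses it is actually loaded at.
 */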
1371 static int dso__load_kcore(struct dso *dso, struct map *map,
1372 const char *kallsyms_filename)
1374 struct maps *kmaps = map__kmaps(map);
1375 struct kcore_mapfn_data md;
1376 struct map *replacement_map = NULL;
1377 struct map_rb_node *old_node, *next;
1378 struct machine *machine;
1381 char kcore_filename[PATH_MAX];
1387 machine = maps__machine(kmaps);
1389 /* This function requires that the map is the kernel map */
1390 if (!__map__is_kernel(map))
1393 if (!filename_from_kallsyms_filename(kcore_filename, "kcore",
1397 /* Modules and kernel must be present at their original addresses */
1398 if (validate_kcore_addresses(kallsyms_filename, map))
1402 INIT_LIST_HEAD(&md.maps);
1404 fd = open(kcore_filename, O_RDONLY);
1406 pr_debug("Failed to open %s. Note /proc/kcore requires CAP_SYS_RAWIO capability to access.\n",
1411 /* Read new maps into temporary lists */
1412 err = file__read_maps(fd, map__prot(map) & PROT_EXEC, kcore_mapfn, &md,
1416 dso->is_64_bit = is_64_bit;
1418 if (list_empty(&md.maps)) {
1423 /* Remove old maps */
1424 maps__for_each_entry_safe(kmaps, old_node, next) {
1425 struct map *old_map = old_node->map;
1428 * We need to preserve eBPF maps even if they are
1429 * covered by kcore, because we need to access
1430 * eBPF dso for source data.
1432 if (old_map != map && !__map__is_bpf_prog(old_map))
1433 maps__remove(kmaps, old_map);
1435 machine->trampolines_mapped = false;
1437 /* Find the kernel map using the '_stext' symbol */
1438 if (!kallsyms__get_function_start(kallsyms_filename, "_stext", &stext)) {
1439 u64 replacement_size = 0;
1440 struct map_list_node *new_node;
1442 list_for_each_entry(new_node, &md.maps, node) {
1443 struct map *new_map = new_node->map;
1444 u64 new_size = map__size(new_map);
1446 if (!(stext >= map__start(new_map) && stext < map__end(new_map)))
1450 * On some architectures, ARM64 for example, the kernel
1451 * text can get allocated inside of the vmalloc segment.
1452 * Select the smallest matching segment, in case stext
1453 * falls within more than one in the list.
1455 if (!replacement_map || new_size < replacement_size) {
1456 replacement_map = new_map;
1457 replacement_size = new_size;
1462 if (!replacement_map)
1463 replacement_map = list_entry(md.maps.next, struct map_list_node, node)->map;
1466 while (!list_empty(&md.maps)) {
1467 struct map_list_node *new_node = list_entry(md.maps.next, struct map_list_node, node);
1468 struct map *new_map = new_node->map;
1470 list_del_init(&new_node->node);
1472 if (RC_CHK_ACCESS(new_map) == RC_CHK_ACCESS(replacement_map)) {
1473 struct map *map_ref;
1475 map__set_start(map, map__start(new_map));
1476 map__set_end(map, map__end(new_map));
1477 map__set_pgoff(map, map__pgoff(new_map));
1478 map__set_map_ip(map, map__map_ip_ptr(new_map));
1479 map__set_unmap_ip(map, map__unmap_ip_ptr(new_map));
1480 /* Ensure maps are correctly ordered */
1481 map_ref = map__get(map);
1482 maps__remove(kmaps, map_ref);
1483 err = maps__insert(kmaps, map_ref);
1490 * Merge kcore map into existing maps,
1491 * and ensure that current maps (eBPF) stay intact.
1494 if (maps__merge_in(kmaps, new_map)) {
1502 if (machine__is(machine, "x86_64")) {
1506 * If one of the corresponding symbols is there, assume the
1507 * entry trampoline maps are too.
1509 if (!kallsyms__get_function_start(kallsyms_filename,
1510 ENTRY_TRAMPOLINE_NAME,
1512 machine->trampolines_mapped = true;
1516 * Set the data type and long name so that kcore can be read via
1517 * dso__data_read_addr().
1519 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1520 dso->binary_type = DSO_BINARY_TYPE__GUEST_KCORE;
1522 dso->binary_type = DSO_BINARY_TYPE__KCORE;
1523 dso__set_long_name(dso, strdup(kcore_filename), true);
1527 if (map__prot(map) & PROT_EXEC)
1528 pr_debug("Using %s for kernel object code\n", kcore_filename);
1530 pr_debug("Using %s for kernel data\n", kcore_filename);
1535 while (!list_empty(&md.maps)) {
1536 struct map_list_node *list_node;
1538 list_node = list_entry(md.maps.next, struct map_list_node, node);
1539 list_del_init(&list_node->node);
1540 map__zput(list_node->map);
1548 * If the kernel is relocated at boot time, kallsyms won't match. Compute the
1549 * delta based on the relocation reference symbol.
1551 static int kallsyms__delta(struct kmap *kmap, const char *filename, u64 *delta)
1555 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
1558 if (kallsyms__get_function_start(filename, kmap->ref_reloc_sym->name, &addr))
1561 *delta = addr - kmap->ref_reloc_sym->addr;
1565 int __dso__load_kallsyms(struct dso *dso, const char *filename,
1566 struct map *map, bool no_kcore)
1568 struct kmap *kmap = map__kmap(map);
1571 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
1574 if (!kmap || !kmap->kmaps)
1577 if (dso__load_all_kallsyms(dso, filename) < 0)
1580 if (kallsyms__delta(kmap, filename, &delta))
1583 symbols__fixup_end(&dso->symbols, true);
1584 symbols__fixup_duplicate(&dso->symbols);
1586 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1587 dso->symtab_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
1589 dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
1591 if (!no_kcore && !dso__load_kcore(dso, map, filename))
1592 return maps__split_kallsyms_for_kcore(kmap->kmaps, dso);
1594 return maps__split_kallsyms(kmap->kmaps, dso, delta, map);
1597 int dso__load_kallsyms(struct dso *dso, const char *filename,
1600 return __dso__load_kallsyms(dso, filename, map, false);
1603 static int dso__load_perf_map(const char *map_path, struct dso *dso)
1610 file = fopen(map_path, "r");
1614 while (!feof(file)) {
1619 line_len = getline(&line, &n, file);
1626 line[--line_len] = '\0'; /* \n */
1628 len = hex2u64(line, &start);
1631 if (len + 2 >= line_len)
1634 len += hex2u64(line + len, &size);
1637 if (len + 2 >= line_len)
1640 sym = symbol__new(start, size, STB_GLOBAL, STT_FUNC, line + len);
1643 goto out_delete_line;
1645 symbols__insert(&dso->symbols, sym);
1660 #ifdef HAVE_LIBBFD_SUPPORT
1661 #define PACKAGE 'perf'
1664 static int bfd_symbols__cmpvalue(const void *a, const void *b)
1666 const asymbol *as = *(const asymbol **)a, *bs = *(const asymbol **)b;
1668 if (bfd_asymbol_value(as) != bfd_asymbol_value(bs))
1669 return bfd_asymbol_value(as) - bfd_asymbol_value(bs);
1671 return bfd_asymbol_name(as)[0] - bfd_asymbol_name(bs)[0];
1674 static int bfd2elf_binding(asymbol *symbol)
1676 if (symbol->flags & BSF_WEAK)
1678 if (symbol->flags & BSF_GLOBAL)
1680 if (symbol->flags & BSF_LOCAL)
1685 int dso__load_bfd_symbols(struct dso *dso, const char *debugfile)
1688 long symbols_size, symbols_count, i;
1690 asymbol **symbols, *sym;
1691 struct symbol *symbol;
1695 abfd = bfd_openr(debugfile, NULL);
1699 if (!bfd_check_format(abfd, bfd_object)) {
1700 pr_debug2("%s: cannot read %s bfd file.\n", __func__,
1705 if (bfd_get_flavour(abfd) == bfd_target_elf_flavour)
1708 symbols_size = bfd_get_symtab_upper_bound(abfd);
1709 if (symbols_size == 0) {
1714 if (symbols_size < 0)
1717 symbols = malloc(symbols_size);
1721 symbols_count = bfd_canonicalize_symtab(abfd, symbols);
1722 if (symbols_count < 0)
1725 section = bfd_get_section_by_name(abfd, ".text");
1727 for (i = 0; i < symbols_count; ++i) {
1728 if (!strcmp(bfd_asymbol_name(symbols[i]), "__ImageBase") ||
1729 !strcmp(bfd_asymbol_name(symbols[i]), "__image_base__"))
1732 if (i < symbols_count) {
1733 /* PE symbols can only have 4 bytes, so use .text high bits */
1734 dso->text_offset = section->vma - (u32)section->vma;
1735 dso->text_offset += (u32)bfd_asymbol_value(symbols[i]);
1737 dso->text_offset = section->vma - section->filepos;
1741 qsort(symbols, symbols_count, sizeof(asymbol *), bfd_symbols__cmpvalue);
1743 #ifdef bfd_get_section
1744 #define bfd_asymbol_section bfd_get_section
1746 for (i = 0; i < symbols_count; ++i) {
1748 section = bfd_asymbol_section(sym);
1749 if (bfd2elf_binding(sym) < 0)
1752 while (i + 1 < symbols_count &&
1753 bfd_asymbol_section(symbols[i + 1]) == section &&
1754 bfd2elf_binding(symbols[i + 1]) < 0)
1757 if (i + 1 < symbols_count &&
1758 bfd_asymbol_section(symbols[i + 1]) == section)
1759 len = symbols[i + 1]->value - sym->value;
1761 len = section->size - sym->value;
1763 start = bfd_asymbol_value(sym) - dso->text_offset;
1764 symbol = symbol__new(start, len, bfd2elf_binding(sym), STT_FUNC,
1765 bfd_asymbol_name(sym));
1769 symbols__insert(&dso->symbols, symbol);
1771 #ifdef bfd_get_section
1772 #undef bfd_asymbol_section
1775 symbols__fixup_end(&dso->symbols, false);
1776 symbols__fixup_duplicate(&dso->symbols);
1777 dso->adjust_symbols = 1;
1788 static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
1789 enum dso_binary_type type)
1792 case DSO_BINARY_TYPE__JAVA_JIT:
1793 case DSO_BINARY_TYPE__DEBUGLINK:
1794 case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
1795 case DSO_BINARY_TYPE__FEDORA_DEBUGINFO:
1796 case DSO_BINARY_TYPE__UBUNTU_DEBUGINFO:
1797 case DSO_BINARY_TYPE__MIXEDUP_UBUNTU_DEBUGINFO:
1798 case DSO_BINARY_TYPE__BUILDID_DEBUGINFO:
1799 case DSO_BINARY_TYPE__OPENEMBEDDED_DEBUGINFO:
1800 return !kmod && dso->kernel == DSO_SPACE__USER;
1802 case DSO_BINARY_TYPE__KALLSYMS:
1803 case DSO_BINARY_TYPE__VMLINUX:
1804 case DSO_BINARY_TYPE__KCORE:
1805 return dso->kernel == DSO_SPACE__KERNEL;
1807 case DSO_BINARY_TYPE__GUEST_KALLSYMS:
1808 case DSO_BINARY_TYPE__GUEST_VMLINUX:
1809 case DSO_BINARY_TYPE__GUEST_KCORE:
1810 return dso->kernel == DSO_SPACE__KERNEL_GUEST;
1812 case DSO_BINARY_TYPE__GUEST_KMODULE:
1813 case DSO_BINARY_TYPE__GUEST_KMODULE_COMP:
1814 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE:
1815 case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
1817 * kernel modules know their symtab type - it's set when
1818 * creating a module dso in machine__addnew_module_map().
1820 return kmod && dso->symtab_type == type;
1822 case DSO_BINARY_TYPE__BUILD_ID_CACHE:
1823 case DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO:
1826 case DSO_BINARY_TYPE__BPF_PROG_INFO:
1827 case DSO_BINARY_TYPE__BPF_IMAGE:
1828 case DSO_BINARY_TYPE__OOL:
1829 case DSO_BINARY_TYPE__NOT_FOUND:
1835 /* Checks for the existence of the perf-<pid>.map file in two different
1836 * locations. First, if the process is in a separate mount namespace, check in
1837 * that namespace using the pid of the innermost pid namespace. If it's not in a
1838 * namespace, or the file can't be found there, try in the mount namespace of
1839 * the tracing process using our view of its pid.
1841 static int dso__find_perf_map(char *filebuf, size_t bufsz,
1842 struct nsinfo **nsip)
1844 struct nscookie nsc;
1846 struct nsinfo *nnsi;
1851 if (nsinfo__need_setns(nsi)) {
1852 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__nstgid(nsi));
1853 nsinfo__mountns_enter(nsi, &nsc);
1854 rc = access(filebuf, R_OK);
1855 nsinfo__mountns_exit(&nsc);
1860 nnsi = nsinfo__copy(nsi);
1864 nsinfo__clear_need_setns(nnsi);
1865 snprintf(filebuf, bufsz, "/tmp/perf-%d.map", nsinfo__tgid(nnsi));
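/*
 * Load symbols for @dso: kernel and guest-kernel dsos go through
 * kallsyms/vmlinux, /tmp/perf-<pid>.map files are parsed directly, and other
 * dsos iterate over the candidate types in binary_type_symtab[].
 */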
1873 int dso__load(struct dso *dso, struct map *map)
1878 struct machine *machine = NULL;
1879 char *root_dir = (char *) "";
1881 struct symsrc ss_[2];
1882 struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
1885 struct build_id bid;
1886 struct nscookie nsc;
1887 char newmapname[PATH_MAX];
1888 const char *map_path = dso->long_name;
1890 mutex_lock(&dso->lock);
1891 perfmap = strncmp(dso->name, "/tmp/perf-", 10) == 0;
1893 if (dso->nsinfo && (dso__find_perf_map(newmapname,
1894 sizeof(newmapname), &dso->nsinfo) == 0)) {
1895 map_path = newmapname;
1899 nsinfo__mountns_enter(dso->nsinfo, &nsc);
1901 /* check again under the dso->lock */
1902 if (dso__loaded(dso)) {
1907 kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1908 dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
1909 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE ||
1910 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP;
1912 if (dso->kernel && !kmod) {
1913 if (dso->kernel == DSO_SPACE__KERNEL)
1914 ret = dso__load_kernel_sym(dso, map);
1915 else if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
1916 ret = dso__load_guest_kernel_sym(dso, map);
1918 machine = maps__machine(map__kmaps(map));
1919 if (machine__is(machine, "x86_64"))
1920 machine__map_x86_64_entry_trampolines(machine, dso);
1924 dso->adjust_symbols = 0;
1927 ret = dso__load_perf_map(map_path, dso);
1928 dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
1929 DSO_BINARY_TYPE__NOT_FOUND;
1934 root_dir = machine->root_dir;
1936 name = malloc(PATH_MAX);
1941 * Read the build id if possible. This is required for
1942 * DSO_BINARY_TYPE__BUILDID_DEBUGINFO to work
1944 if (!dso->has_build_id &&
1945 is_regular_file(dso->long_name)) {
1946 __symbol__join_symfs(name, PATH_MAX, dso->long_name);
1947 if (filename__read_build_id(name, &bid) > 0)
1948 dso__set_build_id(dso, &bid);
1952 * Iterate over candidate debug images.
1953 * Keep track of "interesting" ones (those which have a symtab, dynsym,
1954 * and/or opd section) for processing.
1956 for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) {
1957 struct symsrc *ss = &ss_[ss_pos];
1958 bool next_slot = false;
1964 enum dso_binary_type symtab_type = binary_type_symtab[i];
1966 nsexit = (symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE ||
1967 symtab_type == DSO_BINARY_TYPE__BUILD_ID_CACHE_DEBUGINFO);
1969 if (!dso__is_compatible_symtab_type(dso, kmod, symtab_type))
1972 if (dso__read_binary_type_filename(dso, symtab_type,
1973 root_dir, name, PATH_MAX))
1977 nsinfo__mountns_exit(&nsc);
1979 is_reg = is_regular_file(name);
1980 if (!is_reg && errno == ENOENT && dso->nsinfo) {
1981 char *new_name = dso__filename_with_chroot(dso, name);
1983 is_reg = is_regular_file(new_name);
1984 strlcpy(name, new_name, PATH_MAX);
1989 #ifdef HAVE_LIBBFD_SUPPORT
1991 bfdrc = dso__load_bfd_symbols(dso, name);
1993 if (is_reg && bfdrc < 0)
1994 sirc = symsrc__init(ss, dso, name, symtab_type);
1997 nsinfo__mountns_enter(dso->nsinfo, &nsc);
2004 if (!is_reg || sirc < 0)
2007 if (!syms_ss && symsrc__has_symtab(ss)) {
2010 if (!dso->symsrc_filename)
2011 dso->symsrc_filename = strdup(name);
2014 if (!runtime_ss && symsrc__possibly_runtime(ss)) {
2022 if (syms_ss && runtime_ss)
2025 symsrc__destroy(ss);
2030 if (!runtime_ss && !syms_ss)
2033 if (runtime_ss && !syms_ss) {
2034 syms_ss = runtime_ss;
2037 /* We'll have to hope for the best */
2038 if (!runtime_ss && syms_ss)
2039 runtime_ss = syms_ss;
2042 ret = dso__load_sym(dso, map, syms_ss, runtime_ss, kmod);
2049 nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss);
2054 for (; ss_pos > 0; ss_pos--)
2055 symsrc__destroy(&ss_[ss_pos - 1]);
2058 if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
2061 dso__set_loaded(dso);
2062 mutex_unlock(&dso->lock);
2063 nsinfo__mountns_exit(&nsc);
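/*
 * Sort comparator for maps: order by dso short name, then by start address,
 * then by pointer so that distinct maps always compare unequal.
 */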
2068 static int map__strcmp(const void *a, const void *b)
2070 const struct map *map_a = *(const struct map **)a;
2071 const struct map *map_b = *(const struct map **)b;
2072 const struct dso *dso_a = map__dso(map_a);
2073 const struct dso *dso_b = map__dso(map_b);
2074 int ret = strcmp(dso_a->short_name, dso_b->short_name);
2076 if (ret == 0 && map_a != map_b) {
2078 * Ensure that distinct maps with equal names have a stable order, in part
2079 * to aid reference counting.
2081 ret = (int)map__start(map_a) - (int)map__start(map_b);
2083 ret = (int)((intptr_t)map_a - (intptr_t)map_b);
2089 static int map__strcmp_name(const void *name, const void *b)
2091 const struct dso *dso = map__dso(*(const struct map **)b);
2093 return strcmp(name, dso->short_name);
2096 void __maps__sort_by_name(struct maps *maps)
2098 qsort(maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(struct map *), map__strcmp);
2101 static int map__groups__sort_by_name_from_rbtree(struct maps *maps)
2103 struct map_rb_node *rb_node;
2104 struct map **maps_by_name = realloc(maps__maps_by_name(maps),
2105 maps__nr_maps(maps) * sizeof(struct map *));
2108 if (maps_by_name == NULL)
2111 up_read(maps__lock(maps));
2112 down_write(maps__lock(maps));
2114 RC_CHK_ACCESS(maps)->maps_by_name = maps_by_name;
2115 RC_CHK_ACCESS(maps)->nr_maps_allocated = maps__nr_maps(maps);
2117 maps__for_each_entry(maps, rb_node)
2118 maps_by_name[i++] = map__get(rb_node->map);
2120 __maps__sort_by_name(maps);
2122 up_write(maps__lock(maps));
2123 down_read(maps__lock(maps));
2128 static struct map *__maps__find_by_name(struct maps *maps, const char *name)
2132 if (maps__maps_by_name(maps) == NULL &&
2133 map__groups__sort_by_name_from_rbtree(maps))
2136 mapp = bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps),
2137 sizeof(*mapp), map__strcmp_name);
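/*
 * Find a map by dso short name: check the last-search cache, then the sorted
 * maps_by_name array, and fall back to walking the rbtree.
 */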
2143 struct map *maps__find_by_name(struct maps *maps, const char *name)
2145 struct map_rb_node *rb_node;
2148 down_read(maps__lock(maps));
2151 if (RC_CHK_ACCESS(maps)->last_search_by_name) {
2152 const struct dso *dso = map__dso(RC_CHK_ACCESS(maps)->last_search_by_name);
2154 if (strcmp(dso->short_name, name) == 0) {
2155 map = RC_CHK_ACCESS(maps)->last_search_by_name;
2160 * If we have maps->maps_by_name, then the name isn't in the rbtree,
2161 * as maps->maps_by_name mirrors the rbtree when lookups by name are done.
2164 map = __maps__find_by_name(maps, name);
2165 if (map || maps__maps_by_name(maps) != NULL)
2168 /* Fallback to traversing the rbtree... */
2169 maps__for_each_entry(maps, rb_node) {
2173 dso = map__dso(map);
2174 if (strcmp(dso->short_name, name) == 0) {
2175 RC_CHK_ACCESS(maps)->last_search_by_name = map;
2182 up_read(maps__lock(maps));
2186 int dso__load_vmlinux(struct dso *dso, struct map *map,
2187 const char *vmlinux, bool vmlinux_allocated)
2191 char symfs_vmlinux[PATH_MAX];
2192 enum dso_binary_type symtab_type;
2194 if (vmlinux[0] == '/')
2195 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
2197 symbol__join_symfs(symfs_vmlinux, vmlinux);
2199 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
2200 symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
2202 symtab_type = DSO_BINARY_TYPE__VMLINUX;
2204 if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type))
2208 * dso__load_sym() may copy 'dso' which will result in the copies having
2209 * an incorrect long name unless we set it here first.
2211 dso__set_long_name(dso, vmlinux, vmlinux_allocated);
2212 if (dso->kernel == DSO_SPACE__KERNEL_GUEST)
2213 dso->binary_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
2215 dso->binary_type = DSO_BINARY_TYPE__VMLINUX;
2217 err = dso__load_sym(dso, map, &ss, &ss, 0);
2218 symsrc__destroy(&ss);
2221 dso__set_loaded(dso);
2222 pr_debug("Using %s for symbols\n", symfs_vmlinux);
2228 int dso__load_vmlinux_path(struct dso *dso, struct map *map)
2231 char *filename = NULL;
2233 pr_debug("Looking at the vmlinux_path (%d entries long)\n",
2234 vmlinux_path__nr_entries + 1);
2236 for (i = 0; i < vmlinux_path__nr_entries; ++i) {
2237 err = dso__load_vmlinux(dso, map, vmlinux_path[i], false);
2242 if (!symbol_conf.ignore_vmlinux_buildid)
2243 filename = dso__build_id_filename(dso, NULL, 0, false);
2244 if (filename != NULL) {
2245 err = dso__load_vmlinux(dso, map, filename, true);
2254 static bool visible_dir_filter(const char *name, struct dirent *d)
2256 if (d->d_type != DT_DIR)
2258 return lsdir_no_dot_filter(name, d);
2261 static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
2263 char kallsyms_filename[PATH_MAX];
2265 struct strlist *dirs;
2266 struct str_node *nd;
2268 dirs = lsdir(dir, visible_dir_filter);
2272 strlist__for_each_entry(nd, dirs) {
2273 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
2274 "%s/%s/kallsyms", dir, nd->s);
2275 if (!validate_kcore_addresses(kallsyms_filename, map)) {
2276 strlcpy(dir, kallsyms_filename, dir_sz);
2282 strlist__delete(dirs);
2288 * Use open(O_RDONLY) to check readability directly instead of access(R_OK)
2289 * since access(R_OK) only checks with the real UID/GID but open() uses the effective
2290 * UID/GID and actual capabilities (e.g. /proc/kcore requires CAP_SYS_RAWIO).
2292 static bool filename__readable(const char *file)
2294 int fd = open(file, O_RDONLY);
2301 static char *dso__find_kallsyms(struct dso *dso, struct map *map)
2303 struct build_id bid;
2304 char sbuild_id[SBUILD_ID_SIZE];
2305 bool is_host = false;
2306 char path[PATH_MAX];
2308 if (!dso->has_build_id) {
2310 * Last resort, if we don't have a build-id and couldn't find
2311 * any vmlinux file, try the running kernel kallsyms table.
2316 if (sysfs__read_build_id("/sys/kernel/notes", &bid) == 0)
2317 is_host = dso__build_id_equal(dso, &bid);
2319 /* Try a fast path for /proc/kallsyms if possible */
2322 * Do not check the build-id cache, unless we know we cannot use
2323 * /proc/kcore or the module maps don't match /proc/kallsyms.
2324 * To check readability of /proc/kcore, do not use access(R_OK)
2325 * since /proc/kcore requires CAP_SYS_RAWIO to read and access() can't check that.
2328 if (filename__readable("/proc/kcore") &&
2329 !validate_kcore_addresses("/proc/kallsyms", map))
2333 build_id__sprintf(&dso->bid, sbuild_id);
2335 /* Find kallsyms in build-id cache with kcore */
2336 scnprintf(path, sizeof(path), "%s/%s/%s",
2337 buildid_dir, DSO__NAME_KCORE, sbuild_id);
2339 if (!find_matching_kcore(map, path, sizeof(path)))
2340 return strdup(path);
2342 /* Use current /proc/kallsyms if possible */
2345 return strdup("/proc/kallsyms");
2348 /* Finally, find a cache of kallsyms */
2349 if (!build_id_cache__kallsyms_path(sbuild_id, path, sizeof(path))) {
2350 pr_err("No kallsyms or vmlinux with build-id %s was found\n",
2355 return strdup(path);
2358 static int dso__load_kernel_sym(struct dso *dso, struct map *map)
2361 const char *kallsyms_filename = NULL;
2362 char *kallsyms_allocated_filename = NULL;
2363 char *filename = NULL;
2366 * Step 1: if the user specified a kallsyms or vmlinux filename, use
2367 * it and only it, reporting errors to the user if it cannot be used.
2369 * For instance, try to analyse an ARM perf.data file _without_ a
2370 * build-id, or if the user specifies the wrong path to the right
2371 * vmlinux file, obviously we can't fall back to another vmlinux (an
2372 * x86_64 one, on the machine where analysis is being performed, say),
2373 * or worse, /proc/kallsyms.
2375 * If the specified file _has_ a build-id and there is a build-id
2376 * section in the perf.data file, we will still do the expected
2377 * validation in dso__load_vmlinux and will bail out if they don't match.
2380 if (symbol_conf.kallsyms_name != NULL) {
2381 kallsyms_filename = symbol_conf.kallsyms_name;
2385 if (!symbol_conf.ignore_vmlinux && symbol_conf.vmlinux_name != NULL) {
2386 return dso__load_vmlinux(dso, map, symbol_conf.vmlinux_name, false);
2390 * Before checking on common vmlinux locations, check if it's
2391 * stored as a standard build-id binary (not kallsyms) under the build-id cache.
2394 if (!symbol_conf.ignore_vmlinux_buildid)
2395 filename = __dso__build_id_filename(dso, NULL, 0, false, false);
2396 if (filename != NULL) {
2397 err = dso__load_vmlinux(dso, map, filename, true);
2403 if (!symbol_conf.ignore_vmlinux && vmlinux_path != NULL) {
2404 err = dso__load_vmlinux_path(dso, map);
2409 /* do not try local files if a symfs was given */
2410 if (symbol_conf.symfs[0] != 0)
2413 kallsyms_allocated_filename = dso__find_kallsyms(dso, map);
2414 if (!kallsyms_allocated_filename)
2417 kallsyms_filename = kallsyms_allocated_filename;
2420 err = dso__load_kallsyms(dso, kallsyms_filename, map);
2422 pr_debug("Using %s for symbols\n", kallsyms_filename);
2423 free(kallsyms_allocated_filename);
2425 if (err > 0 && !dso__is_kcore(dso)) {
2426 dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
2427 dso__set_long_name(dso, DSO__NAME_KALLSYMS, false);
2428 map__fixup_start(map);
2429 map__fixup_end(map);
2435 static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map)
2438 const char *kallsyms_filename;
2439 struct machine *machine = maps__machine(map__kmaps(map));
2440 char path[PATH_MAX];
2442 if (machine->kallsyms_filename) {
2443 kallsyms_filename = machine->kallsyms_filename;
2444 } else if (machine__is_default_guest(machine)) {
2446 * if the user specified a vmlinux filename, use it and only
2447 * it, reporting errors to the user if it cannot be used.
2448 * Or use the guest kallsyms file supplied by the user on the command line.
2450 if (symbol_conf.default_guest_vmlinux_name != NULL) {
2451 err = dso__load_vmlinux(dso, map,
2452 symbol_conf.default_guest_vmlinux_name,
2457 kallsyms_filename = symbol_conf.default_guest_kallsyms;
2458 if (!kallsyms_filename)
2461 sprintf(path, "%s/proc/kallsyms", machine->root_dir);
2462 kallsyms_filename = path;
2465 err = dso__load_kallsyms(dso, kallsyms_filename, map);
2467 pr_debug("Using %s for symbols\n", kallsyms_filename);
2468 if (err > 0 && !dso__is_kcore(dso)) {
2469 dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
2470 dso__set_long_name(dso, machine->mmap_name, false);
2471 map__fixup_start(map);
2472 map__fixup_end(map);
2478 static void vmlinux_path__exit(void)
2480 while (--vmlinux_path__nr_entries >= 0)
2481 zfree(&vmlinux_path[vmlinux_path__nr_entries]);
2482 vmlinux_path__nr_entries = 0;
2484 zfree(&vmlinux_path);
2487 static const char * const vmlinux_paths[] = {
2492 static const char * const vmlinux_paths_upd[] = {
2494 "/usr/lib/debug/boot/vmlinux-%s",
2495 "/lib/modules/%s/build/vmlinux",
2496 "/usr/lib/debug/lib/modules/%s/vmlinux",
2497 "/usr/lib/debug/boot/vmlinux-%s.debug"
2500 static int vmlinux_path__add(const char *new_entry)
2502 vmlinux_path[vmlinux_path__nr_entries] = strdup(new_entry);
2503 if (vmlinux_path[vmlinux_path__nr_entries] == NULL)
2505 ++vmlinux_path__nr_entries;
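/*
 * Build vmlinux_path[] from the static candidate list, appending
 * kernel-version specific locations unless a symfs was given.
 */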
2510 static int vmlinux_path__init(struct perf_env *env)
2514 char *kernel_version;
2517 vmlinux_path = malloc(sizeof(char *) * (ARRAY_SIZE(vmlinux_paths) +
2518 ARRAY_SIZE(vmlinux_paths_upd)));
2519 if (vmlinux_path == NULL)
2522 for (i = 0; i < ARRAY_SIZE(vmlinux_paths); i++)
2523 if (vmlinux_path__add(vmlinux_paths[i]) < 0)
2526 /* only try kernel version if no symfs was given */
2527 if (symbol_conf.symfs[0] != 0)
2531 kernel_version = env->os_release;
2533 if (uname(&uts) < 0)
2536 kernel_version = uts.release;
2539 for (i = 0; i < ARRAY_SIZE(vmlinux_paths_upd); i++) {
2540 snprintf(bf, sizeof(bf), vmlinux_paths_upd[i], kernel_version);
2541 if (vmlinux_path__add(bf) < 0)
2548 vmlinux_path__exit();
2552 int setup_list(struct strlist **list, const char *list_str,
2553 const char *list_name)
2555 if (list_str == NULL)
2558 *list = strlist__new(list_str, NULL);
2560 pr_err("problems parsing %s list\n", list_name);
2564 symbol_conf.has_filter = true;
2568 int setup_intlist(struct intlist **list, const char *list_str,
2569 const char *list_name)
2571 if (list_str == NULL)
2574 *list = intlist__new(list_str);
2576 pr_err("problems parsing %s list\n", list_name);
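/*
 * Move the entries of @sym_list that parse as hex addresses into a newly
 * allocated *addr_list intlist.
 */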
2582 static int setup_addrlist(struct intlist **addr_list, struct strlist *sym_list)
2584 struct str_node *pos, *tmp;
2590 *addr_list = intlist__new(NULL);
2594 strlist__for_each_entry_safe(pos, tmp, sym_list) {
2596 val = strtoul(pos->s, &sep, 16);
2597 if (errno || (sep == pos->s))
2601 end = pos->s + strlen(pos->s) - 1;
2602 while (end >= sep && isspace(*end))
2609 err = intlist__add(*addr_list, val);
2613 strlist__remove(sym_list, pos);
2618 intlist__delete(*addr_list);
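/*
 * Decide whether kernel pointers are restricted for this process, based on
 * /proc/sys/kernel/kptr_restrict, perf_event_paranoid and CAP_SYSLOG.
 */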
2625 static bool symbol__read_kptr_restrict(void)
2628 FILE *fp = fopen("/proc/sys/kernel/kptr_restrict", "r");
2633 if (fgets(line, sizeof(line), fp) != NULL)
2634 value = perf_cap__capable(CAP_SYSLOG) ?
2641 /* Per kernel/kallsyms.c:
2642 * we also restrict when perf_event_paranoid > 1 w/o CAP_SYSLOG
2644 if (perf_event_paranoid() > 1 && !perf_cap__capable(CAP_SYSLOG))
2650 int symbol__annotation_init(void)
2652 if (symbol_conf.init_annotation)
2655 if (symbol_conf.initialized) {
2656 pr_err("Annotation needs to be init before symbol__init()\n");
2660 symbol_conf.priv_size += sizeof(struct annotation);
2661 symbol_conf.init_annotation = true;
2665 int symbol__init(struct perf_env *env)
2669 if (symbol_conf.initialized)
2672 symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64));
2676 if (symbol_conf.try_vmlinux_path && vmlinux_path__init(env) < 0)
2679 if (symbol_conf.field_sep && *symbol_conf.field_sep == '.') {
2680 pr_err("'.' is the only invalid --field-separator argument\n");
2684 if (setup_list(&symbol_conf.dso_list,
2685 symbol_conf.dso_list_str, "dso") < 0)
2688 if (setup_list(&symbol_conf.comm_list,
2689 symbol_conf.comm_list_str, "comm") < 0)
2690 goto out_free_dso_list;
2692 if (setup_intlist(&symbol_conf.pid_list,
2693 symbol_conf.pid_list_str, "pid") < 0)
2694 goto out_free_comm_list;
2696 if (setup_intlist(&symbol_conf.tid_list,
2697 symbol_conf.tid_list_str, "tid") < 0)
2698 goto out_free_pid_list;
2700 if (setup_list(&symbol_conf.sym_list,
2701 symbol_conf.sym_list_str, "symbol") < 0)
2702 goto out_free_tid_list;
2704 if (symbol_conf.sym_list &&
2705 setup_addrlist(&symbol_conf.addr_list, symbol_conf.sym_list) < 0)
2706 goto out_free_sym_list;
2708 if (setup_list(&symbol_conf.bt_stop_list,
2709 symbol_conf.bt_stop_list_str, "symbol") < 0)
2710 goto out_free_sym_list;
2713 * A path to symbols of "/" is identical to "";
2714 * reset it here for simplicity.
2716 symfs = realpath(symbol_conf.symfs, NULL);
2718 symfs = symbol_conf.symfs;
2719 if (strcmp(symfs, "/") == 0)
2720 symbol_conf.symfs = "";
2721 if (symfs != symbol_conf.symfs)
2722 free((void *)symfs);
2724 symbol_conf.kptr_restrict = symbol__read_kptr_restrict();
2726 symbol_conf.initialized = true;
2730 strlist__delete(symbol_conf.sym_list);
2731 intlist__delete(symbol_conf.addr_list);
2733 intlist__delete(symbol_conf.tid_list);
2735 intlist__delete(symbol_conf.pid_list);
2737 strlist__delete(symbol_conf.comm_list);
2739 strlist__delete(symbol_conf.dso_list);
2743 void symbol__exit(void)
2745 if (!symbol_conf.initialized)
2747 strlist__delete(symbol_conf.bt_stop_list);
2748 strlist__delete(symbol_conf.sym_list);
2749 strlist__delete(symbol_conf.dso_list);
2750 strlist__delete(symbol_conf.comm_list);
2751 intlist__delete(symbol_conf.tid_list);
2752 intlist__delete(symbol_conf.pid_list);
2753 intlist__delete(symbol_conf.addr_list);
2754 vmlinux_path__exit();
2755 symbol_conf.sym_list = symbol_conf.dso_list = symbol_conf.comm_list = NULL;
2756 symbol_conf.bt_stop_list = NULL;
2757 symbol_conf.initialized = false;
2760 int symbol__config_symfs(const struct option *opt __maybe_unused,
2761 const char *dir, int unset __maybe_unused)
2766 symbol_conf.symfs = strdup(dir);
2767 if (symbol_conf.symfs == NULL)
2770 /* skip the locally configured cache if a symfs is given, and
2771 * set the buildid dir to symfs/.debug
2773 ret = asprintf(&bf, "%s/%s", dir, ".debug");
2777 set_buildid_dir(bf);
2783 struct mem_info *mem_info__get(struct mem_info *mi)
2786 refcount_inc(&mi->refcnt);
2790 void mem_info__put(struct mem_info *mi)
2792 if (mi && refcount_dec_and_test(&mi->refcnt))
2796 struct mem_info *mem_info__new(void)
2798 struct mem_info *mi = zalloc(sizeof(*mi));
2801 refcount_set(&mi->refcnt, 1);
2806 * Checks that user-supplied kernel symbol files are accessible, because
2807 * the default mechanism for accessing ELF files fails silently, i.e. if
2808 * debug syms for a build ID aren't found, perf carries on normally. When
2809 * they are user-supplied, we should assume that the user doesn't want perf to silently fail.
2812 int symbol__validate_sym_arguments(void)
2814 if (symbol_conf.vmlinux_name &&
2815 access(symbol_conf.vmlinux_name, R_OK)) {
2816 pr_err("Invalid file: %s\n", symbol_conf.vmlinux_name);
2819 if (symbol_conf.kallsyms_name &&
2820 access(symbol_conf.kallsyms_name, R_OK)) {
2821 pr_err("Invalid file: %s\n", symbol_conf.kallsyms_name);