/*
 * Human-readable names for each map type, indexed by the MAP__* enum.
 * NOTE(review): the initializer's closing brace is elided in this extract.
 */
14 const char *map_type__name[MAP__NR_TYPES] = {
15 [MAP__FUNCTION] = "Functions",
16 [MAP__VARIABLE] = "Variables",
/*
 * is_anon_memory - return non-zero when @filename denotes an anonymous
 * mapping, which the kernel reports with the literal name "//anon".
 */
static inline int is_anon_memory(const char *filename)
{
	return !strcmp(filename, "//anon");
}
/*
 * is_no_dso_memory - return non-zero for memory regions that have no
 * backing DSO on disk: the process stack and heap segments.
 */
static inline int is_no_dso_memory(const char *filename)
{
	if (strcmp(filename, "[stack]") == 0)
		return 1;
	return strcmp(filename, "[heap]") == 0;
}
/*
 * map__init - initialize an already-allocated struct map.
 * Installs the default ip<->address translation callbacks and clears
 * the rbtree linkage and bookkeeping flags.
 * NOTE(review): the lines assigning start/end/pgoff/dso are elided in
 * this extract — presumably stored on @self; confirm against full source.
 */
30 void map__init(struct map *self, enum map_type type,
31 u64 start, u64 end, u64 pgoff, struct dso *dso)
38 self->map_ip = map__map_ip;
39 self->unmap_ip = map__unmap_ip;
/* Node is not in any tree yet. */
40 RB_CLEAR_NODE(&self->rb_node);
41 self->referenced = false;
43 self->erange_warned = false;
/*
 * map__new - allocate and initialize a map for an mmap-style event.
 * Classifies the mapping (anonymous / vdso / no-dso), resolves or creates
 * the backing dso in @dsos__list, and initializes the map to cover
 * [start, start + len) at page offset @pgoff.
 * NOTE(review): allocation-failure handling and several conditional
 * branches are elided in this extract.
 */
46 struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
47 u64 pgoff, u32 pid, char *filename,
50 struct map *self = malloc(sizeof(*self));
53 char newfilename[PATH_MAX];
55 int anon, no_dso, vdso;
57 anon = is_anon_memory(filename);
58 vdso = is_vdso_map(filename);
59 no_dso = is_no_dso_memory(filename);
/*
 * Anonymous maps are redirected to the /tmp/perf-<pid>.map file,
 * the convention JIT compilers use to publish their symbols.
 */
62 snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
63 filename = newfilename;
68 dso = vdso__dso_findnew(dsos__list);
70 dso = __dsos__findnew(dsos__list, filename);
75 map__init(self, type, start, start + len, pgoff, dso);
/* vdso maps need no translation: addresses are already absolute. */
78 self->map_ip = self->unmap_ip = identity__map_ip;
81 * Set memory without DSO as loaded. All map__find_*
82 * functions still return NULL, and we avoid the
83 * unnecessary map__load warning.
86 dso__set_loaded(dso, self->type);
/*
 * map__new2 - constructor for maps whose end address is unknown until
 * the symbols are loaded (kernel modules and vmlinux).  Allocates room
 * for a trailing struct kmap when @dso is a kernel dso.
 */
96 * Constructor variant for modules (where we know from /proc/modules where
97 * they are loaded) and for vmlinux, where only after we load all the
98 * symbols we'll know where it starts and ends.
100 struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
102 struct map *map = calloc(1, (sizeof(*map) +
103 (dso->kernel ? sizeof(struct kmap) : 0)));
106 * ->end will be filled after we load all the symbols
108 map__init(map, type, start, 0, 0, dso);
/* map__delete - destroy a map.  Body elided in this extract; presumably
 * frees @self — confirm against full source. */
114 void map__delete(struct map *self)
/*
 * map__fixup_start - set the map's start to the start address of the
 * first (lowest) symbol in the dso's symbol tree for this map type.
 * NOTE(review): the guard for an empty tree (nd == NULL) is elided here.
 */
119 void map__fixup_start(struct map *self)
121 struct rb_root *symbols = &self->dso->symbols[self->type];
122 struct rb_node *nd = rb_first(symbols);
124 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
125 self->start = sym->start;
/*
 * map__fixup_end - set the map's end to the end address of the last
 * (highest) symbol in the dso's symbol tree for this map type.
 * NOTE(review): the guard for an empty tree (nd == NULL) is elided here.
 */
129 void map__fixup_end(struct map *self)
131 struct rb_root *symbols = &self->dso->symbols[self->type];
132 struct rb_node *nd = rb_last(symbols);
134 struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
135 self->end = sym->end;
139 #define DSO__DELETED "(deleted)"
/*
 * map__load - load the symbols for this map's dso, at most once.
 * Returns early when the dso is already loaded for this map type.
 * On load failure, warns including the build id when one is available.
 * When zero symbols were found, distinguishes a prelink-updated
 * ("(deleted)") dso from a plain missing-debuginfo case.  Finally,
 * kernel maps are relocated since their symtabs hold absolute addresses.
 * NOTE(review): return statements, locals and several branches are
 * elided in this extract — verify control flow against full source.
 */
141 int map__load(struct map *self, symbol_filter_t filter)
143 const char *name = self->dso->long_name;
146 if (dso__loaded(self->dso, self->type))
149 nr = dso__load(self->dso, self, filter);
151 if (self->dso->has_build_id) {
/* 2 hex chars per byte + NUL terminator. */
152 char sbuild_id[BUILD_ID_SIZE * 2 + 1];
154 build_id__sprintf(self->dso->build_id,
155 sizeof(self->dso->build_id),
157 pr_warning("%s with build id %s not found",
160 pr_warning("Failed to open %s", name);
162 pr_warning(", continuing without symbols\n");
164 } else if (nr == 0) {
165 #ifndef NO_LIBELF_SUPPORT
166 const size_t len = strlen(name);
167 const size_t real_len = len - sizeof(DSO__DELETED);
/* Detect names ending in " (deleted)": the file was replaced on disk. */
169 if (len > sizeof(DSO__DELETED) &&
170 strcmp(name + real_len + 1, DSO__DELETED) == 0) {
171 pr_warning("%.*s was updated (is prelink enabled?). "
172 "Restart the long running apps that use it!\n",
173 (int)real_len, name);
175 pr_warning("no symbols found in %s, maybe install "
176 "a debug package?\n", name);
182 * Only applies to the kernel, as its symtabs aren't relative like the
185 if (self->dso->kernel)
186 map__reloc_vmlinux(self);
/*
 * map__find_symbol - lazily load the dso's symbols, then look up @addr
 * (already translated into the dso's address space) by address.
 * NOTE(review): the error-return line after the failed load is elided.
 */
191 struct symbol *map__find_symbol(struct map *self, u64 addr,
192 symbol_filter_t filter)
194 if (map__load(self, filter) < 0)
197 return dso__find_symbol(self->dso, self->type, addr);
/*
 * map__find_symbol_by_name - lazily load symbols, sort the dso's symbol
 * table by name on first use, then look up @name.
 * NOTE(review): the error-return line after the failed load is elided.
 */
200 struct symbol *map__find_symbol_by_name(struct map *self, const char *name,
201 symbol_filter_t filter)
203 if (map__load(self, filter) < 0)
/* Name-sorted index is built lazily, only when first needed. */
206 if (!dso__sorted_by_name(self->dso, self->type))
207 dso__sort_by_name(self->dso, self->type);
209 return dso__find_symbol_by_name(self->dso, self->type, name);
/*
 * map__clone - allocate a shallow byte-for-byte copy of @self.
 * NOTE(review): the NULL-check between malloc and memcpy is elided in
 * this extract; the copy includes rb_node/list linkage as-is.
 */
212 struct map *map__clone(struct map *self)
214 struct map *map = malloc(sizeof(*self));
219 memcpy(map, self, sizeof(*self));
/*
 * map__overlap - return non-zero when the address ranges of @l and @r
 * overlap.
 * NOTE(review): the body between the two tests is elided — presumably
 * l and r are swapped so l->start <= r->start before the range check;
 * confirm against full source.
 */
224 int map__overlap(struct map *l, struct map *r)
226 if (l->start > r->start) {
232 if (l->end > r->start)
/*
 * map__fprintf - print "start-end pgoff dsoname" for @self to @fp,
 * returning the number of characters written.
 */
238 size_t map__fprintf(struct map *self, FILE *fp)
240 return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n",
241 self->start, self->end, self->pgoff, self->dso->name);
/*
 * map__fprintf_dsoname - print the name of @map's dso (or "[unknown]")
 * to @fp.  Prefers the full path (long_name) when the user asked for
 * kernel paths via symbol_conf.show_kernel_path.
 */
244 size_t map__fprintf_dsoname(struct map *map, FILE *fp)
246 const char *dsoname = "[unknown]";
248 if (map && map->dso && (map->dso->name || map->dso->long_name)) {
249 if (symbol_conf.show_kernel_path && map->dso->long_name)
250 dsoname = map->dso->long_name;
251 else if (map->dso->name)
252 dsoname = map->dso->name;
255 return fprintf(fp, "%s", dsoname);
/*
 * map__rip_2objdump - convert a runtime ip to the address objdump would
 * report for it, per the comment below.
 * NOTE(review): the else-arm of the ternary and the return are elided.
 */
259 * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
260 * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
262 u64 map__rip_2objdump(struct map *map, u64 rip)
264 u64 addr = map->dso->adjust_symbols ?
265 map->unmap_ip(map, rip) : /* RIP -> IP */
/*
 * map_groups__init - initialize the per-type map trees and the
 * removed-maps lists of @mg to empty.
 */
270 void map_groups__init(struct map_groups *mg)
273 for (i = 0; i < MAP__NR_TYPES; ++i) {
274 mg->maps[i] = RB_ROOT;
275 INIT_LIST_HEAD(&mg->removed_maps[i]);
/*
 * maps__delete - erase every map from the @maps rbtree.
 * Fetches the successor before erasing so iteration stays valid.
 * NOTE(review): the per-map destruction call after rb_erase is elided.
 */
280 static void maps__delete(struct rb_root *maps)
282 struct rb_node *next = rb_first(maps);
285 struct map *pos = rb_entry(next, struct map, rb_node);
287 next = rb_next(&pos->rb_node);
288 rb_erase(&pos->rb_node, maps);
/*
 * maps__delete_removed - unlink every map on the removed-maps list.
 * Uses the _safe iterator since entries are deleted while walking.
 * NOTE(review): the per-map destruction call after list_del is elided.
 */
293 static void maps__delete_removed(struct list_head *maps)
297 list_for_each_entry_safe(pos, n, maps, node) {
298 list_del(&pos->node);
/*
 * map_groups__exit - tear down @mg: delete the active maps and the
 * removed maps for every map type.
 */
303 void map_groups__exit(struct map_groups *mg)
307 for (i = 0; i < MAP__NR_TYPES; ++i) {
308 maps__delete(&mg->maps[i]);
309 maps__delete_removed(&mg->removed_maps[i]);
/*
 * map_groups__flush - empty every per-type map tree, parking the maps
 * on the removed lists instead of freeing them (see comment below).
 */
313 void map_groups__flush(struct map_groups *mg)
317 for (type = 0; type < MAP__NR_TYPES; type++) {
318 struct rb_root *root = &mg->maps[type];
319 struct rb_node *next = rb_first(root);
322 struct map *pos = rb_entry(next, struct map, rb_node);
/* Advance before erasing so the walk stays valid. */
323 next = rb_next(&pos->rb_node);
324 rb_erase(&pos->rb_node, root);
326 * We may have references to this map, for
327 * instance in some hist_entry instances, so
328 * just move them to a separate list.
330 list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
/*
 * map_groups__find_symbol - find the map of @type containing @addr,
 * translate addr into the dso's address space, and resolve the symbol.
 * NOTE(review): the NULL-map handling between find and the final return
 * is elided in this extract.
 */
335 struct symbol *map_groups__find_symbol(struct map_groups *mg,
336 enum map_type type, u64 addr,
338 symbol_filter_t filter)
340 struct map *map = map_groups__find(mg, type, addr);
345 return map__find_symbol(map, map->map_ip(map, addr), filter);
/*
 * map_groups__find_symbol_by_name - linear walk over all maps of the
 * requested type, returning the first symbol matching @name.
 * NOTE(review): parameters, the hit-return and the miss-return lines
 * are elided in this extract.
 */
351 struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
355 symbol_filter_t filter)
359 for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
360 struct map *pos = rb_entry(nd, struct map, rb_node);
361 struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
/*
 * __map_groups__fprintf_maps - print a titled listing of all maps of
 * @type in @mg; each entry is the map line plus, presumably when
 * @verbose is set, the dso details — the verbosity check line is elided
 * in this extract, confirm against full source.
 */
373 size_t __map_groups__fprintf_maps(struct map_groups *mg,
374 enum map_type type, int verbose, FILE *fp)
376 size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
379 for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
380 struct map *pos = rb_entry(nd, struct map, rb_node);
381 printed += fprintf(fp, "Map:");
382 printed += map__fprintf(pos, fp);
384 printed += dso__fprintf(pos->dso, type, fp);
385 printed += fprintf(fp, "--\n");
/*
 * map_groups__fprintf_maps - print the maps of every type in @mg,
 * returning the total character count.
 * NOTE(review): the final return statement is elided in this extract.
 */
392 size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
394 size_t printed = 0, i;
395 for (i = 0; i < MAP__NR_TYPES; ++i)
396 printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
/*
 * __map_groups__fprintf_removed_maps - print every map parked on the
 * removed list for one map type; mirrors __map_groups__fprintf_maps.
 * NOTE(review): parameters, locals, the verbosity check and the return
 * are elided in this extract.
 */
400 static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
402 int verbose, FILE *fp)
407 list_for_each_entry(pos, &mg->removed_maps[type], node) {
408 printed += fprintf(fp, "Map:");
409 printed += map__fprintf(pos, fp);
411 printed += dso__fprintf(pos->dso, type, fp);
412 printed += fprintf(fp, "--\n");
/*
 * map_groups__fprintf_removed_maps - print the removed maps of every
 * type in @mg, returning the total character count.
 * NOTE(review): the final return statement is elided in this extract.
 */
418 static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
419 int verbose, FILE *fp)
421 size_t printed = 0, i;
422 for (i = 0; i < MAP__NR_TYPES; ++i)
423 printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
/*
 * map_groups__fprintf - print the active maps followed by a
 * "Removed maps:" section; returns the total characters written.
 */
427 size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
429 size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
430 printed += fprintf(fp, "Removed maps:\n");
431 return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
/*
 * map_groups__fixup_overlappings - make room for @map in @mg.
 * Walks the existing tree of the same type; every map overlapping the
 * new one is erased, and up to two clones are re-inserted to cover the
 * regions before map->start and after map->end that the new map does
 * not replace.  The displaced original is parked on the removed list
 * because other objects may still reference it.
 * NOTE(review): error paths, verbose checks and several statements are
 * elided in this extract — treat the flow below as a sketch and verify
 * against the full source before changing it.
 */
434 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
435 int verbose, FILE *fp)
437 struct rb_root *root = &mg->maps[map->type];
438 struct rb_node *next = rb_first(root);
442 struct map *pos = rb_entry(next, struct map, rb_node);
443 next = rb_next(&pos->rb_node);
445 if (!map__overlap(pos, map))
449 fputs("overlapping maps:\n", fp);
450 map__fprintf(map, fp);
451 map__fprintf(pos, fp);
454 rb_erase(&pos->rb_node, root);
456 * Now check if we need to create new maps for areas not
457 * overlapped by the new map:
459 if (map->start > pos->start) {
460 struct map *before = map__clone(pos);
462 if (before == NULL) {
/* Keep the leading, non-overlapped part of the old map. */
467 before->end = map->start - 1;
468 map_groups__insert(mg, before);
470 map__fprintf(before, fp);
473 if (map->end < pos->end) {
474 struct map *after = map__clone(pos);
/* Keep the trailing, non-overlapped part of the old map. */
481 after->start = map->end + 1;
482 map_groups__insert(mg, after);
484 map__fprintf(after, fp);
488 * If we have references, just move them to a separate list.
491 list_add_tail(&pos->node, &mg->removed_maps[map->type]);
/*
 * map_groups__clone - copy all maps of @type from @parent into @mg.
 * NOTE(review): the clone-failure check and the final return are elided
 * in this extract.
 */
503 * XXX This should not really _copy_ te maps, but refcount them.
505 int map_groups__clone(struct map_groups *mg,
506 struct map_groups *parent, enum map_type type)
509 for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
510 struct map *map = rb_entry(nd, struct map, rb_node);
511 struct map *new = map__clone(map);
514 map_groups__insert(mg, new);
/*
 * map__reloc_map_ip - ip translation for relocated kernel maps: apply
 * pgoff (repurposed to hold the signed relocation offset) to @ip.
 */
519 static u64 map__reloc_map_ip(struct map *map, u64 ip)
521 return ip + (s64)map->pgoff;
/*
 * map__reloc_unmap_ip - inverse of map__reloc_map_ip: remove the signed
 * relocation offset stored in pgoff from @ip.
 */
524 static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
526 return ip - (s64)map->pgoff;
/*
 * map__reloc_vmlinux - if a reference relocation symbol was recorded,
 * compute the relocation delta and switch the map to the relocating
 * ip-translation callbacks.
 * NOTE(review): lines between computing @reloc and installing the
 * callbacks are elided — presumably reloc is stored in map->pgoff
 * (see map__reloc_map_ip); confirm against full source.
 */
529 void map__reloc_vmlinux(struct map *self)
531 struct kmap *kmap = map__kmap(self);
/* Nothing to do without a usable reference symbol. */
534 if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
537 reloc = (kmap->ref_reloc_sym->unrelocated_addr -
538 kmap->ref_reloc_sym->addr);
543 self->map_ip = map__reloc_map_ip;
544 self->unmap_ip = map__reloc_unmap_ip;
/*
 * maps__insert - insert @map into the @maps rbtree, keyed by its start
 * address.
 * NOTE(review): the descent loop's comparison/branching lines are
 * elided in this extract.
 */
548 void maps__insert(struct rb_root *maps, struct map *map)
550 struct rb_node **p = &maps->rb_node;
551 struct rb_node *parent = NULL;
552 const u64 ip = map->start;
557 m = rb_entry(parent, struct map, rb_node);
564 rb_link_node(&map->rb_node, parent, p);
565 rb_insert_color(&map->rb_node, maps);
/* maps__remove - erase @map from the @self rbtree; does not free it. */
568 void maps__remove(struct rb_root *self, struct map *map)
570 rb_erase(&map->rb_node, self);
/*
 * maps__find - rbtree search for the map whose [start, end] range
 * contains @ip.
 * NOTE(review): the descent branches and the hit/miss returns are
 * elided in this extract.
 */
573 struct map *maps__find(struct rb_root *maps, u64 ip)
575 struct rb_node **p = &maps->rb_node;
576 struct rb_node *parent = NULL;
581 m = rb_entry(parent, struct map, rb_node);
584 else if (ip > m->end)
/*
 * machine__init - initialize a machine: empty kernel map groups, dso
 * and thread lists, a duplicated root_dir, and — for guest machines
 * (pid != HOST_KERNEL_ID) — a pseudo thread named "[guest/<pid>]".
 * NOTE(review): return statements and error handling for the thread
 * creation are elided in this extract.
 */
593 int machine__init(struct machine *self, const char *root_dir, pid_t pid)
595 map_groups__init(&self->kmaps);
596 RB_CLEAR_NODE(&self->rb_node);
597 INIT_LIST_HEAD(&self->user_dsos);
598 INIT_LIST_HEAD(&self->kernel_dsos);
600 self->threads = RB_ROOT;
601 INIT_LIST_HEAD(&self->dead_threads);
602 self->last_match = NULL;
/* Back-pointer so kmaps code can reach the owning machine. */
604 self->kmaps.machine = self;
606 self->root_dir = strdup(root_dir);
607 if (self->root_dir == NULL)
610 if (pid != HOST_KERNEL_ID) {
611 struct thread *thread = machine__findnew_thread(self, pid);
617 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
618 thread__set_comm(thread, comm);
/*
 * dsos__delete - unlink every dso on the @self list, using the _safe
 * iterator since entries are removed while walking.
 * NOTE(review): the per-dso destruction call after list_del is elided.
 */
624 static void dsos__delete(struct list_head *self)
628 list_for_each_entry_safe(pos, n, self, node) {
629 list_del(&pos->node);
/*
 * machine__exit - release everything a machine owns: kernel map groups,
 * both dso lists, and the duplicated root_dir string.
 */
634 void machine__exit(struct machine *self)
636 map_groups__exit(&self->kmaps);
637 dsos__delete(&self->user_dsos);
638 dsos__delete(&self->kernel_dsos);
639 free(self->root_dir);
/* Defend against use-after-free through a stale pointer. */
640 self->root_dir = NULL;
/* machine__delete - destroy a machine.  Body elided in this extract;
 * presumably machine__exit() plus free — confirm against full source. */
643 void machine__delete(struct machine *self)
/*
 * machines__add - allocate and init a machine for @pid/@root_dir, and
 * insert it into the @self rbtree keyed by pid.
 * NOTE(review): allocation-failure handling, the descent comparisons
 * and the final return are elided in this extract.
 */
649 struct machine *machines__add(struct rb_root *self, pid_t pid,
650 const char *root_dir)
652 struct rb_node **p = &self->rb_node;
653 struct rb_node *parent = NULL;
654 struct machine *pos, *machine = malloc(sizeof(*machine));
659 if (machine__init(machine, root_dir, pid) != 0) {
666 pos = rb_entry(parent, struct machine, rb_node);
673 rb_link_node(&machine->rb_node, parent, p);
674 rb_insert_color(&machine->rb_node, self);
/*
 * machines__find - rbtree lookup of a machine by @pid.  Falls back to
 * returning @default_machine when no exact match is found.
 * NOTE(review): the exact-match return and the condition that records
 * default_machine (presumably the default-guest machine) are elided —
 * confirm against full source.
 */
679 struct machine *machines__find(struct rb_root *self, pid_t pid)
681 struct rb_node **p = &self->rb_node;
682 struct rb_node *parent = NULL;
683 struct machine *machine;
684 struct machine *default_machine = NULL;
688 machine = rb_entry(parent, struct machine, rb_node);
689 if (pid < machine->pid)
691 else if (pid > machine->pid)
696 default_machine = machine;
699 return default_machine;
/*
 * machines__findnew - return the machine for @pid, creating it when
 * missing.  For real guest pids with a configured guestmount, the root
 * dir is <guestmount>/<pid>; an inaccessible path is reported only once
 * per path via a static strlist of already-seen paths.
 * NOTE(review): several branches (early return, seen-list init failure,
 * root_dir assignment) are elided in this extract.
 */
702 struct machine *machines__findnew(struct rb_root *self, pid_t pid)
705 const char *root_dir = "";
706 struct machine *machine = machines__find(self, pid);
708 if (machine && (machine->pid == pid))
711 if ((pid != HOST_KERNEL_ID) &&
712 (pid != DEFAULT_GUEST_KERNEL_ID) &&
713 (symbol_conf.guestmount)) {
714 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
715 if (access(path, R_OK)) {
/* Remember paths already complained about to avoid log spam. */
716 static struct strlist *seen;
719 seen = strlist__new(true, NULL);
721 if (!strlist__has_entry(seen, path)) {
722 pr_err("Can't access file %s\n", path);
723 strlist__add(seen, path);
731 machine = machines__add(self, pid, root_dir);
/*
 * machines__process - in-order walk of the machines rbtree.
 * NOTE(review): the line invoking @process on each machine is elided —
 * presumably process(pos, data); confirm against full source.
 */
737 void machines__process(struct rb_root *self, machine__process_t process, void *data)
741 for (nd = rb_first(self); nd; nd = rb_next(nd)) {
742 struct machine *pos = rb_entry(nd, struct machine, rb_node);
/*
 * machine__mmap_name - format the synthetic kallsyms mmap name into
 * @bf: "[kernel.kallsyms]" for the host, "[guest.kernel.kallsyms]" for
 * the default guest, or "[guest.kernel.kallsyms.<pid>]" otherwise.
 * NOTE(review): the final "return bf;" line is elided in this extract.
 */
747 char *machine__mmap_name(struct machine *self, char *bf, size_t size)
749 if (machine__is_host(self))
750 snprintf(bf, size, "[%s]", "kernel.kallsyms");
751 else if (machine__is_default_guest(self))
752 snprintf(bf, size, "[%s]", "guest.kernel.kallsyms");
754 snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid);
759 void machines__set_id_hdr_size(struct rb_root *machines, u16 id_hdr_size)
761 struct rb_node *node;
762 struct machine *machine;
764 for (node = rb_first(machines); node; node = rb_next(node)) {
765 machine = rb_entry(node, struct machine, rb_node);
766 machine->id_hdr_size = id_hdr_size;