1 #include <linux/types.h>
/*
 * Synthesize a PERF_RECORD_COMM event for @pid by parsing the "Name:"
 * and "Tgid:" lines of /proc/<pid>/status, delivering it via @process.
 * When @full, also walks /proc/<pid>/task to emit one event per task.
 * Returns the tgid on success (used by the caller for mmap synthesis).
 * NOTE(review): interior lines of this function are missing from this
 * view; comments cover only the visible code — confirm against the
 * full file.
 */
10 static pid_t event__synthesize_comm(pid_t pid, int full,
11 event__handler_t process,
12 struct perf_session *session)
15 char filename[PATH_MAX];
20 struct dirent dirent, *next;
23 snprintf(filename, sizeof(filename), "/proc/%d/status", pid);
25 fp = fopen(filename, "r");
/* Open failure is non-fatal: the task may have exited under us. */
29 * We raced with a task exiting - just return:
31 pr_debug("couldn't open %s\n", filename);
/* Scan status lines until both the comm string and the tgid are set. */
35 memset(&ev.comm, 0, sizeof(ev.comm));
36 while (!ev.comm.comm[0] || !ev.comm.pid) {
37 if (fgets(bf, sizeof(bf), fp) == NULL)
40 if (memcmp(bf, "Name:", 5) == 0) {
/* Skip whitespace following the "Name:" tag. */
42 while (*name && isspace(*name))
/* Drop the trailing '\n'; size++ so the NUL terminator is copied too. */
44 size = strlen(name) - 1;
45 memcpy(ev.comm.comm, name, size++);
46 } else if (memcmp(bf, "Tgid:", 5) == 0) {
48 while (*tgids && isspace(*tgids))
50 tgid = ev.comm.pid = atoi(tgids);
/* Event size = struct minus the unused tail of comm[], u64-aligned. */
54 ev.comm.header.type = PERF_RECORD_COMM;
55 size = ALIGN(size, sizeof(u64));
56 ev.comm.header.size = sizeof(ev.comm) - (sizeof(ev.comm.comm) - size);
61 process(&ev, session);
/* Full mode: emit one COMM event per task (thread) directory entry. */
65 snprintf(filename, sizeof(filename), "/proc/%d/task", pid);
67 tasks = opendir(filename);
71 while (!readdir_r(tasks, &dirent, &next) && next) {
73 pid = strtol(dirent.d_name, &end, 10);
79 process(&ev, session);
88 pr_warning("couldn't get COMM and pgid, malformed %s\n", filename);
/*
 * Synthesize PERF_RECORD_MMAP events for each executable mapping of
 * @pid by parsing /proc/<pid>/maps, delivering each via @process.
 * NOTE(review): interior lines are missing from this view; comments
 * cover only the visible code.
 */
92 static int event__synthesize_mmap_events(pid_t pid, pid_t tgid,
93 event__handler_t process,
94 struct perf_session *session)
96 char filename[PATH_MAX];
99 snprintf(filename, sizeof(filename), "/proc/%d/maps", pid);
101 fp = fopen(filename, "r");
/* Open failure is non-fatal: the task may have exited under us. */
104 * We raced with a task exiting - just return:
106 pr_debug("couldn't open %s\n", filename);
111 char bf[BUFSIZ], *pbf = bf;
114 .type = PERF_RECORD_MMAP,
115 .misc = 0, /* Just like the kernel, see kernel/perf_event.c __perf_event_mmap */
120 if (fgets(bf, sizeof(bf), fp) == NULL)
/* Parse the start-end address range from the maps line. */
123 /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
124 n = hex2u64(pbf, &ev.mmap.start);
/* ev.mmap.len temporarily holds the END address; see the -= below. */
128 n = hex2u64(pbf, &ev.mmap.len);
/* Only executable mappings are of interest for sample resolution. */
132 if (*pbf == 'x') { /* vm_exec */
133 char *execname = strchr(bf, '/');
/* Anonymous exec mapping: fall back to the "[vdso]" pseudo-name. */
136 if (execname == NULL)
137 execname = strstr(bf, "[vdso]");
139 if (execname == NULL)
142 size = strlen(execname);
143 execname[size - 1] = '\0'; /* Remove \n */
144 memcpy(ev.mmap.filename, execname, size);
/* Convert end address to length, size the event u64-aligned. */
145 size = ALIGN(size, sizeof(u64));
146 ev.mmap.len -= ev.mmap.start;
147 ev.mmap.header.size = (sizeof(ev.mmap) -
148 (sizeof(ev.mmap.filename) - size));
152 process(&ev, session);
/*
 * Synthesize one PERF_RECORD_MMAP event per kernel module map found in
 * the session's function map rbtree, delivering each via @process.
 * NOTE(review): interior lines are missing from this view.
 */
160 int event__synthesize_modules(event__handler_t process,
161 struct perf_session *session)
/* Walk all MAP__FUNCTION maps in the kernel map group. */
165 for (nd = rb_first(&session->kmaps.maps[MAP__FUNCTION]);
166 nd; nd = rb_next(nd)) {
169 struct map *pos = rb_entry(nd, struct map, rb_node);
/* The main kernel map is handled elsewhere; only modules here. */
171 if (pos->dso->kernel)
/* +1 keeps the NUL terminator; align the filename field to u64. */
174 size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64));
175 memset(&ev, 0, sizeof(ev));
176 ev.mmap.header.misc = 1; /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */
177 ev.mmap.header.type = PERF_RECORD_MMAP;
178 ev.mmap.header.size = (sizeof(ev.mmap) -
179 (sizeof(ev.mmap.filename) - size));
180 ev.mmap.start = pos->start;
181 ev.mmap.len = pos->end - pos->start;
183 memcpy(ev.mmap.filename, pos->dso->long_name,
184 pos->dso->long_name_len + 1);
185 process(&ev, session);
/*
 * Synthesize both COMM (full: including per-task entries) and MMAP
 * events for @pid.  The tgid returned by the COMM pass is fed to the
 * mmap pass.  NOTE(review): the visible lines omit any error check on
 * tgid between the two calls — confirm against the full file.
 */
191 int event__synthesize_thread(pid_t pid, event__handler_t process,
192 struct perf_session *session)
194 pid_t tgid = event__synthesize_comm(pid, 1, process, session);
197 return event__synthesize_mmap_events(pid, tgid, process, session);
/*
 * Synthesize COMM and MMAP events for every live process, by walking
 * the numeric directory entries of /proc.
 * NOTE(review): interior lines are missing from this view.
 */
200 void event__synthesize_threads(event__handler_t process,
201 struct perf_session *session)
204 struct dirent dirent, *next;
206 proc = opendir("/proc");
208 while (!readdir_r(proc, &dirent, &next) && next) {
210 pid_t pid = strtol(dirent.d_name, &end, 10);
/* Non-numeric entries (e.g. "self", "meminfo") are not processes. */
212 if (*end) /* only interested in proper numerical dirents */
215 event__synthesize_thread(pid, process, session);
/*
 * Callback context for kallsyms__parse()/find_symbol_cb: carries the
 * symbol name being searched for and, presumably, a field to store the
 * address once found (members not visible in this view — confirm).
 */
221 struct process_symbol_args {
/*
 * kallsyms__parse() callback: matches the symbol named in
 * @arg (a struct process_symbol_args).  Rejects entries that are
 * neither function symbols nor 'A' aliases, or whose name differs.
 * NOTE(review): the matched-case action is in lines missing from this
 * view (presumably stores @start and stops iteration — confirm).
 */
226 static int find_symbol_cb(void *arg, const char *name, char type, u64 start)
228 struct process_symbol_args *args = arg;
231 * Must be a function or at least an alias, as in PARISC64, where "_text" is
232 * an 'A' to the same address as "_stext".
234 if (!(symbol_type__is_a(type, MAP__FUNCTION) ||
235 type == 'A') || strcmp(name, args->name))
/*
 * Synthesize the PERF_RECORD_MMAP event describing the kernel image
 * itself, using @symbol_name (resolved via /proc/kallsyms) as the
 * relocation reference symbol.  Returns @process's result.
 * NOTE(review): interior lines are missing from this view.
 */
242 int event__synthesize_kernel_mmap(event__handler_t process,
243 struct perf_session *session,
244 const char *symbol_name)
249 .type = PERF_RECORD_MMAP,
250 .misc = 1, /* kernel uses 0 for user space maps, see kernel/perf_event.c __perf_event_mmap */
254 * We should get this from /sys/kernel/sections/.text, but till that is
255 * available use this, and after it is use this as a fallback for older
258 struct process_symbol_args args = { .name = symbol_name, };
/* <= 0: symbol not found in kallsyms (or parse failure). */
260 if (kallsyms__parse("/proc/kallsyms", &args, find_symbol_cb) <= 0)
/* +1 keeps the NUL; then align the filename field size to u64. */
263 size = snprintf(ev.mmap.filename, sizeof(ev.mmap.filename),
264 "[kernel.kallsyms.%s]", symbol_name) + 1;
265 size = ALIGN(size, sizeof(u64));
266 ev.mmap.header.size = (sizeof(ev.mmap) - (sizeof(ev.mmap.filename) - size));
/* pgoff carries the ref symbol address for later relocation. */
267 ev.mmap.pgoff = args.start;
268 ev.mmap.start = session->vmlinux_maps[MAP__FUNCTION]->start;
269 ev.mmap.len = session->vmlinux_maps[MAP__FUNCTION]->end - ev.mmap.start ;
271 return process(&ev, session);
/*
 * Widen the global comm/thread display column widths to fit this
 * thread's comm, unless fixed widths or a field separator are in
 * force, or a comm filter list excludes it.
 * NOTE(review): "+ 6" presumably leaves room for the ":<pid>" suffix
 * in thread display — confirm against the printing code.
 */
274 static void thread__comm_adjust(struct thread *self)
276 char *comm = self->comm;
278 if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
279 (!symbol_conf.comm_list ||
280 strlist__has_entry(symbol_conf.comm_list, comm))) {
281 unsigned int slen = strlen(comm);
283 if (slen > comms__col_width) {
284 comms__col_width = slen;
285 threads__col_width = slen + 6;
/*
 * Set the thread's comm and, on success, update the display column
 * widths accordingly.  Returns thread__set_comm()'s result.
 * NOTE(review): the error-path lines between the call and the adjust
 * are missing from this view.
 */
290 static int thread__set_comm_adjust(struct thread *self, const char *comm)
292 int ret = thread__set_comm(self, comm);
297 thread__comm_adjust(self);
/*
 * Handle an incoming PERF_RECORD_COMM: look up (or create) the thread
 * for the event's pid and record its comm string.  Logs and skips on
 * failure.  NOTE(review): return statements are in lines missing from
 * this view.
 */
302 int event__process_comm(event_t *self, struct perf_session *session)
304 struct thread *thread = perf_session__findnew(session, self->comm.pid);
306 dump_printf(": %s:%d\n", self->comm.comm, self->comm.pid);
308 if (thread == NULL || thread__set_comm_adjust(thread, self->comm.comm)) {
309 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
/*
 * Handle PERF_RECORD_LOST: accumulate the number of lost events into
 * the session statistics.
 */
316 int event__process_lost(event_t *self, struct perf_session *session)
318 dump_printf(": id:%Ld: lost:%Ld\n", self->lost.id, self->lost.lost);
319 session->events_stats.lost += self->lost.lost;
/*
 * Handle PERF_RECORD_MMAP.  pid 0 events describe kernel space: either
 * a module (filename starts with '/') or the kernel image proper
 * (filename starts with the "[kernel.kallsyms." prefix).  Other pids
 * get a user-space map inserted into their thread's map group.
 * NOTE(review): many interior lines (error paths, returns) are missing
 * from this view; comments cover only the visible code.
 */
323 int event__process_mmap(event_t *self, struct perf_session *session)
325 struct thread *thread;
328 dump_printf(" %d/%d: [%#Lx(%#Lx) @ %#Lx]: %s\n",
329 self->mmap.pid, self->mmap.tid, self->mmap.start,
330 self->mmap.len, self->mmap.pgoff, self->mmap.filename);
332 if (self->mmap.pid == 0) {
333 static const char kmmap_prefix[] = "[kernel.kallsyms.";
/* Kernel module: derive "[modname]" from the path's basename. */
335 if (self->mmap.filename[0] == '/') {
336 char short_module_name[1024];
337 char *name = strrchr(self->mmap.filename, '/'), *dot;
/* Strip the ".ko" extension when building the short name. */
343 dot = strrchr(name, '.');
347 snprintf(short_module_name, sizeof(short_module_name),
348 "[%.*s]", (int)(dot - name), name);
/* Module names use '_' where filenames may use '-'. */
349 strxfrchar(short_module_name, '-', '_');
351 map = perf_session__new_module_map(session,
353 self->mmap.filename);
357 name = strdup(short_module_name);
361 map->dso->short_name = name;
362 map->end = map->start + self->mmap.len;
/* Kernel image proper, as synthesized by event__synthesize_kernel_mmap(). */
363 } else if (memcmp(self->mmap.filename, kmmap_prefix,
364 sizeof(kmmap_prefix) - 1) == 0) {
365 const char *symbol_name = (self->mmap.filename +
366 sizeof(kmmap_prefix) - 1);
368 * Should be there already, from the build-id table in
371 struct dso *kernel = __dsos__findnew(&dsos__kernel,
372 "[kernel.kallsyms]");
377 if (__perf_session__create_kernel_maps(session, kernel) < 0)
380 session->vmlinux_maps[MAP__FUNCTION]->start = self->mmap.start;
381 session->vmlinux_maps[MAP__FUNCTION]->end = self->mmap.start + self->mmap.len;
383 * Be a bit paranoid here, some perf.data file came with
384 * a zero sized synthesized MMAP event for the kernel.
386 if (session->vmlinux_maps[MAP__FUNCTION]->end == 0)
387 session->vmlinux_maps[MAP__FUNCTION]->end = ~0UL;
/* Record the reloc reference symbol (pgoff holds its address). */
389 perf_session__set_kallsyms_ref_reloc_sym(session, symbol_name,
/* User-space mapping: attach a new map to the owning thread. */
395 thread = perf_session__findnew(session, self->mmap.pid);
396 map = map__new(&self->mmap, MAP__FUNCTION,
397 session->cwd, session->cwdlen);
399 if (thread == NULL || map == NULL)
402 thread__insert_map(thread, map);
406 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
/*
 * Handle PERF_RECORD_FORK / PERF_RECORD_EXIT: link the child thread to
 * its parent (maps inheritance via thread__fork()).  Logs and skips on
 * failure.  NOTE(review): returns and the EXIT-path body are in lines
 * missing from this view.
 */
410 int event__process_task(event_t *self, struct perf_session *session)
412 struct thread *thread = perf_session__findnew(session, self->fork.pid);
413 struct thread *parent = perf_session__findnew(session, self->fork.ppid);
415 dump_printf("(%d:%d):(%d:%d)\n", self->fork.pid, self->fork.tid,
416 self->fork.ppid, self->fork.ptid);
/* Same-PID clone: nothing to inherit, parent and child share maps. */
418 * A thread clone will have the same PID for both parent and child.
420 if (thread == parent)
423 if (self->header.type == PERF_RECORD_EXIT)
426 if (thread == NULL || parent == NULL ||
427 thread__fork(thread, parent) < 0) {
428 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
/*
 * Resolve @addr to a map in @al: pick the kernel or the thread's own
 * map group based on @cpumode, look the address up, and on success
 * convert it to a map-relative address via map_ip().
 * NOTE(review): interior lines (al initialisation, the retry lookup
 * after switching to kmaps) are missing from this view.
 */
435 void thread__find_addr_map(struct thread *self,
436 struct perf_session *session, u8 cpumode,
437 enum map_type type, u64 addr,
438 struct addr_location *al)
440 struct map_groups *mg = &self->mg;
/* Kernel samples search the session's kernel maps instead. */
445 if (cpumode == PERF_RECORD_MISC_KERNEL) {
447 mg = &session->kmaps;
448 } else if (cpumode == PERF_RECORD_MISC_USER)
456 al->map = map_groups__find(mg, type, al->addr);
457 if (al->map == NULL) {
459 * If this is outside of all known maps, and is a negative
460 * address, try to look it up in the kernel dso, as it might be
461 * a vsyscall or vdso (which executes in user-mode).
463 * XXX This is nasty, we should have a symbol list in the
464 * "[vdso]" dso, but for now lets use the old trick of looking
465 * in the whole kernel symbol list.
467 if ((long long)al->addr < 0 && mg != &session->kmaps) {
468 mg = &session->kmaps;
/* Map found: translate to the map's internal address space. */
472 al->addr = al->map->map_ip(al->map, al->addr);
/*
 * Resolve @addr all the way to a symbol: first find the containing
 * map, then (when a map was found — the guard is in lines missing
 * from this view) look the symbol up with the optional @filter.
 */
475 void thread__find_addr_location(struct thread *self,
476 struct perf_session *session, u8 cpumode,
477 enum map_type type, u64 addr,
478 struct addr_location *al,
479 symbol_filter_t filter)
481 thread__find_addr_map(self, session, cpumode, type, addr, al);
483 al->sym = map__find_symbol(al->map, al->addr, filter);
/*
 * Widen the global DSO display column to fit this dso's name, unless
 * fixed widths or a field separator are in force, or a dso filter
 * list excludes it.  Marks the dso so the width is computed only once.
 */
488 static void dso__calc_col_width(struct dso *self)
490 if (!symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
491 (!symbol_conf.dso_list ||
492 strlist__has_entry(symbol_conf.dso_list, self->name))) {
493 unsigned int slen = strlen(self->name);
494 if (slen > dsos__col_width)
495 dsos__col_width = slen;
/* Remember it has been measured, whether or not the filter matched. */
498 self->slen_calculated = 1;
/*
 * Common preprocessing for a sample event: resolve the owning thread,
 * the map/symbol for the sample IP, apply the comm/dso/sym filter
 * lists (setting al->filtered accordingly) and keep the display column
 * widths up to date.  NOTE(review): interior lines (early returns,
 * al->filtered = true paths) are missing from this view.
 */
501 int event__preprocess_sample(const event_t *self, struct perf_session *session,
502 struct addr_location *al, symbol_filter_t filter)
504 u8 cpumode = self->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
505 struct thread *thread = perf_session__findnew(session, self->ip.pid);
/* Comm filter: only threads on the list are of interest. */
510 if (symbol_conf.comm_list &&
511 !strlist__has_entry(symbol_conf.comm_list, thread->comm))
514 dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
516 thread__find_addr_location(thread, session, cpumode, MAP__FUNCTION,
517 self->ip.ip, al, filter);
518 dump_printf(" ...... dso: %s\n",
519 al->map ? al->map->dso->long_name :
520 al->level == 'H' ? "[hypervisor]" : "<not found>");
522 * We have to do this here as we may have a dso with no symbol hit that
523 * has a name longer than the ones with symbols sampled.
525 if (al->map && !sort_dso.elide && !al->map->dso->slen_calculated)
526 dso__calc_col_width(al->map->dso);
/* Dso filter: match either the short or the long dso name. */
528 if (symbol_conf.dso_list &&
529 (!al->map || !al->map->dso ||
530 !(strlist__has_entry(symbol_conf.dso_list, al->map->dso->short_name) ||
531 (al->map->dso->short_name != al->map->dso->long_name &&
532 strlist__has_entry(symbol_conf.dso_list, al->map->dso->long_name)))))
/* Symbol filter applies only when a symbol was actually resolved. */
535 if (symbol_conf.sym_list && al->sym &&
536 !strlist__has_entry(symbol_conf.sym_list, al->sym->name))
539 al->filtered = false;
547 int event__parse_sample(event_t *event, u64 type, struct sample_data *data)
549 u64 *array = event->sample.array;
551 if (type & PERF_SAMPLE_IP) {
552 data->ip = event->ip.ip;
556 if (type & PERF_SAMPLE_TID) {
557 u32 *p = (u32 *)array;
563 if (type & PERF_SAMPLE_TIME) {
568 if (type & PERF_SAMPLE_ADDR) {
573 if (type & PERF_SAMPLE_ID) {
578 if (type & PERF_SAMPLE_STREAM_ID) {
579 data->stream_id = *array;
583 if (type & PERF_SAMPLE_CPU) {
584 u32 *p = (u32 *)array;
589 if (type & PERF_SAMPLE_PERIOD) {
590 data->period = *array;
594 if (type & PERF_SAMPLE_READ) {
595 pr_debug("PERF_SAMPLE_READ is unsuported for now\n");
599 if (type & PERF_SAMPLE_CALLCHAIN) {
600 data->callchain = (struct ip_callchain *)array;
601 array += 1 + data->callchain->nr;
604 if (type & PERF_SAMPLE_RAW) {
605 u32 *p = (u32 *)array;