/*
 * probe-file.c : operate ftrace k/uprobe events files
 *
 * Written by Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>
#include "util.h"
#include "strlist.h"
#include "strfilter.h"
#include "debug.h"
#include "build-id.h"
#include "symbol.h"
#include <api/fs/tracing_path.h>
#include "probe-event.h"
#include "probe-file.h"

#define MAX_CMDLEN 256

static void print_open_warning(int err, bool uprobe)
{
	char sbuf[STRERR_BUFSIZE];

	if (err == -ENOENT) {
		const char *config;

		if (uprobe)
			config = "CONFIG_UPROBE_EVENTS";
		else
			config = "CONFIG_KPROBE_EVENTS";

		pr_warning("%cprobe_events file does not exist"
			   " - please rebuild kernel with %s.\n",
			   uprobe ? 'u' : 'k', config);
	} else if (err == -ENOTSUP)
		pr_warning("Tracefs or debugfs is not mounted.\n");
	else
		pr_warning("Failed to open %cprobe_events: %s\n",
			   uprobe ? 'u' : 'k',
			   str_error_r(-err, sbuf, sizeof(sbuf)));
}

static void print_both_open_warning(int kerr, int uerr)
{
	/* Both kprobes and uprobes are disabled, warn the user. */
	if (kerr == -ENOTSUP && uerr == -ENOTSUP)
		pr_warning("Tracefs or debugfs is not mounted.\n");
	else if (kerr == -ENOENT && uerr == -ENOENT)
		pr_warning("Please rebuild kernel with CONFIG_KPROBE_EVENTS "
			   "and/or CONFIG_UPROBE_EVENTS.\n");
	else {
		char sbuf[STRERR_BUFSIZE];

		pr_warning("Failed to open kprobe events: %s.\n",
			   str_error_r(-kerr, sbuf, sizeof(sbuf)));
		pr_warning("Failed to open uprobe events: %s.\n",
			   str_error_r(-uerr, sbuf, sizeof(sbuf)));
	}
}

static int open_probe_events(const char *trace_file, bool readwrite)
{
	char buf[PATH_MAX];
	int ret;

	ret = e_snprintf(buf, PATH_MAX, "%s/%s", tracing_path, trace_file);
	if (ret >= 0) {
		pr_debug("Opening %s write=%d\n", buf, readwrite);
		if (readwrite && !probe_event_dry_run)
			ret = open(buf, O_RDWR | O_APPEND, 0);
		else
			ret = open(buf, O_RDONLY, 0);

		if (ret < 0)
			ret = -errno;
	}
	return ret;
}

static int open_kprobe_events(bool readwrite)
{
	return open_probe_events("kprobe_events", readwrite);
}

static int open_uprobe_events(bool readwrite)
{
	return open_probe_events("uprobe_events", readwrite);
}

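/*
 * probe_file__open() opens kprobe_events or uprobe_events depending on
 * PF_FL_UPROBE, read-only unless PF_FL_RW is set.  Illustrative usage
 * (not part of the original file; error handling elided):
 *
 *	int fd = probe_file__open(PF_FL_RW | PF_FL_UPROBE);
 *
 *	if (fd >= 0) {
 *		... add/list/delete uprobe events via fd ...
 *		close(fd);
 *	}
 */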
int probe_file__open(int flag)
{
	int fd;

	if (flag & PF_FL_UPROBE)
		fd = open_uprobe_events(flag & PF_FL_RW);
	else
		fd = open_kprobe_events(flag & PF_FL_RW);
	if (fd < 0)
		print_open_warning(fd, flag & PF_FL_UPROBE);

	return fd;
}

int probe_file__open_both(int *kfd, int *ufd, int flag)
{
	if (!kfd || !ufd)
		return -EINVAL;

	*kfd = open_kprobe_events(flag & PF_FL_RW);
	*ufd = open_uprobe_events(flag & PF_FL_RW);
	if (*kfd < 0 && *ufd < 0) {
		print_both_open_warning(*kfd, *ufd);
		return *kfd;
	}

	return 0;
}

/* Get raw string list of current kprobe_events or uprobe_events */
struct strlist *probe_file__get_rawlist(int fd)
{
	int ret, idx, fddup;
	FILE *fp;
	char buf[MAX_CMDLEN];
	char *p;
	struct strlist *sl;

	if (fd < 0)
		return NULL;

	sl = strlist__new(NULL, NULL);
	if (sl == NULL)
		return NULL;

	fddup = dup(fd);
	if (fddup < 0)
		goto out_free_sl;

	fp = fdopen(fddup, "r");
	if (!fp)
		goto out_close_fddup;

	while (!feof(fp)) {
		p = fgets(buf, MAX_CMDLEN, fp);
		if (!p)
			break;

		idx = strlen(p) - 1;
		if (p[idx] == '\n')
			p[idx] = '\0';
		ret = strlist__add(sl, buf);
		if (ret < 0) {
			pr_debug("strlist__add failed (%d)\n", ret);
			goto out_close_fp;
		}
	}
	fclose(fp);

	return sl;

out_close_fp:
	fclose(fp);
	goto out_free_sl;
out_close_fddup:
	close(fddup);
out_free_sl:
	strlist__delete(sl);
	return NULL;
}

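/*
 * Convert the raw event list into event names, either as "group:event"
 * (include_group) or plain "event".  The parsed probe_trace_event is only
 * used temporarily and is cleared for every entry.
 */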
static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
{
	char buf[128];
	struct strlist *sl, *rawlist;
	struct str_node *ent;
	struct probe_trace_event tev;
	int ret = 0;

	memset(&tev, 0, sizeof(tev));
	rawlist = probe_file__get_rawlist(fd);
	if (!rawlist)
		return NULL;
	sl = strlist__new(NULL, NULL);
	strlist__for_each_entry(ent, rawlist) {
		ret = parse_probe_trace_command(ent->s, &tev);
		if (ret < 0)
			break;
		if (include_group) {
			ret = e_snprintf(buf, 128, "%s:%s", tev.group,
					 tev.event);
			if (ret >= 0)
				ret = strlist__add(sl, buf);
		} else
			ret = strlist__add(sl, tev.event);
		clear_probe_trace_event(&tev);
		if (ret < 0)
			break;
	}
	strlist__delete(rawlist);

	if (ret < 0) {
		strlist__delete(sl);
		return NULL;
	}
	return sl;
}

/* Get current perf-probe event names */
struct strlist *probe_file__get_namelist(int fd)
{
	return __probe_file__get_namelist(fd, false);
}

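/*
 * Append one synthesized trace-probe command to the (k|u)probe_events file
 * referenced by fd.  The write is skipped in dry-run mode.
 */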
int probe_file__add_event(int fd, struct probe_trace_event *tev)
{
	int ret = 0;
	char *buf = synthesize_probe_trace_command(tev);
	char sbuf[STRERR_BUFSIZE];

	if (!buf) {
		pr_debug("Failed to synthesize probe trace event.\n");
		return -EINVAL;
	}

	pr_debug("Writing event: %s\n", buf);
	if (!probe_event_dry_run) {
		if (write(fd, buf, strlen(buf)) < (int)strlen(buf)) {
			ret = -errno;
			pr_warning("Failed to write event: %s\n",
				   str_error_r(errno, sbuf, sizeof(sbuf)));
		}
	}
	free(buf);

	return ret;
}

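/*
 * Delete a single event, given as a "group:event" string node, by writing
 * "-:group/event" to the (k|u)probe_events file.
 */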
static int __del_trace_probe_event(int fd, struct str_node *ent)
{
	char *p;
	char buf[128];
	int ret;

	/* Convert from perf-probe event to trace-probe event */
	ret = e_snprintf(buf, 128, "-:%s", ent->s);
	if (ret < 0)
		goto error;

	p = strchr(buf + 2, ':');
	if (!p) {
		pr_debug("Internal error: %s should have ':' but doesn't.\n",
			 ent->s);
		ret = -ENOTSUP;
		goto error;
	}
	*p = '/';

	pr_debug("Writing event: %s\n", buf);
	ret = write(fd, buf, strlen(buf));
	if (ret < 0) {
		ret = -errno;
		goto error;
	}

	return 0;
error:
	pr_warning("Failed to delete event: %s\n",
		   str_error_r(-ret, buf, sizeof(buf)));
	return ret;
}

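/*
 * Collect the "group:event" names matching @filter into @plist.
 * Returns 0 if at least one event matched, -ENOENT otherwise.
 */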
int probe_file__get_events(int fd, struct strfilter *filter,
			   struct strlist *plist)
{
	struct strlist *namelist;
	struct str_node *ent;
	const char *p;
	int ret = -ENOENT;

	if (!plist)
		return -EINVAL;

	namelist = __probe_file__get_namelist(fd, true);
	if (!namelist)
		return -ENOENT;

	strlist__for_each_entry(ent, namelist) {
		p = strchr(ent->s, ':');
		if ((p && strfilter__compare(filter, p + 1)) ||
		    strfilter__compare(filter, ent->s)) {
			strlist__add(plist, ent->s);
			ret = 0;
		}
	}
	strlist__delete(namelist);

	return ret;
}

int probe_file__del_strlist(int fd, struct strlist *namelist)
{
	int ret = 0;
	struct str_node *ent;

	strlist__for_each_entry(ent, namelist) {
		ret = __del_trace_probe_event(fd, ent);
		if (ret < 0)
			break;
	}
	return ret;
}

int probe_file__del_events(int fd, struct strfilter *filter)
{
	struct strlist *namelist;
	int ret;

	namelist = strlist__new(NULL, NULL);
	if (!namelist)
		return -ENOMEM;

	ret = probe_file__get_events(fd, filter, namelist);
	if (ret < 0)
		goto out;

	ret = probe_file__del_strlist(fd, namelist);
out:
	strlist__delete(namelist);

	return ret;
}

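/*
 * Probe cache: cached probes are stored per build-id in a "probes" file
 * under the build-id cache directory of the target.  Each entry is one
 * command line prefixed by '#' (perf-probe) or '%' (SDT), followed by the
 * trace-probe commands generated from it, matching what probe_cache__load()
 * parses and probe_cache_entry__write() emits below.
 */
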
/* The caller must ensure this entry has been removed from its list */
static void probe_cache_entry__delete(struct probe_cache_entry *entry)
{
	if (entry) {
		BUG_ON(!list_empty(&entry->node));

		strlist__delete(entry->tevlist);
		clear_perf_probe_event(&entry->pev);
		zfree(&entry->spev);
		free(entry);
	}
}

static struct probe_cache_entry *
probe_cache_entry__new(struct perf_probe_event *pev)
{
	struct probe_cache_entry *entry = zalloc(sizeof(*entry));

	if (entry) {
		INIT_LIST_HEAD(&entry->node);
		entry->tevlist = strlist__new(NULL, NULL);
		if (!entry->tevlist)
			zfree(&entry);
		else if (pev) {
			entry->spev = synthesize_perf_probe_command(pev);
			if (!entry->spev ||
			    perf_probe_event__copy(&entry->pev, pev) < 0) {
				probe_cache_entry__delete(entry);
				return NULL;
			}
		}
	}

	return entry;
}

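/*
 * Re-parse the cached trace commands of @entry into a newly allocated
 * array of probe_trace_events; the caller owns the array.  Returns the
 * number of events or a negative error code.
 */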
int probe_cache_entry__get_event(struct probe_cache_entry *entry,
				 struct probe_trace_event **tevs)
{
	struct probe_trace_event *tev;
	struct str_node *node;
	int ret, i;

	ret = strlist__nr_entries(entry->tevlist);
	if (ret > probe_conf.max_probes)
		return -E2BIG;

	*tevs = zalloc(ret * sizeof(*tev));
	if (!*tevs)
		return -ENOMEM;

	i = 0;
	strlist__for_each_entry(node, entry->tevlist) {
		tev = &(*tevs)[i++];
		ret = parse_probe_trace_command(node->s, tev);
		if (ret < 0)
			break;
	}
	return i;
}

/* For the kernel probe caches, pass target = NULL or DSO__NAME_KALLSYMS */
static int probe_cache__open(struct probe_cache *pcache, const char *target)
{
	char cpath[PATH_MAX];
	char sbuildid[SBUILD_ID_SIZE];
	char *dir_name = NULL;
	bool is_kallsyms = false;
	int ret, fd;

	if (target && build_id_cache__cached(target)) {
		/* This is a cached buildid */
		strncpy(sbuildid, target, SBUILD_ID_SIZE);
		dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
		goto found;
	}

	if (!target || !strcmp(target, DSO__NAME_KALLSYMS)) {
		target = DSO__NAME_KALLSYMS;
		is_kallsyms = true;
		ret = sysfs__sprintf_build_id("/", sbuildid);
	} else
		ret = filename__sprintf_build_id(target, sbuildid);

	if (ret < 0) {
		pr_debug("Failed to get build-id from %s.\n", target);
		return ret;
	}

	/* If we have no buildid cache, make it */
	if (!build_id_cache__cached(sbuildid)) {
		ret = build_id_cache__add_s(sbuildid, target,
					    is_kallsyms, false);
		if (ret < 0) {
			pr_debug("Failed to add build-id cache: %s\n", target);
			return ret;
		}
	}

	dir_name = build_id_cache__cachedir(sbuildid, target, is_kallsyms,
					    false);
found:
	if (!dir_name) {
		pr_debug("Failed to get cache from %s\n", target);
		return -ENOMEM;
	}

	snprintf(cpath, PATH_MAX, "%s/probes", dir_name);
	fd = open(cpath, O_CREAT | O_RDWR, 0644);
	if (fd < 0)
		pr_debug("Failed to open cache(%d): %s\n", fd, cpath);
	free(dir_name);
	pcache->fd = fd;

	return fd;
}

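/*
 * Load the cache file into pcache->entries.  A line starting with '#'
 * (perf-probe) or '%' (SDT) opens a new entry; the following lines are the
 * trace-probe commands belonging to that entry.
 */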
static int probe_cache__load(struct probe_cache *pcache)
{
	struct probe_cache_entry *entry = NULL;
	char buf[MAX_CMDLEN], *p;
	int ret = 0, fddup;
	FILE *fp;

	fddup = dup(pcache->fd);
	if (fddup < 0)
		return -errno;
	fp = fdopen(fddup, "r");
	if (!fp) {
		close(fddup);
		return -EINVAL;
	}

	while (!feof(fp)) {
		if (!fgets(buf, MAX_CMDLEN, fp))
			break;
		p = strchr(buf, '\n');
		if (p)
			*p = '\0';
		/* #perf_probe_event or %sdt_event */
		if (buf[0] == '#' || buf[0] == '%') {
			entry = probe_cache_entry__new(NULL);
			if (!entry) {
				ret = -ENOMEM;
				goto out;
			}
			if (buf[0] == '%')
				entry->sdt = true;
			entry->spev = strdup(buf + 1);
			if (entry->spev)
				ret = parse_perf_probe_command(buf + 1,
							       &entry->pev);
			else
				ret = -ENOMEM;
			if (ret < 0) {
				probe_cache_entry__delete(entry);
				goto out;
			}
			list_add_tail(&entry->node, &pcache->entries);
		} else {	/* trace_probe_event */
			if (!entry) {
				ret = -EINVAL;
				goto out;
			}
			strlist__add(entry->tevlist, buf);
		}
	}
out:
	fclose(fp);
	return ret;
}

static struct probe_cache *probe_cache__alloc(void)
{
	struct probe_cache *pcache = zalloc(sizeof(*pcache));

	if (pcache) {
		INIT_LIST_HEAD(&pcache->entries);
		pcache->fd = -EINVAL;
	}
	return pcache;
}

void probe_cache__purge(struct probe_cache *pcache)
{
	struct probe_cache_entry *entry, *n;

	list_for_each_entry_safe(entry, n, &pcache->entries, node) {
		list_del_init(&entry->node);
		probe_cache_entry__delete(entry);
	}
}

void probe_cache__delete(struct probe_cache *pcache)
{
	if (!pcache)
		return;

	probe_cache__purge(pcache);
	if (pcache->fd > 0)
		close(pcache->fd);
	free(pcache);
}

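/*
 * Typical probe cache lifecycle (illustrative sketch, not part of the
 * original file; pev/tevs/ntevs are placeholders and error handling is
 * elided):
 *
 *	struct probe_cache *pcache = probe_cache__new("/bin/ls");
 *
 *	if (pcache) {
 *		probe_cache__add_entry(pcache, pev, tevs, ntevs);
 *		probe_cache__commit(pcache);
 *		probe_cache__delete(pcache);
 *	}
 */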
struct probe_cache *probe_cache__new(const char *target)
{
	struct probe_cache *pcache = probe_cache__alloc();
	int ret;

	if (!pcache)
		return NULL;

	ret = probe_cache__open(pcache, target);
	if (ret < 0) {
		pr_debug("Cache open error: %d\n", ret);
		goto out_err;
	}

	ret = probe_cache__load(pcache);
	if (ret < 0) {
		pr_debug("Cache read error: %d\n", ret);
		goto out_err;
	}

	return pcache;

out_err:
	probe_cache__delete(pcache);
	return NULL;
}

static bool streql(const char *a, const char *b)
{
	if (a == b)
		return true;

	if (!a || !b)
		return false;

	return !strcmp(a, b);
}

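/*
 * Look up a cached entry matching @pev, either by group/event name or by
 * the synthesized perf-probe command string.
 */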
struct probe_cache_entry *
probe_cache__find(struct probe_cache *pcache, struct perf_probe_event *pev)
{
	struct probe_cache_entry *entry = NULL;
	char *cmd = synthesize_perf_probe_command(pev);

	if (!cmd)
		return NULL;

	for_each_probe_cache_entry(entry, pcache) {
		if (pev->sdt) {
			if (entry->pev.event &&
			    streql(entry->pev.event, pev->event) &&
			    (!pev->group ||
			     streql(entry->pev.group, pev->group)))
				goto found;

			continue;
		}
		/* Hit if same event name or same command-string */
		if ((pev->event &&
		     (streql(entry->pev.group, pev->group) &&
		      streql(entry->pev.event, pev->event))) ||
		    (!strcmp(entry->spev, cmd)))
			goto found;
	}
	entry = NULL;

found:
	free(cmd);
	return entry;
}

struct probe_cache_entry *
probe_cache__find_by_name(struct probe_cache *pcache,
			  const char *group, const char *event)
{
	struct probe_cache_entry *entry = NULL;

	for_each_probe_cache_entry(entry, pcache) {
		/* Hit if same group and event name */
		if (streql(entry->pev.group, group) &&
		    streql(entry->pev.event, event))
			goto found;
	}
	entry = NULL;

found:
	return entry;
}

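/*
 * Replace any existing cache entry for @pev with a new one built from the
 * given trace events.  Events without a resolved symbol are skipped.
 */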
int probe_cache__add_entry(struct probe_cache *pcache,
			   struct perf_probe_event *pev,
			   struct probe_trace_event *tevs, int ntevs)
{
	struct probe_cache_entry *entry = NULL;
	char *command;
	int i, ret = 0;

	if (!pcache || !pev || !tevs || ntevs <= 0) {
		ret = -EINVAL;
		goto out_err;
	}

	/* Remove old cache entry */
	entry = probe_cache__find(pcache, pev);
	if (entry) {
		list_del_init(&entry->node);
		probe_cache_entry__delete(entry);
	}

	ret = -ENOMEM;
	entry = probe_cache_entry__new(pev);
	if (!entry)
		goto out_err;

	for (i = 0; i < ntevs; i++) {
		if (!tevs[i].point.symbol)
			continue;

		command = synthesize_probe_trace_command(&tevs[i]);
		if (!command)
			goto out_err;
		strlist__add(entry->tevlist, command);
		free(command);
	}
	list_add_tail(&entry->node, &pcache->entries);
	pr_debug("Added probe cache: %d\n", ntevs);
	return 0;

out_err:
	pr_debug("Failed to add probe caches\n");
	probe_cache_entry__delete(entry);
	return ret;
}

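/*
 * SDT (Statically Defined Tracing) markers can only be scanned when perf is
 * built with gelf_getnote() support (HAVE_GELF_GETNOTE_SUPPORT).
 */
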
#ifdef HAVE_GELF_GETNOTE_SUPPORT
static unsigned long long sdt_note__get_addr(struct sdt_note *note)
{
	return note->bit32 ? (unsigned long long)note->addr.a32[0]
			   : (unsigned long long)note->addr.a64[0];
}

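/*
 * Scan @pathname for SDT markers and add one cache entry per marker under
 * the "sdt_<provider>" group, with an address-based probe definition.
 */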
int probe_cache__scan_sdt(struct probe_cache *pcache, const char *pathname)
{
	struct probe_cache_entry *entry = NULL;
	struct list_head sdtlist;
	struct sdt_note *note;
	char *buf;
	char sdtgrp[64];
	int ret;

	INIT_LIST_HEAD(&sdtlist);
	ret = get_sdt_note_list(&sdtlist, pathname);
	if (ret < 0) {
		pr_debug4("Failed to get sdt note: %d\n", ret);
		return ret;
	}
	list_for_each_entry(note, &sdtlist, note_list) {
		ret = snprintf(sdtgrp, 64, "sdt_%s", note->provider);
		if (ret < 0)
			break;
		/* Try to find same-name entry */
		entry = probe_cache__find_by_name(pcache, sdtgrp, note->name);
		if (!entry) {
			entry = probe_cache_entry__new(NULL);
			if (!entry) {
				ret = -ENOMEM;
				break;
			}
			entry->sdt = true;
			ret = asprintf(&entry->spev, "%s:%s=%s", sdtgrp,
				       note->name, note->name);
			if (ret < 0)
				break;
			entry->pev.event = strdup(note->name);
			entry->pev.group = strdup(sdtgrp);
			list_add_tail(&entry->node, &pcache->entries);
		}
		ret = asprintf(&buf, "p:%s/%s %s:0x%llx",
			       sdtgrp, note->name, pathname,
			       sdt_note__get_addr(note));
		if (ret < 0)
			break;
		strlist__add(entry->tevlist, buf);
		free(buf);
		entry = NULL;
	}
	if (entry) {
		list_del_init(&entry->node);
		probe_cache_entry__delete(entry);
	}
	cleanup_sdt_note_list(&sdtlist);

	return ret;
}
#endif

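/*
 * Write one cache entry: the '#'/'%' prefixed command line, then each trace
 * command on its own line.  On a short write the file is truncated back to
 * its previous size so the cache is not left corrupted.
 */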
static int probe_cache_entry__write(struct probe_cache_entry *entry, int fd)
{
	struct str_node *snode;
	struct stat st;
	struct iovec iov[3];
	const char *prefix = entry->sdt ? "%" : "#";
	int ret;

	/* Save stat for rollback */
	ret = fstat(fd, &st);
	if (ret < 0)
		return ret;

	pr_debug("Writing cache: %s%s\n", prefix, entry->spev);
	iov[0].iov_base = (void *)prefix; iov[0].iov_len = 1;
	iov[1].iov_base = entry->spev; iov[1].iov_len = strlen(entry->spev);
	iov[2].iov_base = (void *)"\n"; iov[2].iov_len = 1;
	ret = writev(fd, iov, 3);
	if (ret < (int)iov[1].iov_len + 2)
		goto rollback;

	strlist__for_each_entry(snode, entry->tevlist) {
		iov[0].iov_base = (void *)snode->s;
		iov[0].iov_len = strlen(snode->s);
		iov[1].iov_base = (void *)"\n"; iov[1].iov_len = 1;
		ret = writev(fd, iov, 2);
		if (ret < (int)iov[0].iov_len + 1)
			goto rollback;
	}
	return 0;

rollback:
	/* Rollback to avoid cache file corruption */
	if (ret > 0)
		ret = -1;
	if (ftruncate(fd, st.st_size) < 0)
		ret = -2;

	return ret;
}

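/*
 * Rewrite the whole cache file from the in-memory entry list: seek to the
 * start, truncate, then write every entry back.
 */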
int probe_cache__commit(struct probe_cache *pcache)
{
	struct probe_cache_entry *entry;
	int ret = 0;

	/* TBD: if we do not update existing entries, skip the rewrite */
	ret = lseek(pcache->fd, 0, SEEK_SET);
	if (ret < 0)
		goto out;

	ret = ftruncate(pcache->fd, 0);
	if (ret < 0)
		goto out;

	for_each_probe_cache_entry(entry, pcache) {
		ret = probe_cache_entry__write(entry, pcache->fd);
		pr_debug("Cache committed: %d\n", ret);
		if (ret < 0)
			break;
	}
out:
	return ret;
}

static bool probe_cache_entry__compare(struct probe_cache_entry *entry,
				       struct strfilter *filter)
{
	char buf[128], *ptr = entry->spev;

	if (entry->pev.event) {
		snprintf(buf, 128, "%s:%s", entry->pev.group, entry->pev.event);
		ptr = buf;
	}
	return strfilter__compare(filter, ptr);
}

int probe_cache__filter_purge(struct probe_cache *pcache,
			      struct strfilter *filter)
{
	struct probe_cache_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &pcache->entries, node) {
		if (probe_cache_entry__compare(entry, filter)) {
			pr_info("Removed cached event: %s\n", entry->spev);
			list_del_init(&entry->node);
			probe_cache_entry__delete(entry);
		}
	}

	return 0;
}

static int probe_cache__show_entries(struct probe_cache *pcache,
				     struct strfilter *filter)
{
	struct probe_cache_entry *entry;

	for_each_probe_cache_entry(entry, pcache) {
		if (probe_cache_entry__compare(entry, filter))
			printf("%s\n", entry->spev);
	}

	return 0;
}

/* Show all cached probes */
int probe_cache__show_all_caches(struct strfilter *filter)
{
	struct probe_cache *pcache;
	struct strlist *bidlist;
	struct str_node *nd;
	char *buf = strfilter__string(filter);

	pr_debug("list cache with filter: %s\n", buf);
	free(buf);

	bidlist = build_id_cache__list_all(true);
	if (!bidlist) {
		pr_debug("Failed to get buildids: %d\n", errno);
		return -EINVAL;
	}
	strlist__for_each_entry(nd, bidlist) {
		pcache = probe_cache__new(nd->s);
		if (!pcache)
			continue;
		if (!list_empty(&pcache->entries)) {
			buf = build_id_cache__origname(nd->s);
			printf("%s (%s):\n", buf, nd->s);
			free(buf);
			probe_cache__show_entries(pcache, filter);
		}
		probe_cache__delete(pcache);
	}
	strlist__delete(bidlist);

	return 0;
}

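/*
 * Fetch-argument types and whether each is supported by default.  Types
 * that are not enabled by default are checked once against the "type:"
 * line of <tracefs>/README and the result is cached.
 */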
static struct {
	const char *pattern;
	bool	avail;
	bool	checked;
} probe_type_table[] = {
#define DEFINE_TYPE(idx, pat, def_avail)	\
	[idx] = {.pattern = pat, .avail = (def_avail)}
	DEFINE_TYPE(PROBE_TYPE_U, "* u8/16/32/64,*", true),
	DEFINE_TYPE(PROBE_TYPE_S, "* s8/16/32/64,*", true),
	DEFINE_TYPE(PROBE_TYPE_X, "* x8/16/32/64,*", false),
	DEFINE_TYPE(PROBE_TYPE_STRING, "* string,*", true),
	DEFINE_TYPE(PROBE_TYPE_BITFIELD,
		    "* b<bit-width>@<bit-offset>/<container-size>", true),
};

bool probe_type_is_available(enum probe_type type)
{
	FILE *fp;
	char *buf = NULL;
	size_t len = 0;
	bool target_line = false;
	bool ret;

	if (type >= PROBE_TYPE_END)
		return false;

	ret = probe_type_table[type].avail;
	/* No need to check types that are supported by default */
	if (ret || probe_type_table[type].checked)
		return ret;

	if (asprintf(&buf, "%s/README", tracing_path) < 0)
		return ret;

	fp = fopen(buf, "r");
	if (!fp)
		goto end;

	zfree(&buf);
	while (getline(&buf, &len, fp) > 0 && !ret) {
		if (!target_line) {
			target_line = !!strstr(buf, " type: ");
			if (!target_line)
				continue;
		} else if (strstr(buf, "\t ") != buf)
			break;
		ret = strglobmatch(buf, probe_type_table[type].pattern);
	}
	/* Cache the result */
	probe_type_table[type].checked = true;
	probe_type_table[type].avail = ret;

	fclose(fp);
end:
	free(buf);

	return ret;
}