1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2017, 2018, 2019, 2021 BMW Car IT GmbH
4 * Author: Viktor Rosendahl (viktor.rosendahl@bmw.de)
8 #define _POSIX_C_SOURCE 200809L
21 #include <linux/unistd.h>
23 #include <sys/inotify.h>
28 static const char *prg_name;
29 static const char *prg_unknown = "unknown program name";
33 static int sched_policy;
34 static bool sched_policy_set;
37 static bool sched_pri_set;
39 static bool trace_enable = true;
40 static bool setup_ftrace = true;
41 static bool use_random_sleep;
44 C(FUNC_TR, "function-trace"), \
45 C(DISP_GR, "display-graph"), \
49 #define C(a, b) OPTIDX_##a
58 static const char *const optstr[] = {
68 static bool use_options[OPTIDX_NR];
70 static char inotify_buffer[655360];
/* Branch-prediction hints, mirroring the kernel's likely()/unlikely(). */
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
/*
 * Render a boolean as a human-readable string.
 * The argument is parenthesized for macro hygiene (the original
 * "(x ? ...)" form could mis-bind a comma-containing argument).
 */
#define bool2str(x) ((x) ? "true" : "false")
76 #define DEFAULT_NR_PRINTER_THREADS (3)
77 static unsigned int nr_threads = DEFAULT_NR_PRINTER_THREADS;
79 #define DEFAULT_TABLE_SIZE (2)
80 static unsigned int table_startsize = DEFAULT_TABLE_SIZE;
84 #define verbose_sizechange() (verbosity >= 1)
85 #define verbose_lostevent() (verbosity >= 2)
86 #define verbose_ftrace() (verbosity >= 1)
88 #define was_changed(ORIG, CUR) (strcmp(ORIG, CUR) != 0)
89 #define needs_change(CUR, WANTED) (strcmp(CUR, WANTED) != 0)
91 static const char *debug_tracefile;
92 static const char *debug_tracefile_dflt;
93 static const char *debug_maxlat;
94 static const char *debug_maxlat_dflt;
95 static const char * const DEBUG_NOFILE = "[file not found]";
97 static const char * const TR_MAXLAT = "tracing_max_latency";
98 static const char * const TR_THRESH = "tracing_thresh";
99 static const char * const TR_CURRENT = "current_tracer";
100 static const char * const TR_OPTIONS = "trace_options";
102 static const char * const NOP_TRACER = "nop";
104 static const char * const OPT_NO_PREFIX = "no";
106 #define DFLT_THRESHOLD_US "0"
107 static const char *threshold = DFLT_THRESHOLD_US;
109 #define DEV_URANDOM "/dev/urandom"
110 #define RT_DEFAULT_PRI (99)
111 #define DEFAULT_PRI (0)
113 #define USEC_PER_MSEC (1000L)
114 #define NSEC_PER_USEC (1000L)
115 #define NSEC_PER_MSEC (USEC_PER_MSEC * NSEC_PER_USEC)
117 #define MSEC_PER_SEC (1000L)
118 #define USEC_PER_SEC (USEC_PER_MSEC * MSEC_PER_SEC)
119 #define NSEC_PER_SEC (NSEC_PER_MSEC * MSEC_PER_SEC)
121 #define SLEEP_TIME_MS_DEFAULT (1000L)
122 #define TRY_PRINTMUTEX_MS (1000)
124 static long sleep_time = (USEC_PER_MSEC * SLEEP_TIME_MS_DEFAULT);
126 static const char * const queue_full_warning =
127 "Could not queue trace for printing. It is likely that events happen faster\n"
128 "than what they can be printed. Probably partly because of random sleeping\n";
130 static const char * const no_tracer_msg =
131 "Could not find any tracers! Running this program as root may help!\n";
133 static const char * const no_latency_tr_msg =
134 "No latency tracers are supported by your kernel!\n";
142 static const struct policy policies[] = {
143 { "other", SCHED_OTHER, DEFAULT_PRI },
144 { "batch", SCHED_BATCH, DEFAULT_PRI },
145 { "idle", SCHED_IDLE, DEFAULT_PRI },
146 { "rr", SCHED_RR, RT_DEFAULT_PRI },
147 { "fifo", SCHED_FIFO, RT_DEFAULT_PRI },
148 { NULL, 0, DEFAULT_PRI }
152 * The default tracer will be the first on this list that is supported by the
153 * currently running Linux kernel.
155 static const char * const relevant_tracers[] = {
165 /* This is the list of tracers for which random sleep makes sense */
166 static const char * const random_tracers[] = {
173 static const char *current_tracer;
174 static bool force_tracer;
176 struct ftrace_state {
180 bool opt_valid[OPTIDX_NR];
181 pthread_mutex_t mutex;
186 int ticket_completed_ref;
191 int ticket_completed;
192 pthread_mutex_t mutex;
195 pthread_mutex_t cnt_mutex;
203 static struct print_state printstate;
204 static struct ftrace_state save_state;
205 volatile sig_atomic_t signal_flag;
207 #define PROB_TABLE_MAX_SIZE (1000)
209 int probabilities[PROB_TABLE_MAX_SIZE];
214 pthread_mutex_t mutex;
217 static struct sleep_table sleeptable;
219 #define QUEUE_SIZE (10)
222 struct entry entries[QUEUE_SIZE];
225 pthread_mutex_t mutex;
229 #define MAX_THREADS (40)
231 struct queue printqueue;
232 pthread_t printthread[MAX_THREADS];
233 pthread_mutex_t print_mtx;
234 #define PRINT_BUFFER_SIZE (16 * 1024 * 1024)
236 static void cleanup_exit(int status);
237 static int set_trace_opt(const char *opt, bool value);
/*
 * malloc() wrapper that never returns NULL to the caller.
 * On allocation failure it prints a warning via warn() (includes the
 * errno message) and terminates through cleanup_exit().
 */
static __always_inline void *malloc_or_die(size_t size)
	void *ptr = malloc(size);
	if (unlikely(ptr == NULL)) {
		warn("malloc() failed");
		cleanup_exit(EXIT_FAILURE);
/*
 * Like malloc_or_die() but terminates with err() instead of
 * cleanup_exit(); intended for paths that may already be inside
 * cleanup_exit() (see get_no_opt()) where recursing is not safe.
 *
 * Fix: the original passed 0 as the exit status to err(), so an
 * allocation failure reported *success* to the parent process.  Use
 * EXIT_FAILURE, consistent with every other fatal err() call in this
 * file (e.g. clock_gettime_or_die(), open_stdout()).
 */
static __always_inline void *malloc_or_die_nocleanup(size_t size)
{
	void *ptr = malloc(size);

	if (unlikely(ptr == NULL))
		err(EXIT_FAILURE, "malloc() failed");
	return ptr;
}
/*
 * write() wrapper: emit count bytes from buf to fd.
 * A failing write() produces a warning and terminates via cleanup_exit().
 * NOTE(review): the lines elided from this view appear to handle short
 * and interrupted writes (a loop around write()) - confirm against the
 * full source before relying on that.
 */
static __always_inline void write_or_die(int fd, const char *buf, size_t count)
	r = write(fd, buf, count);
	if (unlikely(r < 0)) {
		warn("write() failed");
		cleanup_exit(EXIT_FAILURE);
276 static __always_inline void clock_gettime_or_die(clockid_t clk_id,
279 int r = clock_gettime(clk_id, tp);
281 if (unlikely(r != 0))
282 err(EXIT_FAILURE, "clock_gettime() failed");
/*
 * sigemptyset() wrapper: initialize *s to the empty signal set or
 * terminate through cleanup_exit() on failure.
 */
static __always_inline void sigemptyset_or_die(sigset_t *s)
	if (unlikely(sigemptyset(s) != 0)) {
		warn("sigemptyset() failed");
		cleanup_exit(EXIT_FAILURE);
293 static __always_inline void sigaddset_or_die(sigset_t *s, int signum)
295 if (unlikely(sigaddset(s, signum) != 0)) {
296 warn("sigemptyset() failed");
297 cleanup_exit(EXIT_FAILURE);
301 static __always_inline void sigaction_or_die(int signum,
302 const struct sigaction *act,
303 struct sigaction *oldact)
305 if (unlikely(sigaction(signum, act, oldact) != 0)) {
306 warn("sigaction() failed");
307 cleanup_exit(EXIT_FAILURE);
311 static void open_stdout(void)
313 if (setvbuf(stdout, NULL, _IONBF, 0) != 0)
314 err(EXIT_FAILURE, "setvbuf() failed");
315 fd_stdout = fileno(stdout);
317 err(EXIT_FAILURE, "fileno() failed");
321 * It's not worth it to call cleanup_exit() from mutex functions because
322 * cleanup_exit() uses mutexes.
/*
 * pthread_mutex_lock() wrapper.  Fatal on error via err() rather than
 * cleanup_exit(): as the comment above (L140-141) notes, cleanup_exit()
 * itself takes mutexes, so calling it from here could deadlock/recurse.
 * pthread functions return the error code instead of setting errno, so
 * the result is stored into errno for err() to format.
 */
static __always_inline void mutex_lock(pthread_mutex_t *mtx)
	errno = pthread_mutex_lock(mtx);
		err(EXIT_FAILURE, "pthread_mutex_lock() failed");
/*
 * pthread_mutex_unlock() wrapper; fatal on error via err().
 * See mutex_lock() for why cleanup_exit() is not used here.
 */
static __always_inline void mutex_unlock(pthread_mutex_t *mtx)
	errno = pthread_mutex_unlock(mtx);
		err(EXIT_FAILURE, "pthread_mutex_unlock() failed");
339 static __always_inline void cond_signal(pthread_cond_t *cond)
341 errno = pthread_cond_signal(cond);
343 err(EXIT_FAILURE, "pthread_cond_signal() failed");
346 static __always_inline void cond_wait(pthread_cond_t *restrict cond,
347 pthread_mutex_t *restrict mutex)
349 errno = pthread_cond_wait(cond, mutex);
351 err(EXIT_FAILURE, "pthread_cond_wait() failed");
354 static __always_inline void cond_broadcast(pthread_cond_t *cond)
356 errno = pthread_cond_broadcast(cond);
358 err(EXIT_FAILURE, "pthread_cond_broadcast() failed");
361 static __always_inline void
362 mutex_init(pthread_mutex_t *mutex,
363 const pthread_mutexattr_t *attr)
365 errno = pthread_mutex_init(mutex, attr);
367 err(EXIT_FAILURE, "pthread_mutex_init() failed");
370 static __always_inline void mutexattr_init(pthread_mutexattr_t *attr)
372 errno = pthread_mutexattr_init(attr);
374 err(EXIT_FAILURE, "pthread_mutexattr_init() failed");
377 static __always_inline void mutexattr_destroy(pthread_mutexattr_t *attr)
379 errno = pthread_mutexattr_destroy(attr);
381 err(EXIT_FAILURE, "pthread_mutexattr_destroy() failed");
384 static __always_inline void mutexattr_settype(pthread_mutexattr_t *attr,
387 errno = pthread_mutexattr_settype(attr, type);
389 err(EXIT_FAILURE, "pthread_mutexattr_settype() failed");
392 static __always_inline void condattr_init(pthread_condattr_t *attr)
394 errno = pthread_condattr_init(attr);
396 err(EXIT_FAILURE, "pthread_condattr_init() failed");
399 static __always_inline void condattr_destroy(pthread_condattr_t *attr)
401 errno = pthread_condattr_destroy(attr);
403 err(EXIT_FAILURE, "pthread_condattr_destroy() failed");
406 static __always_inline void condattr_setclock(pthread_condattr_t *attr,
409 errno = pthread_condattr_setclock(attr, clock_id);
411 err(EXIT_FAILURE, "pthread_condattr_setclock() failed");
414 static __always_inline void cond_init(pthread_cond_t *cond,
415 const pthread_condattr_t *attr)
417 errno = pthread_cond_init(cond, attr);
419 err(EXIT_FAILURE, "pthread_cond_init() failed");
422 static __always_inline int
423 cond_timedwait(pthread_cond_t *restrict cond,
424 pthread_mutex_t *restrict mutex,
425 const struct timespec *restrict abstime)
427 errno = pthread_cond_timedwait(cond, mutex, abstime);
428 if (errno && errno != ETIMEDOUT)
429 err(EXIT_FAILURE, "pthread_cond_timedwait() failed");
433 static void init_printstate(void)
435 pthread_condattr_t cattr;
437 printstate.ticket_counter = 0;
438 printstate.ticket_completed = 0;
441 mutex_init(&printstate.mutex, NULL);
443 condattr_init(&cattr);
444 condattr_setclock(&cattr, CLOCK_MONOTONIC);
445 cond_init(&printstate.cond, &cattr);
446 condattr_destroy(&cattr);
449 static void init_print_mtx(void)
451 pthread_mutexattr_t mattr;
453 mutexattr_init(&mattr);
454 mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
455 mutex_init(&print_mtx, &mattr);
456 mutexattr_destroy(&mattr);
460 static void signal_blocking(int how)
464 sigemptyset_or_die(&s);
465 sigaddset_or_die(&s, SIGHUP);
466 sigaddset_or_die(&s, SIGTERM);
467 sigaddset_or_die(&s, SIGINT);
469 errno = pthread_sigmask(how, &s, NULL);
470 if (unlikely(errno)) {
471 warn("pthread_sigmask() failed");
472 cleanup_exit(EXIT_FAILURE);
476 static void signal_handler(int num)
481 static void setup_sig_handler(void)
485 memset(&sa, 0, sizeof(sa));
486 sa.sa_handler = signal_handler;
488 sigaction_or_die(SIGHUP, &sa, NULL);
489 sigaction_or_die(SIGTERM, &sa, NULL);
490 sigaction_or_die(SIGINT, &sa, NULL);
493 static void process_signal(int signal)
497 name = strsignal(signal);
499 printf("Received signal %d\n", signal);
501 printf("Received signal %d (%s)\n", signal, name);
502 cleanup_exit(EXIT_SUCCESS);
505 static __always_inline void check_signals(void)
507 int signal = signal_flag;
509 if (unlikely(signal))
510 process_signal(signal);
513 static __always_inline void get_time_in_future(struct timespec *future,
518 clock_gettime_or_die(CLOCK_MONOTONIC, future);
519 future->tv_sec += time_us / USEC_PER_SEC;
520 nsec = future->tv_nsec + (time_us * NSEC_PER_USEC) % NSEC_PER_SEC;
521 if (nsec >= NSEC_PER_SEC) {
522 future->tv_nsec = nsec % NSEC_PER_SEC;
/*
 * Return true iff CLOCK_MONOTONIC "now" is at or past *time.
 * Whole seconds are compared first; the nanosecond fields only decide
 * when tv_sec is equal.
 */
static __always_inline bool time_has_passed(const struct timespec *time)
	clock_gettime_or_die(CLOCK_MONOTONIC, &now);
	if (now.tv_sec > time->tv_sec)
	if (now.tv_sec < time->tv_sec)
	/* tv_sec fields are equal: tv_nsec breaks the tie */
	return (now.tv_nsec >= time->tv_nsec);
/*
 * Spin on pthread_mutex_trylock() until the mutex is acquired or until
 * roughly time_ms milliseconds have elapsed.  EBUSY is the only
 * tolerated failure; anything else is fatal.  The return value
 * presumably reports whether the lock was taken (errno == 0 after the
 * loop) - confirm against the full source.  Used by cleanup_exit() so
 * a wedged print thread cannot block program exit forever.
 */
static bool mutex_trylock_limit(pthread_mutex_t *mutex, int time_ms)
	long time_us = time_ms * USEC_PER_MSEC;
	struct timespec limit;

	get_time_in_future(&limit, time_us);
		errno = pthread_mutex_trylock(mutex);
		if (errno && errno != EBUSY)
			err(EXIT_FAILURE, "pthread_mutex_trylock() failed");
	} while (errno && !time_has_passed(&limit));
553 static void restore_trace_opts(const struct ftrace_state *state,
559 for (i = 0; i < OPTIDX_NR; i++)
560 if (state->opt_valid[i] && state->opt[i] != cur[i]) {
561 r = set_trace_opt(optstr[i], state->opt[i]);
563 warnx("Failed to restore the %s option to %s",
564 optstr[i], bool2str(state->opt[i]));
565 else if (verbose_ftrace())
566 printf("Restored the %s option in %s to %s\n",
567 optstr[i], TR_OPTIONS,
568 bool2str(state->opt[i]));
572 static char *read_file(const char *file, enum errhandling h)
576 static const char *emsg = "Failed to read the %s file";
578 r = tracefs_instance_file_read(NULL, file, &psize);
582 if (h == ERR_CLEANUP)
583 cleanup_exit(EXIT_FAILURE);
585 errx(EXIT_FAILURE, emsg, file);
588 if (r && r[psize - 1] == '\n')
593 static void restore_file(const char *file, char **saved, const char *cur)
595 if (*saved && was_changed(*saved, cur)) {
596 if (tracefs_instance_file_write(NULL, file, *saved) < 0)
597 warnx("Failed to restore %s to %s!", file, *saved);
598 else if (verbose_ftrace())
599 printf("Restored %s to %s\n", file, *saved);
605 static void restore_ftrace(void)
607 mutex_lock(&save_state.mutex);
609 restore_file(TR_CURRENT, &save_state.tracer, current_tracer);
610 restore_file(TR_THRESH, &save_state.thresh, threshold);
611 restore_trace_opts(&save_state, use_options);
613 mutex_unlock(&save_state.mutex);
616 static void cleanup_exit(int status)
624 * We try the print_mtx for 1 sec in order to avoid garbled
625 * output if possible, but if it cannot be obtained we proceed anyway.
627 mutex_trylock_limit(&print_mtx, TRY_PRINTMUTEX_MS);
629 maxlat = read_file(TR_MAXLAT, ERR_WARN);
631 printf("The maximum detected latency was: %sus\n", maxlat);
637 * We do not need to unlock the print_mtx here because we will exit at
638 * the end of this function. Unlocking print_mtx causes problems if a
639 * print thread happens to be waiting for the mutex because we have
640 * just changed the ftrace settings to the original and thus the
641 * print thread would output incorrect data from ftrace.
646 static void init_save_state(void)
648 pthread_mutexattr_t mattr;
650 mutexattr_init(&mattr);
651 mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
652 mutex_init(&save_state.mutex, &mattr);
653 mutexattr_destroy(&mattr);
655 save_state.tracer = NULL;
656 save_state.thresh = NULL;
657 save_state.opt_valid[OPTIDX_FUNC_TR] = false;
658 save_state.opt_valid[OPTIDX_DISP_GR] = false;
661 static int printstate_next_ticket(struct entry *req)
665 r = ++(printstate.ticket_counter);
667 req->ticket_completed_ref = printstate.ticket_completed;
668 cond_broadcast(&printstate.cond);
672 static __always_inline
673 void printstate_mark_req_completed(const struct entry *req)
675 if (req->ticket > printstate.ticket_completed)
676 printstate.ticket_completed = req->ticket;
679 static __always_inline
680 bool printstate_has_new_req_arrived(const struct entry *req)
682 return (printstate.ticket_counter != req->ticket);
/*
 * Increment the count of in-flight printouts under cnt_mutex.
 * The post-increment value is captured in `value`; the elided tail of
 * the function presumably returns it - confirm against the full source.
 */
static __always_inline int printstate_cnt_inc(void)
	mutex_lock(&printstate.cnt_mutex);
	value = ++printstate.cnt;
	mutex_unlock(&printstate.cnt_mutex);
695 static __always_inline int printstate_cnt_dec(void)
699 mutex_lock(&printstate.cnt_mutex);
700 value = --printstate.cnt;
701 mutex_unlock(&printstate.cnt_mutex);
705 static __always_inline int printstate_cnt_read(void)
709 mutex_lock(&printstate.cnt_mutex);
710 value = printstate.cnt;
711 mutex_unlock(&printstate.cnt_mutex);
715 static __always_inline
716 bool prev_req_won_race(const struct entry *req)
718 return (printstate.ticket_completed != req->ticket_completed_ref);
721 static void sleeptable_resize(int size, bool printout, struct short_msg *msg)
727 if (unlikely(size > PROB_TABLE_MAX_SIZE))
728 bytes = snprintf(msg->buf, sizeof(msg->buf),
729 "Cannot increase probability table to %d (maximum size reached)\n", size);
731 bytes = snprintf(msg->buf, sizeof(msg->buf),
732 "Increasing probability table to %d\n", size);
734 warn("snprintf() failed");
739 if (unlikely(size < 0)) {
740 /* Should never happen */
741 warnx("Bad program state at %s:%d", __FILE__, __LINE__);
742 cleanup_exit(EXIT_FAILURE);
745 sleeptable.size = size;
746 sleeptable.table = &probabilities[PROB_TABLE_MAX_SIZE - size];
749 static void init_probabilities(void)
754 for (i = 0; i < PROB_TABLE_MAX_SIZE; i++) {
755 probabilities[i] = 1000 / j;
758 mutex_init(&sleeptable.mutex, NULL);
761 static int table_get_probability(const struct entry *req,
762 struct short_msg *msg)
764 int diff = req->ticket - req->ticket_completed_ref;
769 /* Should never happen...*/
770 if (unlikely(diff < 0)) {
771 warnx("Programmer assumption error at %s:%d\n", __FILE__,
773 cleanup_exit(EXIT_FAILURE);
775 mutex_lock(&sleeptable.mutex);
776 if (diff >= (sleeptable.size - 1)) {
777 rval = sleeptable.table[sleeptable.size - 1];
778 sleeptable_resize(sleeptable.size + 1, verbose_sizechange(),
781 rval = sleeptable.table[diff];
783 mutex_unlock(&sleeptable.mutex);
787 static void init_queue(struct queue *q)
789 q->next_prod_idx = 0;
790 q->next_cons_idx = 0;
791 mutex_init(&q->mutex, NULL);
792 errno = pthread_cond_init(&q->cond, NULL);
794 err(EXIT_FAILURE, "pthread_cond_init() failed");
/*
 * Number of entries currently in the circular queue, derived from the
 * producer/consumer indices (wrap-around handled by the second return).
 * NOTE(review): appears to assume q->mutex is held by the caller, as at
 * the call sites visible in this file - confirm.
 */
static __always_inline int queue_len(const struct queue *q)
	if (q->next_prod_idx >= q->next_cons_idx)
		return q->next_prod_idx - q->next_cons_idx;
	return QUEUE_SIZE - q->next_cons_idx + q->next_prod_idx;
805 static __always_inline int queue_nr_free(const struct queue *q)
807 int nr_free = QUEUE_SIZE - queue_len(q);
810 * If there is only one slot left we will anyway lie and claim that the
811 * queue is full because adding an element will make it appear empty
818 static __always_inline void queue_idx_inc(int *idx)
820 *idx = (*idx + 1) % QUEUE_SIZE;
823 static __always_inline void queue_push_to_back(struct queue *q,
824 const struct entry *e)
826 q->entries[q->next_prod_idx] = *e;
827 queue_idx_inc(&q->next_prod_idx);
830 static __always_inline struct entry queue_pop_from_front(struct queue *q)
832 struct entry e = q->entries[q->next_cons_idx];
834 queue_idx_inc(&q->next_cons_idx);
838 static __always_inline void queue_cond_signal(struct queue *q)
840 cond_signal(&q->cond);
843 static __always_inline void queue_cond_wait(struct queue *q)
845 cond_wait(&q->cond, &q->mutex);
848 static __always_inline int queue_try_to_add_entry(struct queue *q,
849 const struct entry *e)
853 mutex_lock(&q->mutex);
854 if (queue_nr_free(q) > 0) {
855 queue_push_to_back(q, e);
856 cond_signal(&q->cond);
859 mutex_unlock(&q->mutex);
863 static struct entry queue_wait_for_entry(struct queue *q)
867 mutex_lock(&q->mutex);
869 if (queue_len(&printqueue) > 0) {
870 e = queue_pop_from_front(q);
875 mutex_unlock(&q->mutex);
880 static const struct policy *policy_from_name(const char *name)
882 const struct policy *p = &policies[0];
884 while (p->name != NULL) {
885 if (!strcmp(name, p->name))
892 static const char *policy_name(int policy)
894 const struct policy *p = &policies[0];
895 static const char *rval = "unknown";
897 while (p->name != NULL) {
898 if (p->policy == policy)
905 static bool is_relevant_tracer(const char *name)
909 for (i = 0; relevant_tracers[i]; i++)
910 if (!strcmp(name, relevant_tracers[i]))
915 static bool random_makes_sense(const char *name)
919 for (i = 0; random_tracers[i]; i++)
920 if (!strcmp(name, random_tracers[i]))
925 static void show_available(void)
931 tracers = tracefs_tracers(NULL);
932 for (i = 0; tracers && tracers[i]; i++) {
933 if (is_relevant_tracer(tracers[i]))
938 warnx(no_tracer_msg);
943 warnx(no_latency_tr_msg);
944 tracefs_list_free(tracers);
948 printf("The following latency tracers are available on your system:\n");
949 for (i = 0; tracers[i]; i++) {
950 if (is_relevant_tracer(tracers[i]))
951 printf("%s\n", tracers[i]);
953 tracefs_list_free(tracers);
956 static bool tracer_valid(const char *name, bool *notracer)
963 tracers = tracefs_tracers(NULL);
968 for (i = 0; tracers[i]; i++)
969 if (!strcmp(tracers[i], name)) {
973 tracefs_list_free(tracers);
977 static const char *find_default_tracer(void)
983 for (i = 0; relevant_tracers[i]; i++) {
984 valid = tracer_valid(relevant_tracers[i], ¬racer);
986 errx(EXIT_FAILURE, no_tracer_msg);
988 return relevant_tracers[i];
993 static bool toss_coin(struct drand48_data *buffer, unsigned int prob)
997 if (unlikely(lrand48_r(buffer, &r))) {
998 warnx("lrand48_r() failed");
999 cleanup_exit(EXIT_FAILURE);
1009 static long go_to_sleep(const struct entry *req)
1011 struct timespec future;
1012 long delay = sleep_time;
1014 get_time_in_future(&future, delay);
1016 mutex_lock(&printstate.mutex);
1017 while (!printstate_has_new_req_arrived(req)) {
1018 cond_timedwait(&printstate.cond, &printstate.mutex, &future);
1019 if (time_has_passed(&future))
1023 if (printstate_has_new_req_arrived(req))
1025 mutex_unlock(&printstate.mutex);
1031 static void set_priority(void)
1035 struct sched_param param;
1037 memset(¶m, 0, sizeof(param));
1038 param.sched_priority = sched_pri;
1041 r = sched_setscheduler(pid, sched_policy, ¶m);
1044 err(EXIT_FAILURE, "sched_setscheduler() failed");
1047 pid_t latency_collector_gettid(void)
1049 return (pid_t) syscall(__NR_gettid);
1052 static void print_priority(void)
1057 struct sched_param param;
1059 tid = latency_collector_gettid();
1060 r = pthread_getschedparam(pthread_self(), &policy, ¶m);
1062 warn("pthread_getschedparam() failed");
1063 cleanup_exit(EXIT_FAILURE);
1065 mutex_lock(&print_mtx);
1066 printf("Thread %d runs with scheduling policy %s and priority %d\n",
1067 tid, policy_name(policy), param.sched_priority);
1068 mutex_unlock(&print_mtx);
1071 static __always_inline
1072 void __print_skipmessage(const struct short_msg *resize_msg,
1073 const struct timespec *timestamp, char *buffer,
1074 size_t bufspace, const struct entry *req, bool excuse,
1078 char *p = &buffer[0];
1082 sec = timestamp->tv_sec;
1083 us = timestamp->tv_nsec / 1000;
1085 if (resize_msg != NULL && resize_msg->len > 0) {
1086 strncpy(p, resize_msg->buf, resize_msg->len);
1087 bytes += resize_msg->len;
1088 p += resize_msg->len;
1089 bufspace -= resize_msg->len;
1093 r = snprintf(p, bufspace,
1094 "%ld.%06ld Latency %d printout skipped due to %s\n",
1095 sec, us, req->ticket, str);
1097 r = snprintf(p, bufspace, "%ld.%06ld Latency %d detected\n",
1098 sec, us, req->ticket);
1101 warn("snprintf() failed");
1105 /* These prints could happen concurrently */
1106 mutex_lock(&print_mtx);
1107 write_or_die(fd_stdout, buffer, bytes);
1108 mutex_unlock(&print_mtx);
1111 static void print_skipmessage(const struct short_msg *resize_msg,
1112 const struct timespec *timestamp, char *buffer,
1113 size_t bufspace, const struct entry *req,
1116 __print_skipmessage(resize_msg, timestamp, buffer, bufspace, req,
1117 excuse, "random delay");
1120 static void print_lostmessage(const struct timespec *timestamp, char *buffer,
1121 size_t bufspace, const struct entry *req,
1124 __print_skipmessage(NULL, timestamp, buffer, bufspace, req, true,
1128 static void print_tracefile(const struct short_msg *resize_msg,
1129 const struct timespec *timestamp, char *buffer,
1130 size_t bufspace, long slept,
1131 const struct entry *req)
1133 static const int reserve = 256;
1134 char *p = &buffer[0];
1136 ssize_t bytes_tot = 0;
1141 /* Save some space for the final string and final null char */
1142 bufspace = bufspace - reserve - 1;
1144 if (resize_msg != NULL && resize_msg->len > 0) {
1145 bytes = resize_msg->len;
1146 strncpy(p, resize_msg->buf, bytes);
1152 trace_fd = open(debug_tracefile, O_RDONLY);
1155 warn("open() failed on %s", debug_tracefile);
1159 sec = timestamp->tv_sec;
1160 us = timestamp->tv_nsec / 1000;
1163 slept_ms = slept / 1000;
1164 bytes = snprintf(p, bufspace,
1165 "%ld.%06ld Latency %d randomly sleep for %ld ms before print\n",
1166 sec, us, req->ticket, slept_ms);
1168 bytes = snprintf(p, bufspace,
1169 "%ld.%06ld Latency %d immediate print\n", sec,
1174 warn("snprintf() failed");
1181 bytes = snprintf(p, bufspace,
1182 ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> BEGIN <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
1186 warn("snprintf() failed");
1195 bytes = read(trace_fd, p, bufspace);
1199 warn("read() failed on %s", debug_tracefile);
1200 if (unlikely(close(trace_fd) != 0))
1201 warn("close() failed on %s", debug_tracefile);
1211 if (unlikely(close(trace_fd) != 0))
1212 warn("close() failed on %s", debug_tracefile);
1214 printstate_cnt_dec();
1215 /* Add the reserve space back to the budget for the final string */
1216 bufspace += reserve;
1218 bytes = snprintf(p, bufspace,
1219 ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> END <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n\n");
1222 warn("snprintf() failed");
1228 /* These prints could happen concurrently */
1229 mutex_lock(&print_mtx);
1230 write_or_die(fd_stdout, buffer, bytes_tot);
1231 mutex_unlock(&print_mtx);
1234 static char *get_no_opt(const char *opt)
1239 s = strlen(opt) + strlen(OPT_NO_PREFIX) + 1;
1240 /* We may be called from cleanup_exit() via set_trace_opt() */
1241 no_opt = malloc_or_die_nocleanup(s);
1242 strcpy(no_opt, OPT_NO_PREFIX);
1243 strcat(no_opt, opt);
1247 static char *find_next_optstr(const char *allopt, const char **next)
1257 for (begin = allopt; *begin != '\0'; begin++) {
1258 if (isgraph(*begin))
1265 for (end = begin; *end != '\0' && isgraph(*end); end++)
1268 r = malloc_or_die_nocleanup(s + 1);
1269 strncpy(r, begin, s);
1275 static bool get_trace_opt(const char *allopt, const char *opt, bool *found)
1280 const char *next = allopt;
1283 no_opt = get_no_opt(opt);
1286 str = find_next_optstr(next, &next);
1289 if (!strcmp(str, opt)) {
1295 if (!strcmp(str, no_opt)) {
1308 static int set_trace_opt(const char *opt, bool value)
1316 str = get_no_opt(opt);
1318 r = tracefs_instance_file_write(NULL, TR_OPTIONS, str);
1323 void save_trace_opts(struct ftrace_state *state)
1329 allopt = tracefs_instance_file_read(NULL, TR_OPTIONS, &psize);
1331 errx(EXIT_FAILURE, "Failed to read the %s file\n", TR_OPTIONS);
1333 for (i = 0; i < OPTIDX_NR; i++)
1334 state->opt[i] = get_trace_opt(allopt, optstr[i],
1335 &state->opt_valid[i]);
/*
 * Write `new` into a tracefs control file, skipping the write when the
 * cached current value `cur` already matches.  On write failure, `h`
 * selects the policy (cf. read_file()): ERR_CLEANUP exits through
 * cleanup_exit() so ftrace state is restored, otherwise errx()
 * terminates directly.
 */
static void write_file(const char *file, const char *cur, const char *new,
	static const char *emsg = "Failed to write to the %s file!";

	/* Do nothing if we know that the current and new value are equal */
	if (cur && !needs_change(cur, new))

	r = tracefs_instance_file_write(NULL, file, new);
		if (h == ERR_CLEANUP)
			cleanup_exit(EXIT_FAILURE);
		errx(EXIT_FAILURE, emsg, file);

	if (verbose_ftrace()) {
		/* Serialize with printer-thread output */
		mutex_lock(&print_mtx);
		printf("%s was set to %s\n", file, new);
		mutex_unlock(&print_mtx);
1366 static void reset_max_latency(void)
1368 write_file(TR_MAXLAT, NULL, "0", ERR_CLEANUP);
1371 static void save_and_disable_tracer(void)
1375 bool need_nop = false;
1377 mutex_lock(&save_state.mutex);
1379 save_trace_opts(&save_state);
1380 tracer = read_file(TR_CURRENT, ERR_EXIT);
1381 orig_th = read_file(TR_THRESH, ERR_EXIT);
1383 if (needs_change(tracer, NOP_TRACER)) {
1384 mutex_lock(&print_mtx);
1387 "The %s tracer is already in use but proceeding anyway!\n",
1391 "The %s tracer is already in use, cowardly bailing out!\n"
1392 "This could indicate that another program or instance is tracing.\n"
1393 "Use the -F [--force] option to disregard the current tracer.\n", tracer);
1396 mutex_unlock(&print_mtx);
1400 save_state.tracer = tracer;
1401 save_state.thresh = orig_th;
1404 write_file(TR_CURRENT, NULL, NOP_TRACER, ERR_EXIT);
1406 mutex_unlock(&save_state.mutex);
1409 void set_trace_opts(struct ftrace_state *state, bool *new)
1415 * We only set options if we earlier detected that the option exists in
1416 * the trace_options file and that the wanted setting is different from
1417 * the one we saw in save_and_disable_tracer()
1419 for (i = 0; i < OPTIDX_NR; i++)
1420 if (state->opt_valid[i] &&
1421 state->opt[i] != new[i]) {
1422 r = set_trace_opt(optstr[i], new[i]);
1424 warnx("Failed to set the %s option to %s",
1425 optstr[i], bool2str(new[i]));
1426 cleanup_exit(EXIT_FAILURE);
1428 if (verbose_ftrace()) {
1429 mutex_lock(&print_mtx);
1430 printf("%s in %s was set to %s\n", optstr[i],
1431 TR_OPTIONS, bool2str(new[i]));
1432 mutex_unlock(&print_mtx);
1437 static void enable_tracer(void)
1439 mutex_lock(&save_state.mutex);
1440 set_trace_opts(&save_state, use_options);
1442 write_file(TR_THRESH, save_state.thresh, threshold, ERR_CLEANUP);
1443 write_file(TR_CURRENT, NOP_TRACER, current_tracer, ERR_CLEANUP);
1445 mutex_unlock(&save_state.mutex);
1448 static void tracing_loop(void)
1450 int ifd = inotify_init();
1452 const ssize_t bufsize = sizeof(inotify_buffer);
1453 const ssize_t istructsize = sizeof(struct inotify_event);
1454 char *buf = &inotify_buffer[0];
1458 struct inotify_event *event;
1461 const size_t bufspace = PRINT_BUFFER_SIZE;
1462 struct timespec timestamp;
1466 buffer = malloc_or_die(bufspace);
1469 err(EXIT_FAILURE, "inotify_init() failed!");
1474 * We must disable the tracer before resetting the max_latency
1476 save_and_disable_tracer();
1478 * We must reset the max_latency before the inotify_add_watch()
1481 reset_max_latency();
1484 wd = inotify_add_watch(ifd, debug_maxlat, IN_MODIFY);
1486 err(EXIT_FAILURE, "inotify_add_watch() failed!");
1491 signal_blocking(SIG_UNBLOCK);
1496 nr_read = read(ifd, buf, bufsize);
1501 warn("read() failed on inotify fd!");
1502 cleanup_exit(EXIT_FAILURE);
1504 if (nr_read == bufsize)
1505 warnx("inotify() buffer filled, skipping events");
1506 if (nr_read < istructsize) {
1507 warnx("read() returned too few bytes on inotify fd");
1508 cleanup_exit(EXIT_FAILURE);
1511 for (p = buf; p < buf + nr_read;) {
1512 event = (struct inotify_event *) p;
1513 if ((event->mask & IN_MODIFY) != 0)
1515 p += istructsize + event->len;
1517 while (modified > 0) {
1519 mutex_lock(&printstate.mutex);
1521 printstate_next_ticket(&req);
1522 if (printstate_cnt_read() > 0) {
1523 printstate_mark_req_completed(&req);
1524 mutex_unlock(&printstate.mutex);
1525 if (verbose_lostevent()) {
1526 clock_gettime_or_die(CLOCK_MONOTONIC,
1528 print_lostmessage(×tamp, buffer,
1534 mutex_unlock(&printstate.mutex);
1535 if (queue_try_to_add_entry(&printqueue, &req) != 0) {
1536 /* These prints could happen concurrently */
1538 mutex_lock(&print_mtx);
1540 write_or_die(fd_stdout, queue_full_warning,
1541 strlen(queue_full_warning));
1542 mutex_unlock(&print_mtx);
1549 static void *do_printloop(void *arg)
1551 const size_t bufspace = PRINT_BUFFER_SIZE;
1553 long *rseed = (long *) arg;
1554 struct drand48_data drandbuf;
1558 struct timespec timestamp;
1559 struct short_msg resize_msg;
1563 if (srand48_r(*rseed, &drandbuf) != 0) {
1564 warn("srand48_r() failed!\n");
1565 cleanup_exit(EXIT_FAILURE);
1568 buffer = malloc_or_die(bufspace);
1571 req = queue_wait_for_entry(&printqueue);
1572 clock_gettime_or_die(CLOCK_MONOTONIC, ×tamp);
1573 mutex_lock(&printstate.mutex);
1574 if (prev_req_won_race(&req)) {
1575 printstate_mark_req_completed(&req);
1576 mutex_unlock(&printstate.mutex);
1577 if (verbose_lostevent())
1578 print_lostmessage(×tamp, buffer, bufspace,
1579 &req, "print loop");
1582 mutex_unlock(&printstate.mutex);
1585 * Toss a coin to decide if we want to sleep before printing
1586 * out the backtrace. The reason for this is that opening
1587 * /sys/kernel/debug/tracing/trace will cause a blackout of
1588 * hundreds of ms, where no latencies will be noted by the
1589 * latency tracer. Thus by randomly sleeping we try to avoid
1590 * missing traces systematically due to this. With this option
1591 * we will sometimes get the first latency, some other times
1592 * some of the later ones, in case of closely spaced traces.
1594 if (trace_enable && use_random_sleep) {
1596 prob = table_get_probability(&req, &resize_msg);
1597 if (!toss_coin(&drandbuf, prob))
1598 slept = go_to_sleep(&req);
1600 /* A print is ongoing */
1601 printstate_cnt_inc();
1603 * We will do the printout below so we have to
1604 * mark it as completed while we still have the
1607 mutex_lock(&printstate.mutex);
1608 printstate_mark_req_completed(&req);
1609 mutex_unlock(&printstate.mutex);
1614 * slept < 0 means that we detected another
1615 * notification in go_to_sleep() above
1619 * N.B. printstate_cnt_dec(); will be called
1620 * inside print_tracefile()
1622 print_tracefile(&resize_msg, ×tamp, buffer,
1623 bufspace, slept, &req);
1625 print_skipmessage(&resize_msg, ×tamp,
1626 buffer, bufspace, &req, true);
1628 print_skipmessage(&resize_msg, ×tamp, buffer,
1629 bufspace, &req, false);
/*
 * start_printthread() - spawn the pool of printer threads.
 *
 * Each thread gets its own heap-allocated RNG seed, filled from
 * /dev/urandom when possible.
 *
 * NOTE(review): the internal numbering skips (e.g. 1636-1640), so some
 * lines (declarations, braces) are missing from this extraction; the
 * comments below describe only the visible code.
 */
1635 static void start_printthread(void)
1641 ufd = open(DEV_URANDOM, O_RDONLY);
/* Clamp the requested thread count to the compile-time maximum. */
1642 if (nr_threads > MAX_THREADS) {
1644 "Number of requested print threads was %d, max number is %d\n",
1645 nr_threads, MAX_THREADS);
1646 nr_threads = MAX_THREADS;
1648 for (i = 0; i < nr_threads; i++) {
/* One seed per thread; presumably owned/used by do_printloop — TODO confirm. */
1649 seed = malloc_or_die(sizeof(*seed));
/* Fall back to a trivial seed if /dev/urandom was unavailable or short-read. */
1651 read(ufd, seed, sizeof(*seed)) != sizeof(*seed)) {
1653 "Warning! Using trivial random number seed, since %s not available\n",
/* pthread_create() returns the error code directly; stash it in errno so
 * that err() can format it. */
1658 errno = pthread_create(&printthread[i], NULL, do_printloop,
1661 err(EXIT_FAILURE, "pthread_create()");
/* ufd > 0: only close a successfully opened descriptor (open() failure
 * yields -1; 0 would be stdin). */
1663 if (ufd > 0 && close(ufd) != 0)
1664 warn("close() failed");
/*
 * show_usage() - print the command line help text.
 *
 * One large format string; the trailing argument list (L1786-1788 below)
 * supplies the program name, the default trace/max-latency file paths,
 * the default printer-thread count, the default sleep time and the
 * default probability-table size.
 *
 * NOTE(review): internal numbering gaps indicate a few lines of the help
 * text were lost in extraction.
 */
1667 static void show_usage(void)
1670 "Usage: %s [OPTION]...\n\n"
1671 "Collect closely occurring latencies from %s\n"
1672 "with any of the following tracers: preemptirqsoff, preemptoff, irqsoff, "
1673 "wakeup,\nwakeup_dl, or wakeup_rt.\n\n"
1675 "The occurrence of a latency is detected by monitoring the file\n"
1676 "%s with inotify.\n\n"
1678 "The following options are supported:\n\n"
1680 "-l, --list\t\tList the latency tracers that are supported by the\n"
1681 "\t\t\tcurrently running Linux kernel. If you don't see the\n"
1682 "\t\t\ttracer that you want, you will probably need to\n"
1683 "\t\t\tchange your kernel config and build a new kernel.\n\n"
1685 "-t, --tracer TR\t\tUse the tracer TR. The default is to use the first\n"
1686 "\t\t\ttracer that is supported by the kernel in the following\n"
1687 "\t\t\torder of precedence:\n\n"
1688 "\t\t\tpreemptirqsoff\n"
1689 "\t\t\tpreemptoff\n"
1695 "\t\t\tIf TR is not on the list above, then a warning will be\n"
1696 "\t\t\tprinted.\n\n"
1698 "-F, --force\t\tProceed even if another ftrace tracer is active. Without\n"
1699 "\t\t\tthis option, the program will refuse to start tracing if\n"
1700 "\t\t\tany other tracer than the nop tracer is active.\n\n"
1702 "-s, --threshold TH\tConfigure ftrace to use a threshold of TH microseconds\n"
1703 "\t\t\tfor the tracer. The default is 0, which means that\n"
1704 "\t\t\ttracing_max_latency will be used. tracing_max_latency is\n"
1705 "\t\t\tset to 0 when the program is started and contains the\n"
1706 "\t\t\tmaximum of the latencies that have been encountered.\n\n"
1708 "-f, --function\t\tEnable the function-trace option in trace_options. With\n"
1709 "\t\t\tthis option, ftrace will trace the functions that are\n"
1710 "\t\t\texecuted during a latency, without it we only get the\n"
1711 "\t\t\tbeginning, end, and backtrace.\n\n"
1713 "-g, --graph\t\tEnable the display-graph option in trace_option. This\n"
1714 "\t\t\toption causes ftrace to show the graph of how functions\n"
1715 "\t\t\tare calling other functions.\n\n"
1717 "-c, --policy POL\tRun the program with scheduling policy POL. POL can be\n"
1718 "\t\t\tother, batch, idle, rr or fifo. The default is rr. When\n"
1719 "\t\t\tusing rr or fifo, remember that these policies may cause\n"
1720 "\t\t\tother tasks to experience latencies.\n\n"
1722 "-p, --priority PRI\tRun the program with priority PRI. The acceptable range\n"
1723 "\t\t\tof PRI depends on the scheduling policy.\n\n"
1725 "-n, --notrace\t\tIf latency is detected, do not print out the content of\n"
1726 "\t\t\tthe trace file to standard output\n\n"
1728 "-t, --threads NRTHR\tRun NRTHR threads for printing. Default is %d.\n\n"
1730 "-r, --random\t\tArbitrarily sleep a certain amount of time, default\n"
1731 "\t\t\t%ld ms, before reading the trace file. The\n"
1732 "\t\t\tprobabilities for sleep are chosen so that the\n"
1733 "\t\t\tprobability of obtaining any of a cluster of closely\n"
1734 "\t\t\toccurring latencies are equal, i.e. we will randomly\n"
1735 "\t\t\tchoose which one we collect from the trace file.\n\n"
1736 "\t\t\tThis option is probably only useful with the irqsoff,\n"
1737 "\t\t\tpreemptoff, and preemptirqsoff tracers.\n\n"
1739 "-a, --nrlat NRLAT\tFor the purpose of arbitrary delay, assume that there\n"
1740 "\t\t\tare no more than NRLAT clustered latencies. If NRLAT\n"
1741 "\t\t\tlatencies are detected during a run, this value will\n"
1742 "\t\t\tautomatically be increased to NRLAT + 1 and then to\n"
1743 "\t\t\tNRLAT + 2 and so on. The default is %d. This option\n"
1744 "\t\t\timplies -r. We need to know this number in order to\n"
1745 "\t\t\tbe able to calculate the probabilities of sleeping.\n"
1746 "\t\t\tSpecifically, the probabilities of not sleeping, i.e. to\n"
1747 "\t\t\tdo an immediate printout will be:\n\n"
1748 "\t\t\t1/NRLAT 1/(NRLAT - 1) ... 1/3 1/2 1\n\n"
1749 "\t\t\tThe probability of sleeping will be:\n\n"
1750 "\t\t\t1 - P, where P is from the series above\n\n"
1751 "\t\t\tThis descending probability will cause us to choose\n"
1752 "\t\t\tan occurrence at random. Observe that the final\n"
1753 "\t\t\tprobability is 0, it is when we reach this probability\n"
1754 "\t\t\tthat we increase NRLAT automatically. As an example,\n"
1755 "\t\t\twith the default value of 2, the probabilities will be:\n\n"
1757 "\t\t\tThis means, when a latency is detected we will sleep\n"
1758 "\t\t\twith 50%% probability. If we ever detect another latency\n"
1759 "\t\t\tduring the sleep period, then the probability of sleep\n"
1760 "\t\t\twill be 0%% and the table will be expanded to:\n\n"
1761 "\t\t\t1/3 1/2 0\n\n"
1763 "-v, --verbose\t\tIncrease the verbosity. If this option is given once,\n"
1764 "\t\t\tthen print a message every time that the NRLAT value\n"
1765 "\t\t\tis automatically increased. It also causes a message to\n"
1766 "\t\t\tbe printed when the ftrace settings are changed. If this\n"
1767 "\t\t\toption is given at least twice, then also print a\n"
1768 "\t\t\twarning for lost events.\n\n"
1770 "-u, --time TIME\t\tArbitrarily sleep for a specified time TIME ms before\n"
1771 "\t\t\tprinting out the trace from the trace file. The default\n"
1772 "\t\t\tis %ld ms. This option implies -r.\n\n"
1774 "-x, --no-ftrace\t\tDo not configure ftrace. This assume that the user\n"
1775 "\t\t\tconfigures the ftrace files in sysfs such as\n"
1776 "\t\t\t/sys/kernel/tracing/current_tracer or equivalent.\n\n"
1778 "-i, --tracefile FILE\tUse FILE as trace file. The default is\n"
1780 "\t\t\tThis options implies -x\n\n"
1782 "-m, --max-lat FILE\tUse FILE as tracing_max_latency file. The default is\n"
1784 "\t\t\tThis options implies -x\n\n"
/* Arguments for the conversion specifiers scattered through the text above. */
1786 prg_name, debug_tracefile_dflt, debug_maxlat_dflt, DEFAULT_NR_PRINTER_THREADS,
1787 SLEEP_TIME_MS_DEFAULT, DEFAULT_TABLE_SIZE, SLEEP_TIME_MS_DEFAULT,
1788 debug_tracefile_dflt, debug_maxlat_dflt);
/*
 * find_tracefiles() - locate the ftrace "trace" and "tracing_max_latency"
 * files via tracefs and record them as both the defaults and the active
 * paths.
 *
 * On lookup failure the defaults are set to the DEBUG_NOFILE sentinel so
 * that show_usage() can still print something; the sentinel is detected
 * and rejected later (see the DEBUG_NOFILE check in scan_arguments()).
 */
1791 static void find_tracefiles(void)
1793 debug_tracefile_dflt = tracefs_get_tracing_file("trace");
1794 if (debug_tracefile_dflt == NULL) {
1795 /* This is needed in show_usage() */
1796 debug_tracefile_dflt = DEBUG_NOFILE;
1799 debug_maxlat_dflt = tracefs_get_tracing_file("tracing_max_latency");
1800 if (debug_maxlat_dflt == NULL) {
1801 /* This is needed in show_usage() */
1802 debug_maxlat_dflt = DEBUG_NOFILE;
/* Active paths start out as the defaults; -i/-m options may override them. */
1805 debug_tracefile = debug_tracefile_dflt;
1806 debug_maxlat = debug_maxlat_dflt;
/*
 * alldigits() - return true iff the string s consists solely of decimal
 * digits.
 *
 * NOTE(review): the loop body and return statements (internal lines
 * 1812-1815) are missing from this extraction; presumably an isdigit()
 * check that returns false on the first non-digit — TODO confirm.
 */
1809 bool alldigits(const char *s)
1811 for (; *s != '\0'; s++)
/*
 * check_alldigits() - validate that a numeric option argument contains
 * only decimal digits; otherwise report the offending option name
 * (argname) and bail out.
 *
 * NOTE(review): the error-exit call itself (internal line 1820,
 * presumably errx(EXIT_FAILURE, ...)) is missing from this extraction.
 */
1817 void check_alldigits(const char *optarg, const char *argname)
1819 if (!alldigits(optarg))
1821 "The %s parameter expects a decimal argument\n", argname);
/*
 * scan_arguments() - parse the command line with getopt_long() and fill
 * in the global configuration: tracer, threshold, trace options,
 * scheduling policy/priority, printer-thread count, random-sleep
 * parameters, and trace-file overrides. After parsing, apply defaults
 * (default tracer, SCHED_RR) and sanity-check the priority against the
 * chosen policy's min/max.
 *
 * NOTE(review): the internal numbering gaps show that the switch's case
 * labels, braces and several statements were lost in extraction; the
 * comments below annotate the visible code only.
 */
1824 static void scan_arguments(int argc, char *argv[])
/* Long options mirror the short option string passed to getopt_long(). */
1830 static struct option long_options[] = {
1831 { "list", no_argument, 0, 'l' },
1832 { "tracer", required_argument, 0, 't' },
1833 { "force", no_argument, 0, 'F' },
1834 { "threshold", required_argument, 0, 's' },
1835 { "function", no_argument, 0, 'f' },
1836 { "graph", no_argument, 0, 'g' },
1837 { "policy", required_argument, 0, 'c' },
1838 { "priority", required_argument, 0, 'p' },
1839 { "help", no_argument, 0, 'h' },
1840 { "notrace", no_argument, 0, 'n' },
1841 { "random", no_argument, 0, 'r' },
1842 { "nrlat", required_argument, 0, 'a' },
1843 { "threads", required_argument, 0, 'e' },
1844 { "time", required_argument, 0, 'u' },
1845 { "verbose", no_argument, 0, 'v' },
1846 { "no-ftrace", no_argument, 0, 'x' },
1847 { "tracefile", required_argument, 0, 'i' },
1848 { "max-lat", required_argument, 0, 'm' },
1851 const struct policy *p;
1854 bool notracer, valid;
1857 * We must do this before parsing the arguments because show_usage()
1858 * needs to display these.
1863 c = getopt_long(argc, argv, "lt:Fs:fgc:p:hnra:e:u:vxi:m:",
1864 long_options, &option_idx);
/* -t/--tracer: accept any name but warn if it is not a known latency
 * tracer; error out if the running kernel does not support it. */
1874 current_tracer = strdup(optarg);
1875 if (!is_relevant_tracer(current_tracer)) {
1876 warnx("%s is not a known latency tracer!\n",
1879 valid = tracer_valid(current_tracer, &notracer);
1881 errx(EXIT_FAILURE, no_tracer_msg);
1884 "The tracer %s is not supported by your kernel!\n", current_tracer);
/* -F/--force: allow tracing even if another tracer is active. */
1887 force_tracer = true;
/* -s/--threshold: numeric threshold in microseconds. */
1890 check_alldigits(optarg, "-s [--threshold]");
1891 threshold = strdup(optarg);
/* -f / -g: enable the corresponding ftrace trace_options flags. */
1894 use_options[OPTIDX_FUNC_TR] = true;
1897 use_options[OPTIDX_DISP_GR] = true;
/* -c/--policy: look up the named policy; selecting a policy also picks
 * its default priority unless -p was given earlier. */
1900 p = policy_from_name(optarg);
1902 sched_policy = p->policy;
1903 sched_policy_set = true;
1904 if (!sched_pri_set) {
1905 sched_pri = p->default_pri;
1906 sched_pri_set = true;
1909 warnx("Unknown scheduling %s\n", optarg);
/* -p/--priority: explicit numeric priority. */
1915 check_alldigits(optarg, "-p [--priority]");
1916 sched_pri = atoi(optarg);
1917 sched_pri_set = true;
/* -n/--notrace: disable trace printing (and thus random sleep). */
1924 trace_enable = false;
1925 use_random_sleep = false;
/* -e/--threads: printer thread count, must be positive. */
1928 check_alldigits(optarg, "-e [--threads]");
1929 value = atoi(optarg);
1933 warnx("NRTHR must be > 0\n");
/* -u/--time: sleep time in ms before reading the trace file; implies -r. */
1939 check_alldigits(optarg, "-u [--time]");
1940 value = atoi(optarg);
1942 warnx("TIME must be >= 0\n");
1946 trace_enable = true;
1947 use_random_sleep = true;
1948 sleep_time = value * USEC_PER_MSEC;
/* -r/--random: enable randomized sleeping before printing. */
1954 trace_enable = true;
1955 use_random_sleep = true;
/* -a/--nrlat: initial probability-table size; implies -r. */
1958 check_alldigits(optarg, "-a [--nrlat]");
1959 value = atoi(optarg);
1961 warnx("NRLAT must be > 0\n");
1965 trace_enable = true;
1966 use_random_sleep = true;
1967 table_startsize = value;
/* -x/--no-ftrace, and -i/-m which imply it: leave ftrace configuration
 * to the user, optionally overriding the trace/max-latency file paths. */
1970 setup_ftrace = false;
1973 setup_ftrace = false;
1974 debug_tracefile = strdup(optarg);
1977 setup_ftrace = false;
1978 debug_maxlat = strdup(optarg);
/* No tracer given: fall back to the first supported default tracer. */
1988 if (!current_tracer) {
1989 current_tracer = find_default_tracer();
1990 if (!current_tracer)
1992 "No default tracer found and tracer not specified\n");
/* Random sleep only makes sense for some tracers; warn otherwise. */
1995 if (use_random_sleep && !random_makes_sense(current_tracer)) {
1996 warnx("WARNING: The tracer is %s and random sleep has",
1999 "been enabled. Random sleep is intended for the following tracers:\n");
2000 for (i = 0; random_tracers[i]; i++)
2001 fprintf(stderr, "%s\n", random_tracers[i]);
2002 fprintf(stderr, "\n");
/* DEBUG_NOFILE sentinel from find_tracefiles() means tracefs is absent. */
2006 if (debug_tracefile == DEBUG_NOFILE ||
2007 debug_maxlat == DEBUG_NOFILE)
2009 "Could not find tracing directory e.g. /sys/kernel/tracing\n");
/* Default scheduling: SCHED_RR at the real-time default priority. */
2011 if (!sched_policy_set) {
2012 sched_policy = SCHED_RR;
2013 sched_policy_set = true;
2014 if (!sched_pri_set) {
2015 sched_pri = RT_DEFAULT_PRI;
2016 sched_pri_set = true;
/* Clamp the priority into the range valid for the chosen policy. */
2020 max = sched_get_priority_max(sched_policy);
2021 min = sched_get_priority_min(sched_policy);
2023 if (sched_pri < min) {
2025 "ATTENTION: Increasing priority to minimum, which is %d\n", min);
2028 if (sched_pri > max) {
2030 "ATTENTION: Reducing priority to maximum, which is %d\n", max);
/*
 * show_params() - print a summary of the effective configuration to
 * standard output: scheduling policy/priority, printing mode (random
 * delay / immediate / disabled), tracer, trace options and threshold.
 *
 * NOTE(review): internal numbering gaps indicate a few lines (braces and
 * some arguments) were lost in extraction.
 */
2035 static void show_params(void)
2039 "Running with scheduling policy %s and priority %d. Using %d print threads.\n",
2040 policy_name(sched_policy), sched_pri, nr_threads);
2042 if (use_random_sleep) {
2044 "%s will be printed with random delay\n"
2045 "Start size of the probability table:\t\t\t%d\n"
2046 "Print a message when the prob. table changes size:\t%s\n"
2047 "Print a warning when an event has been lost:\t\t%s\n"
2048 "Sleep time is:\t\t\t\t\t\t%ld ms\n",
2051 bool2str(verbose_sizechange()),
2052 bool2str(verbose_lostevent()),
2053 sleep_time / USEC_PER_MSEC),
2055 printf("%s will be printed immediately\n",
2059 printf("%s will not be printed\n",
2063 printf("Tracer:\t\t\t\t\t\t\t%s\n"
2064 "%s option:\t\t\t\t\t%s\n"
2065 "%s option:\t\t\t\t\t%s\n",
2067 optstr[OPTIDX_FUNC_TR],
2068 bool2str(use_options[OPTIDX_FUNC_TR]),
2069 optstr[OPTIDX_DISP_GR],
2070 bool2str(use_options[OPTIDX_DISP_GR]));
/* A threshold of "0" means tracing_max_latency drives detection instead. */
2071 if (!strcmp(threshold, "0"))
2072 printf("Threshold:\t\t\t\t\t\ttracing_max_latency\n");
2074 printf("Threshold:\t\t\t\t\t\t%s\n", threshold);
2079 int main(int argc, char *argv[])
2082 signal_blocking(SIG_BLOCK);
2083 setup_sig_handler();
2089 prg_name = prg_unknown;
2091 scan_arguments(argc, argv);
2096 if (use_random_sleep) {
2097 init_probabilities();
2098 if (verbose_sizechange())
2099 printf("Initializing probability table to %d\n",
2101 sleeptable_resize(table_startsize, false, NULL);
2104 init_queue(&printqueue);
2105 start_printthread();