1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops;
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer *record_btrace_thread_observer;
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only[] = "read-only";
50 static const char replay_memory_access_read_write[] = "read-write";
51 static const char *const replay_memory_access_types[] =
/* NOTE(review): the initializer braces and the usual NULL terminator of this
   enum-string list are not visible in this excerpt -- confirm upstream.  */
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
58 /* The currently allowed replay memory access type. */
/* Temporarily forced to read-write while inserting/removing breakpoints; see
   record_btrace_insert_breakpoint and record_btrace_remove_breakpoint.  */
59 static const char *replay_memory_access = replay_memory_access_read_only;
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element *set_record_btrace_cmdlist;
63 static struct cmd_list_element *show_record_btrace_cmdlist;
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler *record_btrace_async_inferior_event_handler;
71 /* A flag indicating that we are currently generating a core file.
   While set, replay-mode restrictions on memory/register access are
   bypassed (see record_btrace_xfer_partial et al.).  */
72 static int record_btrace_generating_corefile;
74 /* The current branch trace configuration.
   Passed to btrace_enable for every thread we start tracing.  */
75 static struct btrace_config record_btrace_conf;
77 /* Command list for "record btrace". */
78 static struct cmd_list_element *record_btrace_cmdlist;
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
88 /* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
/* Output is gated on the global "record_debug" knob and written to
   gdb_stdlog with a "[record-btrace]" prefix.  Uses the GCC named
   variadic-macro extension (args...) with ##args paste-elision.  */
91 #define DEBUG(msg, args...) \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
101 /* Update the branch trace for the current thread and return a pointer to its
104 Throws an error if there is no thread or no trace. This function never
/* Resolves inferior_ptid to its thread_info; callers can rely on a
   non-NULL result because both failure paths call error ().  */
107 static struct thread_info *
108 require_btrace_thread (void)
110 struct thread_info *tp;
114 tp = find_thread_ptid (inferior_ptid);
116 error (_("No thread."));
/* An empty trace is treated the same as no trace at all.  */
120 if (btrace_is_empty (tp))
121 error (_("No trace."));
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
129 Throws an error if there is no thread or no trace. This function never
/* Thin wrapper around require_btrace_thread; presumably returns
   &tp->btrace -- the return statement is not visible in this excerpt.  */
132 static struct btrace_thread_info *
133 require_btrace (void)
135 struct thread_info *tp;
137 tp = require_btrace_thread ();
142 /* Enable branch tracing for one thread. Warn on errors. */
/* Used as the new-thread observer callback (attached in
   record_btrace_auto_enable).  Errors are downgraded to warnings so a
   failing thread does not abort thread creation.  */
145 record_btrace_enable_warn (struct thread_info *tp)
149 btrace_enable (tp, &record_btrace_conf);
151 CATCH (error, RETURN_MASK_ERROR)
153 warning ("%s", error.message);
158 /* Callback function to disable branch tracing for one thread. */
/* ARG is presumably the thread_info to disable (it is registered as a
   cleanup with the thread pointer in record_btrace_open) -- body not
   fully visible in this excerpt.  */
161 record_btrace_disable_callback (void *arg)
163 struct thread_info *tp;
170 /* Enable automatic tracing of new threads. */
173 record_btrace_auto_enable (void)
175 DEBUG ("attach thread observer");
/* Keep the observer handle so record_btrace_auto_disable can detach it.  */
177 record_btrace_thread_observer
178 = observer_attach_new_thread (record_btrace_enable_warn);
181 /* Disable automatic tracing of new threads. */
/* Idempotent: bails out early when the observer is already detached, so
   it is safe to call from both to_stop_recording and to_close.  */
184 record_btrace_auto_disable (void)
186 /* The observer may have been detached, already. */
187 if (record_btrace_thread_observer == NULL)
190 DEBUG ("detach thread observer")
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
196 /* The record-btrace async event handler function. */
/* Simply forwards to the generic inferior event handler; DATA is unused.  */
199 record_btrace_handle_async_inferior_event (gdb_client_data data)
201 inferior_event_handler (INF_REG_EVENT, NULL);
204 /* The to_open method of target record-btrace. */
/* Starts branch-trace recording.  ARGS optionally restricts recording to
   a list of thread numbers; NULL or "" means all non-exited threads.  */
207 record_btrace_open (const char *args, int from_tty)
209 struct cleanup *disable_chain;
210 struct thread_info *tp;
216 if (!target_has_execution)
217 error (_("The program is not being run."));
220 error (_("Record btrace can't debug inferior in non-stop mode."));
222 gdb_assert (record_btrace_thread_observer == NULL);
/* Build a cleanup chain that disables tracing on each already-enabled
   thread should a later btrace_enable call throw.  */
224 disable_chain = make_cleanup (null_cleanup, NULL);
225 ALL_NON_EXITED_THREADS (tp)
226 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
228 btrace_enable (tp, &record_btrace_conf);
230 make_cleanup (record_btrace_disable_callback, tp);
233 record_btrace_auto_enable ();
235 push_target (&record_btrace_ops);
237 record_btrace_async_inferior_event_handler
238 = create_async_event_handler (record_btrace_handle_async_inferior_event,
240 record_btrace_generating_corefile = 0;
242 observer_notify_record_changed (current_inferior (), 1);
/* Success: keep tracing enabled; drop the disable cleanups unrun.  */
244 discard_cleanups (disable_chain);
247 /* The to_stop_recording method of target record-btrace. */
250 record_btrace_stop_recording (struct target_ops *self)
252 struct thread_info *tp;
254 DEBUG ("stop recording");
256 record_btrace_auto_disable ();
/* Only threads that actually have tracing enabled (btrace.target set)
   need to be disabled.  */
258 ALL_NON_EXITED_THREADS (tp)
259 if (tp->btrace.target != NULL)
263 /* The to_close method of target record-btrace. */
266 record_btrace_close (struct target_ops *self)
268 struct thread_info *tp;
/* Dispose of the async handler created in record_btrace_open.  */
270 if (record_btrace_async_inferior_event_handler != NULL)
271 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
273 /* Make sure automatic recording gets disabled even if we did not stop
274 recording before closing the record-btrace target. */
275 record_btrace_auto_disable ();
277 /* We should have already stopped recording.
278 Tear down btrace in case we have not. */
279 ALL_NON_EXITED_THREADS (tp)
280 btrace_teardown (tp);
283 /* The to_async method of target record-btrace. */
/* Marks or clears our async event handler depending on ENABLE, then
   forwards the request to the target beneath.  */
286 record_btrace_async (struct target_ops *ops, int enable)
289 mark_async_event_handler (record_btrace_async_inferior_event_handler);
291 clear_async_event_handler (record_btrace_async_inferior_event_handler);
293 ops->beneath->to_async (ops->beneath, enable);
296 /* Adjusts the size and returns a human readable size suffix. */
/* Checks for exact multiples of 1 GiB, 1 MiB, then 1 KiB (mask tests
   against 2^30-1, 2^20-1, 2^10-1) and scales *SIZE accordingly.  */
299 record_btrace_adjust_size (unsigned int *size)
305 if ((sz & ((1u << 30) - 1)) == 0)
310 else if ((sz & ((1u << 20) - 1)) == 0)
315 else if ((sz & ((1u << 10) - 1)) == 0)
324 /* Print a BTS configuration. */
/* Prints the BTS ring-buffer size with a human-readable suffix.  */
327 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
335 suffix = record_btrace_adjust_size (&size);
336 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
340 /* Print an Intel(R) Processor Trace configuration. */
/* Prints the Intel PT ring-buffer size with a human-readable suffix.  */
343 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
351 suffix = record_btrace_adjust_size (&size);
352 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
356 /* Print a branch tracing configuration. */
359 record_btrace_print_conf (const struct btrace_config *conf)
361 printf_unfiltered (_("Recording format: %s.\n"),
362 btrace_format_string (conf->format));
364 switch (conf->format)
366 case BTRACE_FORMAT_NONE:
369 case BTRACE_FORMAT_BTS:
370 record_btrace_print_bts_conf (&conf->bts);
373 case BTRACE_FORMAT_PT:
374 record_btrace_print_pt_conf (&conf->pt);
378 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
381 /* The to_info_record method of target record-btrace. */
/* Implements "info record": prints the recording configuration, the
   instruction/function/gap counts, and the replay position if any.  */
384 record_btrace_info (struct target_ops *self)
386 struct btrace_thread_info *btinfo;
387 const struct btrace_config *conf;
388 struct thread_info *tp;
389 unsigned int insns, calls, gaps;
393 tp = find_thread_ptid (inferior_ptid);
395 error (_("No thread."));
397 btinfo = &tp->btrace;
399 conf = btrace_conf (btinfo);
401 record_btrace_print_conf (conf);
409 if (!btrace_is_empty (tp))
411 struct btrace_call_iterator call;
412 struct btrace_insn_iterator insn;
/* Count calls by walking to the last real call segment.  */
414 btrace_call_end (&call, btinfo);
415 btrace_call_prev (&call, 1);
416 calls = btrace_call_number (&call);
418 btrace_insn_end (&insn, btinfo);
420 insns = btrace_insn_number (&insn);
423 /* The last instruction does not really belong to the trace. */
430 /* Skip gaps at the end. */
433 steps = btrace_insn_prev (&insn, 1);
437 insns = btrace_insn_number (&insn);
442 gaps = btinfo->ngaps;
445 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
446 "for thread %d (%s).\n"), insns, calls, gaps,
447 tp->num, target_pid_to_str (tp->ptid));
449 if (btrace_is_replaying (tp))
450 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
451 btrace_insn_number (btinfo->replay));
454 /* Print a decode error. */
/* Maps ERRCODE to a human-readable string depending on the trace FORMAT
   and emits "[decode error (N): STR]" through UIOUT.  Unrecognized codes
   fall back to "unknown"; PT codes are translated via libipt when it is
   available.  */
457 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
458 enum btrace_format format)
463 errstr = _("unknown");
471 case BTRACE_FORMAT_BTS:
477 case BDE_BTS_OVERFLOW:
478 errstr = _("instruction overflow");
481 case BDE_BTS_INSN_SIZE:
482 errstr = _("unknown instruction");
487 #if defined (HAVE_LIBIPT)
488 case BTRACE_FORMAT_PT:
491 case BDE_PT_USER_QUIT:
493 errstr = _("trace decode cancelled");
496 case BDE_PT_DISABLED:
498 errstr = _("disabled");
501 case BDE_PT_OVERFLOW:
503 errstr = _("overflow");
/* Any other PT code is a libipt error; let libipt describe it.  */
508 errstr = pt_errstr (pt_errcode (errcode));
512 #endif /* defined (HAVE_LIBIPT) */
515 ui_out_text (uiout, _("["));
518 ui_out_text (uiout, _("decode error ("));
519 ui_out_field_int (uiout, "errcode", errcode);
520 ui_out_text (uiout, _("): "));
522 ui_out_text (uiout, errstr);
523 ui_out_text (uiout, _("]\n"));
526 /* Print an unsigned int. */
/* Convenience wrapper: ui_out has no native unsigned field printer, so
   format through "%u".  */
529 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
531 ui_out_field_fmt (uiout, fld, "%u", val);
534 /* Disassemble a section of the recorded instruction trace. */
/* Prints instructions in [BEGIN, END) through UIOUT, one line each:
   index, optional speculation marker, pc prefix, disassembly.  Trace
   gaps (NULL instructions) are rendered as decode errors instead.  */
537 btrace_insn_history (struct ui_out *uiout,
538 const struct btrace_thread_info *btinfo,
539 const struct btrace_insn_iterator *begin,
540 const struct btrace_insn_iterator *end, int flags)
542 struct gdbarch *gdbarch;
543 struct btrace_insn_iterator it;
545 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
546 btrace_insn_number (end));
548 gdbarch = target_gdbarch ();
550 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
552 const struct btrace_insn *insn;
554 insn = btrace_insn_get (&it);
556 /* A NULL instruction indicates a gap in the trace. */
559 const struct btrace_config *conf;
561 conf = btrace_conf (btinfo);
563 /* We have trace so we must have a configuration. */
564 gdb_assert (conf != NULL);
566 btrace_ui_out_decode_error (uiout, it.function->errcode,
573 /* We may add a speculation prefix later. We use the same space
574 that is used for the pc prefix. */
575 if ((flags & DISASSEMBLY_OMIT_PC) == 0)
576 strncpy (prefix, pc_prefix (insn->pc), 3)
585 /* Print the instruction index. */
586 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
587 ui_out_text (uiout, "\t");
589 /* Indicate speculative execution by a leading '?'. */
590 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
593 /* Print the prefix; we tell gdb_disassembly below to omit it. */
594 ui_out_field_fmt (uiout, "prefix", "%s", prefix);
596 /* Disassembly with '/m' flag may not produce the expected result.
/* Disassemble exactly one instruction: [pc, pc + 1).  */
598 gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
599 1, insn->pc, insn->pc + 1);
604 /* The to_insn_history method of target record-btrace. */
/* Implements "record instruction-history".  SIZE's sign selects the
   direction; abs (SIZE) is the number of instructions to show.  The
   window continues from the previous invocation when one exists.  */
607 record_btrace_insn_history (struct target_ops *self, int size, int flags)
609 struct btrace_thread_info *btinfo;
610 struct btrace_insn_history *history;
611 struct btrace_insn_iterator begin, end;
612 struct cleanup *uiout_cleanup;
613 struct ui_out *uiout;
614 unsigned int context, covered;
616 uiout = current_uiout;
617 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
619 context = abs (size);
621 error (_("Bad record instruction-history-size."));
623 btinfo = require_btrace ();
624 history = btinfo->insn_history;
/* No previous history request: establish the initial window.  */
627 struct btrace_insn_iterator *replay;
629 DEBUG ("insn-history (0x%x): %d", flags, size);
631 /* If we're replaying, we start at the replay position. Otherwise, we
632 start at the tail of the trace. */
633 replay = btinfo->replay;
637 btrace_insn_end (&begin, btinfo);
639 /* We start from here and expand in the requested direction. Then we
640 expand in the other direction, as well, to fill up any remaining
645 /* We want the current position covered, as well. */
646 covered = btrace_insn_next (&end, 1);
647 covered += btrace_insn_prev (&begin, context - covered);
648 covered += btrace_insn_next (&end, context - covered);
652 covered = btrace_insn_next (&end, context);
653 covered += btrace_insn_prev (&begin, context - covered);
/* Previous history exists: continue scrolling from its edges.  */
658 begin = history->begin;
661 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
662 btrace_insn_number (&begin), btrace_insn_number (&end));
667 covered = btrace_insn_prev (&begin, context);
672 covered = btrace_insn_next (&end, context);
677 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
/* Nothing covered: tell the user which edge of the trace we hit.  */
681 printf_unfiltered (_("At the start of the branch trace record.\n"));
683 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the window so the next invocation can continue from it.  */
686 btrace_set_insn_history (btinfo, &begin, &end);
687 do_cleanups (uiout_cleanup);
690 /* The to_insn_history_range method of target record-btrace. */
/* Prints instructions numbered [FROM, TO], both inclusive.  Errors on
   bad or wrapped ranges; a TO past the trace end is silently clamped.  */
693 record_btrace_insn_history_range (struct target_ops *self,
694 ULONGEST from, ULONGEST to, int flags)
696 struct btrace_thread_info *btinfo;
697 struct btrace_insn_history *history;
698 struct btrace_insn_iterator begin, end;
699 struct cleanup *uiout_cleanup;
700 struct ui_out *uiout;
701 unsigned int low, high;
704 uiout = current_uiout;
705 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
710 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
712 /* Check for wrap-arounds. */
/* The ULONGEST arguments were narrowed to unsigned int; a mismatch
   means the value did not fit.  */
713 if (low != from || high != to)
714 error (_("Bad range."));
717 error (_("Bad range."));
719 btinfo = require_btrace ();
721 found = btrace_find_insn_by_number (&begin, btinfo, low);
723 error (_("Range out of bounds."));
725 found = btrace_find_insn_by_number (&end, btinfo, high);
728 /* Silently truncate the range. */
729 btrace_insn_end (&end, btinfo);
733 /* We want both begin and end to be inclusive. */
734 btrace_insn_next (&end, 1);
737 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
738 btrace_set_insn_history (btinfo, &begin, &end);
740 do_cleanups (uiout_cleanup);
743 /* The to_insn_history_from method of target record-btrace. */
/* Prints abs (SIZE) instructions ending (SIZE < 0) or starting
   (SIZE > 0) at instruction number FROM, by computing an inclusive
   [begin, end] and delegating to the range method.  */
746 record_btrace_insn_history_from (struct target_ops *self,
747 ULONGEST from, int size, int flags)
749 ULONGEST begin, end, context;
751 context = abs (size);
753 error (_("Bad record instruction-history-size."));
762 begin = from - context + 1;
767 end = from + context - 1;
769 /* Check for wrap-around. */
774 record_btrace_insn_history_range (self, begin, end, flags);
777 /* Print the instruction number range for a function call history line. */
/* Emits "BEGIN,END" (inclusive instruction numbers) for the instructions
   belonging to BFUN.  Requires BFUN to contain at least one insn.  */
780 btrace_call_history_insn_range (struct ui_out *uiout,
781 const struct btrace_function *bfun)
783 unsigned int begin, end, size;
785 size = VEC_length (btrace_insn_s, bfun->insn);
786 gdb_assert (size > 0);
788 begin = bfun->insn_offset;
789 end = begin + size - 1;
791 ui_out_field_uint (uiout, "insn begin", begin);
792 ui_out_text (uiout, ",");
793 ui_out_field_uint (uiout, "insn end", end);
796 /* Compute the lowest and highest source line for the instructions in BFUN
797 and return them in PBEGIN and PEND.
798 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
799 result from inlining or macro expansion. */
802 btrace_compute_src_line_range (const struct btrace_function *bfun,
803 int *pbegin, int *pend)
805 struct btrace_insn *insn;
806 struct symtab *symtab;
818 symtab = symbol_symtab (sym);
/* Min/max over the source lines of every insn that maps into BFUN's
   own symtab; other symtabs indicate inlining/macro expansion.  */
820 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
822 struct symtab_and_line sal;
824 sal = find_pc_line (insn->pc, 0);
825 if (sal.symtab != symtab || sal.line == 0)
828 begin = min (begin, sal.line);
829 end = max (end, sal.line);
837 /* Print the source line information for a function call history line. */
/* Emits "FILE" or "FILE:LINE" or "FILE:MIN,MAX" depending on the line
   range computed for BFUN.  */
840 btrace_call_history_src_line (struct ui_out *uiout,
841 const struct btrace_function *bfun)
850 ui_out_field_string (uiout, "file",
851 symtab_to_filename_for_display (symbol_symtab (sym)));
853 btrace_compute_src_line_range (bfun, &begin, &end);
857 ui_out_text (uiout, ":");
858 ui_out_field_int (uiout, "min line", begin);
/* Only print the upper bound when the range spans more than one line.  */
863 ui_out_text (uiout, ",");
864 ui_out_field_int (uiout, "max line", end);
867 /* Get the name of a branch trace function. */
/* Prefers the full symbol's print name, falls back to the minimal
   symbol's; the no-symbol fallback is not visible in this excerpt.  */
870 btrace_get_bfun_name (const struct btrace_function *bfun)
872 struct minimal_symbol *msym;
882 return SYMBOL_PRINT_NAME (sym);
883 else if (msym != NULL)
884 return MSYMBOL_PRINT_NAME (msym);
889 /* Disassemble a section of the recorded function trace. */
/* Prints call segments in [BEGIN, END): index, optional indentation by
   call depth, function name, and optional insn-range / source-line
   columns controlled by FLAGS.  Gap segments print as decode errors.  */
892 btrace_call_history (struct ui_out *uiout,
893 const struct btrace_thread_info *btinfo,
894 const struct btrace_call_iterator *begin,
895 const struct btrace_call_iterator *end,
896 enum record_print_flag flags)
898 struct btrace_call_iterator it;
900 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
901 btrace_call_number (end));
903 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
905 const struct btrace_function *bfun;
906 struct minimal_symbol *msym;
909 bfun = btrace_call_get (&it);
913 /* Print the function index. */
914 ui_out_field_uint (uiout, "index", bfun->number);
915 ui_out_text (uiout, "\t");
917 /* Indicate gaps in the trace. */
918 if (bfun->errcode != 0)
920 const struct btrace_config *conf;
922 conf = btrace_conf (btinfo);
924 /* We have trace so we must have a configuration. */
925 gdb_assert (conf != NULL);
927 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
/* Indent by call depth; btinfo->level normalizes the minimum to 0.  */
932 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
934 int level = bfun->level + btinfo->level, i;
936 for (i = 0; i < level; ++i)
937 ui_out_text (uiout, "  ");
941 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
942 else if (msym != NULL)
943 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
944 else if (!ui_out_is_mi_like_p (uiout))
945 ui_out_field_string (uiout, "function", "??");
947 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
949 ui_out_text (uiout, _("\tinst "));
950 btrace_call_history_insn_range (uiout, bfun);
953 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
955 ui_out_text (uiout, _("\tat "));
956 btrace_call_history_src_line (uiout, bfun);
959 ui_out_text (uiout, "\n");
963 /* The to_call_history method of target record-btrace. */
/* Implements "record function-call-history".  Mirrors the structure of
   record_btrace_insn_history, but iterates call segments: SIZE's sign
   selects direction, abs (SIZE) the count, and the window continues
   from a previous invocation when one exists.  */
966 record_btrace_call_history (struct target_ops *self, int size, int flags)
968 struct btrace_thread_info *btinfo;
969 struct btrace_call_history *history;
970 struct btrace_call_iterator begin, end;
971 struct cleanup *uiout_cleanup;
972 struct ui_out *uiout;
973 unsigned int context, covered;
975 uiout = current_uiout;
976 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
978 context = abs (size);
980 error (_("Bad record function-call-history-size."));
982 btinfo = require_btrace ();
983 history = btinfo->call_history;
/* No previous history request: establish the initial window.  */
986 struct btrace_insn_iterator *replay;
988 DEBUG ("call-history (0x%x): %d", flags, size);
990 /* If we're replaying, we start at the replay position. Otherwise, we
991 start at the tail of the trace. */
992 replay = btinfo->replay;
995 begin.function = replay->function;
996 begin.btinfo = btinfo;
999 btrace_call_end (&begin, btinfo);
1001 /* We start from here and expand in the requested direction. Then we
1002 expand in the other direction, as well, to fill up any remaining
1007 /* We want the current position covered, as well. */
1008 covered = btrace_call_next (&end, 1);
1009 covered += btrace_call_prev (&begin, context - covered);
1010 covered += btrace_call_next (&end, context - covered);
1014 covered = btrace_call_next (&end, context);
1015 covered += btrace_call_prev (&begin, context- covered);
/* Previous history exists: continue scrolling from its edges.  */
1020 begin = history->begin;
1023 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1024 btrace_call_number (&begin), btrace_call_number (&end));
1029 covered = btrace_call_prev (&begin, context);
1034 covered = btrace_call_next (&end, context);
1039 btrace_call_history (uiout, btinfo, &begin, &end, flags);
/* Nothing covered: tell the user which edge of the trace we hit.  */
1043 printf_unfiltered (_("At the start of the branch trace record.\n"));
1045 printf_unfiltered (_("At the end of the branch trace record.\n"));
1048 btrace_set_call_history (btinfo, &begin, &end);
1049 do_cleanups (uiout_cleanup);
1052 /* The to_call_history_range method of target record-btrace. */
/* Prints call segments numbered [FROM, TO], both inclusive.  Errors on
   bad or wrapped ranges; a TO past the trace end is silently clamped.  */
1055 record_btrace_call_history_range (struct target_ops *self,
1056 ULONGEST from, ULONGEST to, int flags)
1058 struct btrace_thread_info *btinfo;
1059 struct btrace_call_history *history;
1060 struct btrace_call_iterator begin, end;
1061 struct cleanup *uiout_cleanup;
1062 struct ui_out *uiout;
1063 unsigned int low, high;
1066 uiout = current_uiout;
1067 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1072 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1074 /* Check for wrap-arounds. */
/* The ULONGEST arguments were narrowed to unsigned int; a mismatch
   means the value did not fit.  */
1075 if (low != from || high != to)
1076 error (_("Bad range."));
1079 error (_("Bad range."));
1081 btinfo = require_btrace ();
1083 found = btrace_find_call_by_number (&begin, btinfo, low);
1085 error (_("Range out of bounds."));
1087 found = btrace_find_call_by_number (&end, btinfo, high);
1090 /* Silently truncate the range. */
1091 btrace_call_end (&end, btinfo);
1095 /* We want both begin and end to be inclusive. */
1096 btrace_call_next (&end, 1);
1099 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1100 btrace_set_call_history (btinfo, &begin, &end);
1102 do_cleanups (uiout_cleanup);
1105 /* The to_call_history_from method of target record-btrace. */
/* Prints abs (SIZE) call segments ending (SIZE < 0) or starting
   (SIZE > 0) at call number FROM, by computing an inclusive
   [begin, end] and delegating to the range method.  */
1108 record_btrace_call_history_from (struct target_ops *self,
1109 ULONGEST from, int size, int flags)
1111 ULONGEST begin, end, context;
1113 context = abs (size);
1115 error (_("Bad record function-call-history-size."));
1124 begin = from - context + 1;
1129 end = from + context - 1;
1131 /* Check for wrap-around. */
1136 record_btrace_call_history_range (self, begin, end, flags);
1139 /* The to_record_is_replaying method of target record-btrace. */
/* Returns non-zero when ANY non-exited thread is currently replaying.  */
1142 record_btrace_is_replaying (struct target_ops *self)
1144 struct thread_info *tp;
1146 ALL_NON_EXITED_THREADS (tp)
1147 if (btrace_is_replaying (tp))
1153 /* The to_xfer_partial method of target record-btrace. */
/* While replaying under the default "read-only" policy (and not dumping
   a core), memory writes are refused and reads are only honored for
   read-only sections; everything else forwards to the target beneath.  */
1155 static enum target_xfer_status
1156 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1157 const char *annex, gdb_byte *readbuf,
1158 const gdb_byte *writebuf, ULONGEST offset,
1159 ULONGEST len, ULONGEST *xfered_len)
1161 struct target_ops *t;
1163 /* Filter out requests that don't make sense during replay. */
1164 if (replay_memory_access == replay_memory_access_read_only
1165 && !record_btrace_generating_corefile
1166 && record_btrace_is_replaying (ops))
1170 case TARGET_OBJECT_MEMORY:
1172 struct target_section *section;
1174 /* We do not allow writing memory in general. */
1175 if (writebuf != NULL)
1178 return TARGET_XFER_UNAVAILABLE;
1181 /* We allow reading readonly memory. */
1182 section = target_section_by_addr (ops, offset);
1183 if (section != NULL)
1185 /* Check if the section we found is readonly. */
1186 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1187 section->the_bfd_section)
1188 & SEC_READONLY) != 0)
1190 /* Truncate the request to fit into this section. */
1191 len = min (len, section->endaddr - offset);
1197 return TARGET_XFER_UNAVAILABLE;
1202 /* Forward the request. */
/* NOTE(review): upstream walks to the first target beneath that
   implements to_xfer_partial; the loop header is not visible here.  */
1204 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1205 offset, len, xfered_len);
1208 /* The to_insert_breakpoint method of target record-btrace. */
/* Temporarily lifts the replay read-only memory policy so the target
   beneath can patch in the breakpoint; the old policy is restored on
   both the normal and the exception path.  */
1211 record_btrace_insert_breakpoint (struct target_ops *ops,
1212 struct gdbarch *gdbarch,
1213 struct bp_target_info *bp_tgt)
1218 /* Inserting breakpoints requires accessing memory. Allow it for the
1219 duration of this function. */
1220 old = replay_memory_access;
1221 replay_memory_access = replay_memory_access_read_write;
1226 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1228 CATCH (except, RETURN_MASK_ALL)
/* Restore the policy before re-throwing.  */
1230 replay_memory_access = old;
1231 throw_exception (except);
1234 replay_memory_access = old;
1239 /* The to_remove_breakpoint method of target record-btrace. */
/* Mirror image of record_btrace_insert_breakpoint: lift the replay
   read-only policy for the duration of the removal, restoring it on
   both the normal and the exception path.  */
1242 record_btrace_remove_breakpoint (struct target_ops *ops,
1243 struct gdbarch *gdbarch,
1244 struct bp_target_info *bp_tgt)
1249 /* Removing breakpoints requires accessing memory. Allow it for the
1250 duration of this function. */
1251 old = replay_memory_access;
1252 replay_memory_access = replay_memory_access_read_write;
1257 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1259 CATCH (except, RETURN_MASK_ALL)
/* Restore the policy before re-throwing.  */
1261 replay_memory_access = old;
1262 throw_exception (except);
1265 replay_memory_access = old;
1270 /* The to_fetch_registers method of target record-btrace. */
/* While replaying, only the PC register is available -- it is supplied
   from the replay iterator's current instruction.  When not replaying
   (or while generating a core file) the request forwards beneath.  */
1273 record_btrace_fetch_registers (struct target_ops *ops,
1274 struct regcache *regcache, int regno)
1276 struct btrace_insn_iterator *replay;
1277 struct thread_info *tp;
1279 tp = find_thread_ptid (inferior_ptid);
1280 gdb_assert (tp != NULL);
1282 replay = tp->btrace.replay;
1283 if (replay != NULL && !record_btrace_generating_corefile)
1285 const struct btrace_insn *insn;
1286 struct gdbarch *gdbarch;
1289 gdbarch = get_regcache_arch (regcache);
1290 pcreg = gdbarch_pc_regnum (gdbarch);
1294 /* We can only provide the PC register. */
1295 if (regno >= 0 && regno != pcreg)
1298 insn = btrace_insn_get (replay);
1299 gdb_assert (insn != NULL);
1301 regcache_raw_supply (regcache, regno, &insn->pc);
1305 struct target_ops *t = ops->beneath;
1307 t->to_fetch_registers (t, regcache, regno);
1311 /* The to_store_registers method of target record-btrace. */
/* Register writes are refused during replay (history is immutable);
   otherwise the store forwards to the target beneath.  */
1314 record_btrace_store_registers (struct target_ops *ops,
1315 struct regcache *regcache, int regno)
1317 struct target_ops *t;
1319 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1320 error (_("This record target does not allow writing registers."));
1322 gdb_assert (may_write_registers != 0);
1325 t->to_store_registers (t, regcache, regno);
1328 /* The to_prepare_to_store method of target record-btrace. */
/* No-op during replay (stores are refused anyway); otherwise forwards
   to the target beneath.  */
1331 record_btrace_prepare_to_store (struct target_ops *ops,
1332 struct regcache *regcache)
1334 struct target_ops *t;
1336 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1340 t->to_prepare_to_store (t, regcache);
1343 /* The branch trace frame cache. */
1345 struct btrace_frame_cache
/* The thread whose trace this frame was built from.  */
1348 struct thread_info *tp;
1350 /* The frame info. */
1351 struct frame_info *frame;
1353 /* The branch trace function segment. */
1354 const struct btrace_function *bfun;
1357 /* A struct btrace_frame_cache hash table indexed by NEXT. */
/* Keyed by the frame pointer (see bfcache_hash/bfcache_eq); entries are
   added in bfcache_new and removed in the frame dealloc hook.  */
1359 static htab_t bfcache;
1361 /* hash_f for htab_create_alloc of bfcache. */
/* Hash a cache entry by its frame pointer identity.  */
1364 bfcache_hash (const void *arg)
1366 const struct btrace_frame_cache *cache = arg;
1368 return htab_hash_pointer (cache->frame);
1371 /* eq_f for htab_create_alloc of bfcache. */
/* Two cache entries are equal iff they describe the same frame.  */
1374 bfcache_eq (const void *arg1, const void *arg2)
1376 const struct btrace_frame_cache *cache1 = arg1;
1377 const struct btrace_frame_cache *cache2 = arg2;
1379 return cache1->frame == cache2->frame;
1382 /* Create a new btrace frame cache. */
/* Allocates the cache on the frame obstack (freed with the frame) and
   registers it in bfcache keyed by FRAME; asserts the slot was empty.  */
1384 static struct btrace_frame_cache *
1385 bfcache_new (struct frame_info *frame)
1387 struct btrace_frame_cache *cache;
1390 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1391 cache->frame = frame;
1393 slot = htab_find_slot (bfcache, cache, INSERT);
1394 gdb_assert (*slot == NULL);
1400 /* Extract the branch trace function from a branch trace frame. */
/* Looks FRAME up in bfcache via a stack-allocated pattern entry;
   presumably returns NULL when the frame is not a btrace frame -- the
   miss path is not visible in this excerpt.  */
1402 static const struct btrace_function *
1403 btrace_get_frame_function (struct frame_info *frame)
1405 const struct btrace_frame_cache *cache;
1406 const struct btrace_function *bfun;
1407 struct btrace_frame_cache pattern;
1410 pattern.frame = frame;
1412 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1420 /* Implement stop_reason method for record_btrace_frame_unwind. */
/* Unwinding stops when the traced function segment has no caller link.  */
1422 static enum unwind_stop_reason
1423 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1426 const struct btrace_frame_cache *cache;
1427 const struct btrace_function *bfun;
1429 cache = *this_cache;
1431 gdb_assert (bfun != NULL);
1433 if (bfun->up == NULL)
1434 return UNWIND_UNAVAILABLE;
1436 return UNWIND_NO_REASON;
1439 /* Implement this_id method for record_btrace_frame_unwind. */
/* Builds an unavailable-stack frame id from the frame's function start
   address plus the segment number of the first segment of this function
   instance (walking segment.prev) as the disambiguating special value.  */
1442 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1443 struct frame_id *this_id)
1445 const struct btrace_frame_cache *cache;
1446 const struct btrace_function *bfun;
1447 CORE_ADDR code, special;
1449 cache = *this_cache;
1452 gdb_assert (bfun != NULL);
1454 while (bfun->segment.prev != NULL)
1455 bfun = bfun->segment.prev;
1457 code = get_frame_func (this_frame);
1458 special = bfun->number;
1460 *this_id = frame_id_build_unavailable_stack_special (code, special);
1462 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1463 btrace_get_bfun_name (cache->bfun),
1464 core_addr_to_string_nz (this_id->code_addr),
1465 core_addr_to_string_nz (this_id->special_addr));
1468 /* Implement prev_register method for record_btrace_frame_unwind. */
/* Only the PC can be unwound from branch trace.  For a return-linked
   caller the PC is taken from the caller's first recorded instruction;
   otherwise from its last instruction advanced by that insn's length.  */
1470 static struct value *
1471 record_btrace_frame_prev_register (struct frame_info *this_frame,
1475 const struct btrace_frame_cache *cache;
1476 const struct btrace_function *bfun, *caller;
1477 const struct btrace_insn *insn;
1478 struct gdbarch *gdbarch;
1482 gdbarch = get_frame_arch (this_frame);
1483 pcreg = gdbarch_pc_regnum (gdbarch);
1484 if (pcreg < 0 || regnum != pcreg)
1485 throw_error (NOT_AVAILABLE_ERROR,
1486 _("Registers are not available in btrace record history"));
1488 cache = *this_cache;
1490 gdb_assert (bfun != NULL);
1494 throw_error (NOT_AVAILABLE_ERROR,
1495 _("No caller in btrace record history"));
1497 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1499 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1504 insn = VEC_last (btrace_insn_s, caller->insn);
1507 pc += gdb_insn_length (gdbarch, pc);
1510 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1511 btrace_get_bfun_name (bfun), bfun->level,
1512 core_addr_to_string_nz (pc));
1514 return frame_unwind_got_address (this_frame, regnum, pc);
1517 /* Implement sniffer method for record_btrace_frame_unwind. */
/* Claims the frame while replaying: the innermost frame maps to the
   replay iterator's current function segment; outer frames map to the
   caller (bfun->up) of the next frame's segment, except across tail
   calls, which the tailcall sniffer handles instead.  */
1520 record_btrace_frame_sniffer (const struct frame_unwind *self,
1521 struct frame_info *this_frame,
1524 const struct btrace_function *bfun;
1525 struct btrace_frame_cache *cache;
1526 struct thread_info *tp;
1527 struct frame_info *next;
1529 /* THIS_FRAME does not contain a reference to its thread. */
1530 tp = find_thread_ptid (inferior_ptid);
1531 gdb_assert (tp != NULL);
1534 next = get_next_frame (this_frame);
1537 const struct btrace_insn_iterator *replay;
1539 replay = tp->btrace.replay;
1541 bfun = replay->function;
1545 const struct btrace_function *callee;
1547 callee = btrace_get_frame_function (next);
1548 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1555 DEBUG ("[frame] sniffed frame for %s on level %d",
1556 btrace_get_bfun_name (bfun), bfun->level);
1558 /* This is our frame. Initialize the frame cache. */
1559 cache = bfcache_new (this_frame);
1563 *this_cache = cache;
1567 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1570 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1571 struct frame_info *this_frame,
/* NOTE(review): only claims the frame when the callee segment is linked to
   its caller via a tail call (BFUN_UP_LINKS_TO_TAILCALL).  */
1574 const struct btrace_function *bfun, *callee;
1575 struct btrace_frame_cache *cache;
1576 struct frame_info *next;
1578 next = get_next_frame (this_frame);
1582 callee = btrace_get_frame_function (next);
1586 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1593 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1594 btrace_get_bfun_name (bfun), bfun->level);
1596 /* This is our frame. Initialize the frame cache. */
1597 cache = bfcache_new (this_frame);
1598 cache->tp = find_thread_ptid (inferior_ptid);
1601 *this_cache = cache;
/* Release the btrace frame cache entry for SELF from the global BFCACHE.  */
1606 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1608 struct btrace_frame_cache *cache;
1613 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1614 gdb_assert (slot != NULL);
1616 htab_remove_elt (bfcache, cache);
1619 /* btrace recording does not store previous memory content, nor the stack
1620 frames content. Any unwinding would return erroneous results as the stack
1621 contents no longer match the changed PC value restored from history.
1622 Therefore this unwinder reports any possibly unwound registers as
1625 const struct frame_unwind record_btrace_frame_unwind =
1628 record_btrace_frame_unwind_stop_reason,
1629 record_btrace_frame_this_id,
1630 record_btrace_frame_prev_register,
1632 record_btrace_frame_sniffer,
1633 record_btrace_frame_dealloc_cache
/* Same callbacks as above, but sniffed only for tail-call frames.  */
1636 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1639 record_btrace_frame_unwind_stop_reason,
1640 record_btrace_frame_this_id,
1641 record_btrace_frame_prev_register,
1643 record_btrace_tailcall_frame_sniffer,
1644 record_btrace_frame_dealloc_cache
1647 /* Implement the to_get_unwinder method. */
1649 static const struct frame_unwind *
1650 record_btrace_to_get_unwinder (struct target_ops *self)
1652 return &record_btrace_frame_unwind;
1655 /* Implement the to_get_tailcall_unwinder method. */
1657 static const struct frame_unwind *
1658 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1660 return &record_btrace_tailcall_frame_unwind;
1663 /* Return a human-readable string for FLAG. */
1666 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
/* NOTE(review): only the reverse-direction cases are visible here; the
   forward step/cont cases are presumably handled by sibling switch arms.  */
1674 return "reverse-step";
1680 return "reverse-cont";
1689 /* Indicate that TP should be resumed according to FLAG. */
1692 record_btrace_resume_thread (struct thread_info *tp,
1693 enum btrace_thread_flag flag)
1695 struct btrace_thread_info *btinfo;
1697 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1698 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1700 btinfo = &tp->btrace;
/* Refuse to double-resume: a thread with a pending move request must be
   waited on first.  */
1702 if ((btinfo->flags & BTHR_MOVE) != 0)
1703 error (_("Thread already moving."));
1705 /* Fetch the latest branch trace. */
1708 /* A resume request overwrites a preceding stop request. */
1709 btinfo->flags &= ~BTHR_STOP;
1710 btinfo->flags |= flag;
1713 /* Find the thread to resume given a PTID. */
1715 static struct thread_info *
1716 record_btrace_find_resume_thread (ptid_t ptid)
1718 struct thread_info *tp;
1720 /* When asked to resume everything, we pick the current thread. */
1721 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1722 ptid = inferior_ptid;
/* May return NULL if no thread with PTID exists; callers must check.  */
1724 return find_thread_ptid (ptid);
1727 /* Start replaying a thread. */
1729 static struct btrace_insn_iterator *
1730 record_btrace_start_replaying (struct thread_info *tp)
1732 struct btrace_insn_iterator *replay;
1733 struct btrace_thread_info *btinfo;
1736 btinfo = &tp->btrace;
1739 /* We can't start replaying without trace. */
1740 if (btinfo->begin == NULL)
1743 /* Clear the executing flag to allow changes to the current frame.
1744 We are not actually running, yet. We just started a reverse execution
1745 command or a record goto command.
1746 For the latter, EXECUTING is false and this has no effect.
1747 For the former, EXECUTING is true and we're in to_wait, about to
1748 move the thread. Since we need to recompute the stack, we temporarily
1749 set EXECUTING to false. */
1750 executing = is_executing (tp->ptid);
1751 set_executing (tp->ptid, 0);
1753 /* GDB stores the current frame_id when stepping in order to detect steps
1755 Since frames are computed differently when we're replaying, we need to
1756 recompute those stored frames and fix them up so we can still detect
1757 subroutines after we started replaying. */
1760 struct frame_info *frame;
1761 struct frame_id frame_id;
1762 int upd_step_frame_id, upd_step_stack_frame_id;
1764 /* The current frame without replaying - computed via normal unwind. */
1765 frame = get_current_frame ();
1766 frame_id = get_frame_id (frame);
1768 /* Check if we need to update any stepping-related frame id's. */
1769 upd_step_frame_id = frame_id_eq (frame_id,
1770 tp->control.step_frame_id);
1771 upd_step_stack_frame_id = frame_id_eq (frame_id,
1772 tp->control.step_stack_frame_id);
1774 /* We start replaying at the end of the branch trace. This corresponds
1775 to the current instruction. */
1776 replay = XNEW (struct btrace_insn_iterator);
1777 btrace_insn_end (replay, btinfo);
1779 /* Skip gaps at the end of the trace. */
1780 while (btrace_insn_get (replay) == NULL)
1784 steps = btrace_insn_prev (replay, 1);
1786 error (_("No trace."));
1789 /* We're not replaying, yet. */
1790 gdb_assert (btinfo->replay == NULL);
1791 btinfo->replay = replay;
1793 /* Make sure we're not using any stale registers. */
1794 registers_changed_ptid (tp->ptid);
1796 /* The current frame with replaying - computed via btrace unwind. */
1797 frame = get_current_frame ();
1798 frame_id = get_frame_id (frame);
1800 /* Replace stepping related frames where necessary. */
1801 if (upd_step_frame_id)
1802 tp->control.step_frame_id = frame_id;
1803 if (upd_step_stack_frame_id)
1804 tp->control.step_stack_frame_id = frame_id;
/* On any error, roll back to the non-replaying state before re-throwing.  */
1806 CATCH (except, RETURN_MASK_ALL)
1808 /* Restore the previous execution state. */
1809 set_executing (tp->ptid, executing);
1811 xfree (btinfo->replay);
1812 btinfo->replay = NULL;
1814 registers_changed_ptid (tp->ptid);
1816 throw_exception (except);
1820 /* Restore the previous execution state. */
1821 set_executing (tp->ptid, executing);
1826 /* Stop replaying a thread. */
1829 record_btrace_stop_replaying (struct thread_info *tp)
1831 struct btrace_thread_info *btinfo;
1833 btinfo = &tp->btrace;
/* Freeing and NULLing the replay iterator is what marks the thread as no
   longer replaying.  */
1835 xfree (btinfo->replay);
1836 btinfo->replay = NULL;
1838 /* Make sure we're not leaving any stale registers. */
1839 registers_changed_ptid (tp->ptid);
1842 /* Stop replaying TP if it is at the end of its execution history. */
1845 record_btrace_stop_replaying_at_end (struct thread_info *tp)
1847 struct btrace_insn_iterator *replay, end;
1848 struct btrace_thread_info *btinfo;
1850 btinfo = &tp->btrace;
1851 replay = btinfo->replay;
1856 btrace_insn_end (&end, btinfo);
/* Replay position equals trace end: the thread has caught up with the live
   target, so leave replay mode.  */
1858 if (btrace_insn_cmp (replay, &end) == 0)
1859 record_btrace_stop_replaying (tp);
1862 /* The to_resume method of target record-btrace. */
1865 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1866 enum gdb_signal signal)
1868 struct thread_info *tp, *other;
1869 enum btrace_thread_flag flag;
1871 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
1872 execution_direction == EXEC_REVERSE ? "reverse-" : "",
1873 step ? "step" : "cont");
1875 /* Store the execution direction of the last resume. */
1876 record_btrace_resume_exec_dir = execution_direction;
1878 tp = record_btrace_find_resume_thread (ptid);
1880 error (_("Cannot find thread to resume."));
1882 /* Stop replaying other threads if the thread to resume is not replaying. */
1883 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1884 ALL_NON_EXITED_THREADS (other)
1885 record_btrace_stop_replaying (other);
1887 /* As long as we're not replaying, just forward the request. */
1888 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1891 return ops->to_resume (ops, ptid, step, signal);
1894 /* Compute the btrace thread flag for the requested move. */
1896 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1898 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1900 /* At the moment, we only move a single thread. We could also move
1901 all threads in parallel by single-stepping each resumed thread
1902 until the first runs into an event.
1903 When we do that, we would want to continue all other threads.
1904 For now, just resume one thread to not confuse to_wait. */
1905 record_btrace_resume_thread (tp, flag);
1907 /* We just indicate the resume intent here. The actual stepping happens in
1908 record_btrace_wait below. */
1910 /* Async support. */
1911 if (target_can_async_p ())
1914 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1918 /* Cancel resuming TP. */
1921 record_btrace_cancel_resume (struct thread_info *tp)
1923 enum btrace_thread_flag flags;
/* Nothing to cancel unless a move or stop request is pending.  */
1925 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1929 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1930 target_pid_to_str (tp->ptid), flags,
1931 btrace_thread_flag_to_str (flags));
1933 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
1934 record_btrace_stop_replaying_at_end (tp);
/* The following small helpers construct the various target_waitstatus
   results reported by record_btrace_step_thread / record_btrace_wait.  */
1937 /* Return a target_waitstatus indicating that we ran out of history. */
1939 static struct target_waitstatus
1940 btrace_step_no_history (void)
1942 struct target_waitstatus status;
1944 status.kind = TARGET_WAITKIND_NO_HISTORY;
1949 /* Return a target_waitstatus indicating that a step finished. */
1951 static struct target_waitstatus
1952 btrace_step_stopped (void)
1954 struct target_waitstatus status;
1956 status.kind = TARGET_WAITKIND_STOPPED;
1957 status.value.sig = GDB_SIGNAL_TRAP;
1962 /* Return a target_waitstatus indicating that a thread was stopped as
1965 static struct target_waitstatus
1966 btrace_step_stopped_on_request (void)
1968 struct target_waitstatus status;
1970 status.kind = TARGET_WAITKIND_STOPPED;
1971 status.value.sig = GDB_SIGNAL_0;
1976 /* Return a target_waitstatus indicating a spurious stop. */
1978 static struct target_waitstatus
1979 btrace_step_spurious (void)
1981 struct target_waitstatus status;
1983 status.kind = TARGET_WAITKIND_SPURIOUS;
1988 /* Return a target_waitstatus indicating that the thread was not resumed. */
1990 static struct target_waitstatus
1991 btrace_step_no_resumed (void)
1993 struct target_waitstatus status;
1995 status.kind = TARGET_WAITKIND_NO_RESUMED;
2000 /* Return a target_waitstatus indicating that we should wait again. */
2002 static struct target_waitstatus
2003 btrace_step_again (void)
2005 struct target_waitstatus status;
2007 status.kind = TARGET_WAITKIND_IGNORE;
2012 /* Clear the record histories. */
2015 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
/* Discards the cached insn/call history iterators so browsing restarts
   from the current replay position.  xfree (NULL) is a no-op.  */
2017 xfree (btinfo->insn_history);
2018 xfree (btinfo->call_history);
2020 btinfo->insn_history = NULL;
2021 btinfo->call_history = NULL;
2024 /* Check whether TP's current replay position is at a breakpoint. */
2027 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2029 struct btrace_insn_iterator *replay;
2030 struct btrace_thread_info *btinfo;
2031 const struct btrace_insn *insn;
2032 struct inferior *inf;
2034 btinfo = &tp->btrace;
2035 replay = btinfo->replay;
2040 insn = btrace_insn_get (replay);
2044 inf = find_inferior_ptid (tp->ptid);
/* Also records the stop reason (sw/hw breakpoint) in BTINFO for the
   stopped_by_*_breakpoint target methods below.  */
2048 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2049 &btinfo->stop_reason);
2052 /* Step one instruction in forward direction. */
2054 static struct target_waitstatus
2055 record_btrace_single_step_forward (struct thread_info *tp)
2057 struct btrace_insn_iterator *replay, end;
2058 struct btrace_thread_info *btinfo;
2060 btinfo = &tp->btrace;
2061 replay = btinfo->replay;
2063 /* We're done if we're not replaying. */
2065 return btrace_step_no_history ();
2067 /* Check if we're stepping a breakpoint. */
2068 if (record_btrace_replay_at_breakpoint (tp))
2069 return btrace_step_stopped ();
2071 /* Skip gaps during replay. */
2076 /* We will bail out here if we continue stepping after reaching the end
2077 of the execution history. */
2078 steps = btrace_insn_next (replay, 1);
2080 return btrace_step_no_history ();
2082 while (btrace_insn_get (replay) == NULL);
2084 /* Determine the end of the instruction trace. */
2085 btrace_insn_end (&end, btinfo);
2087 /* The execution trace contains (and ends with) the current instruction.
2088 This instruction has not been executed, yet, so the trace really ends
2089 one instruction earlier. */
2090 if (btrace_insn_cmp (replay, &end) == 0)
2091 return btrace_step_no_history ();
/* SPURIOUS tells the caller the step completed without a reportable event.  */
2093 return btrace_step_spurious ();
2096 /* Step one instruction in backward direction. */
2098 static struct target_waitstatus
2099 record_btrace_single_step_backward (struct thread_info *tp)
2101 struct btrace_insn_iterator *replay;
2102 struct btrace_thread_info *btinfo;
2104 btinfo = &tp->btrace;
2105 replay = btinfo->replay;
2107 /* Start replaying if we're not already doing so. */
2109 replay = record_btrace_start_replaying (tp);
2111 /* If we can't step any further, we reached the end of the history.
2112 Skip gaps during replay. */
2117 steps = btrace_insn_prev (replay, 1);
2119 return btrace_step_no_history ();
2121 while (btrace_insn_get (replay) == NULL);
2123 /* Check if we're stepping a breakpoint.
2125 For reverse-stepping, this check is after the step. There is logic in
2126 infrun.c that handles reverse-stepping separately. See, for example,
2127 proceed and adjust_pc_after_break.
2129 This code assumes that for reverse-stepping, PC points to the last
2130 de-executed instruction, whereas for forward-stepping PC points to the
2131 next to-be-executed instruction. */
2132 if (record_btrace_replay_at_breakpoint (tp))
2133 return btrace_step_stopped ();
2135 return btrace_step_spurious ();
2138 /* Step a single thread. */
2140 static struct target_waitstatus
2141 record_btrace_step_thread (struct thread_info *tp)
2143 struct btrace_thread_info *btinfo;
2144 struct target_waitstatus status;
2145 enum btrace_thread_flag flags;
2147 btinfo = &tp->btrace;
/* Consume the pending request; it is re-armed below for continue flags.  */
2149 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2150 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2152 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2153 target_pid_to_str (tp->ptid), flags,
2154 btrace_thread_flag_to_str (flags));
2156 /* We can't step without an execution history. */
2157 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2158 return btrace_step_no_history ();
2163 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
/* BTHR_STOP: report a stop on user request.  */
2166 return btrace_step_stopped_on_request ();
/* BTHR_STEP: a single forward step; SPURIOUS means the step completed.  */
2169 status = record_btrace_single_step_forward (tp);
2170 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2173 return btrace_step_stopped ();
/* BTHR_RSTEP: a single backward step.  */
2176 status = record_btrace_single_step_backward (tp);
2177 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2180 return btrace_step_stopped ();
/* BTHR_CONT: keep stepping forward until a non-spurious event.  */
2183 status = record_btrace_single_step_forward (tp);
2184 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2187 btinfo->flags |= flags;
2188 return btrace_step_again ();
/* BTHR_RCONT: keep stepping backward until a non-spurious event.  */
2191 status = record_btrace_single_step_backward (tp);
2192 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2195 btinfo->flags |= flags;
2196 return btrace_step_again ();
2199 /* We keep threads moving at the end of their execution history. The to_wait
2200 method will stop the thread for whom the event is reported. */
2201 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2202 btinfo->flags |= flags;
2207 /* A vector of threads. */
2209 typedef struct thread_info * tp_t;
2212 /* The to_wait method of target record-btrace. */
2215 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2216 struct target_waitstatus *status, int options)
2218 VEC (tp_t) *moving, *no_history;
2219 struct thread_info *tp, *eventing;
2220 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2222 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2224 /* As long as we're not replaying, just forward the request. */
2225 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2228 return ops->to_wait (ops, ptid, status, options);
2234 make_cleanup (VEC_cleanup (tp_t), &moving);
2235 make_cleanup (VEC_cleanup (tp_t), &no_history);
2237 /* Keep a work list of moving threads. */
2238 ALL_NON_EXITED_THREADS (tp)
2239 if (ptid_match (tp->ptid, ptid)
2240 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2241 VEC_safe_push (tp_t, moving, tp);
/* No thread has a pending request: report TARGET_WAITKIND_NO_RESUMED.  */
2243 if (VEC_empty (tp_t, moving))
2245 *status = btrace_step_no_resumed ();
2247 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2248 target_waitstatus_to_string (status));
2250 do_cleanups (cleanups);
2254 /* Step moving threads one by one, one step each, until either one thread
2255 reports an event or we run out of threads to step.
2257 When stepping more than one thread, chances are that some threads reach
2258 the end of their execution history earlier than others. If we reported
2259 this immediately, all-stop on top of non-stop would stop all threads and
2260 resume the same threads next time. And we would report the same thread
2261 having reached the end of its execution history again.
2263 In the worst case, this would starve the other threads. But even if other
2264 threads would be allowed to make progress, this would result in far too
2265 many intermediate stops.
2267 We therefore delay the reporting of "no execution history" until we have
2268 nothing else to report. By this time, all threads should have moved to
2269 either the beginning or the end of their execution history. There will
2270 be a single user-visible stop. */
2272 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2277 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2279 *status = record_btrace_step_thread (tp);
2281 switch (status->kind)
2283 case TARGET_WAITKIND_IGNORE:
2287 case TARGET_WAITKIND_NO_HISTORY:
2288 VEC_safe_push (tp_t, no_history,
2289 VEC_ordered_remove (tp_t, moving, ix));
2293 eventing = VEC_unordered_remove (tp_t, moving, ix);
2299 if (eventing == NULL)
2301 /* We started with at least one moving thread. This thread must have
2302 either stopped or reached the end of its execution history.
2304 In the former case, EVENTING must not be NULL.
2305 In the latter case, NO_HISTORY must not be empty. */
2306 gdb_assert (!VEC_empty (tp_t, no_history));
2308 /* We kept threads moving at the end of their execution history. Stop
2309 EVENTING now that we are going to report its stop. */
2310 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2311 eventing->btrace.flags &= ~BTHR_MOVE;
2313 *status = btrace_step_no_history ();
2316 gdb_assert (eventing != NULL);
2318 /* We kept threads replaying at the end of their execution history. Stop
2319 replaying EVENTING now that we are going to report its stop. */
2320 record_btrace_stop_replaying_at_end (eventing);
2322 /* Stop all other threads. */
2323 if (!target_is_non_stop_p ())
2324 ALL_NON_EXITED_THREADS (tp)
2325 record_btrace_cancel_resume (tp);
2327 /* Start record histories anew from the current position. */
2328 record_btrace_clear_histories (&eventing->btrace);
2330 /* We moved the replay position but did not update registers. */
2331 registers_changed_ptid (eventing->ptid);
2333 DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
2334 target_pid_to_str (eventing->ptid),
2335 target_waitstatus_to_string (status));
2337 do_cleanups (cleanups);
2338 return eventing->ptid;
2341 /* The to_stop method of target record-btrace. */
2344 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2346 DEBUG ("stop %s", target_pid_to_str (ptid));
2348 /* As long as we're not replaying, just forward the request. */
2349 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2352 ops->to_stop (ops, ptid);
/* While replaying, convert the stop request into a BTHR_STOP flag that
   record_btrace_step_thread will report as a stop-on-request.  */
2356 struct thread_info *tp;
2358 ALL_NON_EXITED_THREADS (tp)
2359 if (ptid_match (tp->ptid, ptid))
2361 tp->btrace.flags &= ~BTHR_MOVE;
2362 tp->btrace.flags |= BTHR_STOP;
2367 /* The to_can_execute_reverse method of target record-btrace. */
2370 record_btrace_can_execute_reverse (struct target_ops *self)
2375 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2378 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2380 if (record_btrace_is_replaying (ops))
2382 struct thread_info *tp = inferior_thread ();
2384 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2387 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2390 /* The to_supports_stopped_by_sw_breakpoint method of target
2394 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2396 if (record_btrace_is_replaying (ops))
2399 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2402 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2405 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2407 if (record_btrace_is_replaying (ops))
2409 struct thread_info *tp = inferior_thread ();
2411 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2414 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2417 /* The to_supports_stopped_by_hw_breakpoint method of target
2421 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2423 if (record_btrace_is_replaying (ops))
2426 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2429 /* The to_update_thread_list method of target record-btrace. */
2432 record_btrace_update_thread_list (struct target_ops *ops)
2434 /* We don't add or remove threads during replay. */
2435 if (record_btrace_is_replaying (ops))
2438 /* Forward the request. */
2440 ops->to_update_thread_list (ops);
2443 /* The to_thread_alive method of target record-btrace. */
2446 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2448 /* We don't add or remove threads during replay. */
/* During replay a thread is "alive" iff GDB already knows it.  */
2449 if (record_btrace_is_replaying (ops))
2450 return find_thread_ptid (ptid) != NULL;
2452 /* Forward the request. */
2454 return ops->to_thread_alive (ops, ptid);
2457 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2461 record_btrace_set_replay (struct thread_info *tp,
2462 const struct btrace_insn_iterator *it)
2464 struct btrace_thread_info *btinfo;
2466 btinfo = &tp->btrace;
2468 if (it == NULL || it->function == NULL)
2469 record_btrace_stop_replaying (tp);
/* Start replay if needed; skip the register invalidation when the replay
   position is already at IT.  */
2472 if (btinfo->replay == NULL)
2473 record_btrace_start_replaying (tp);
2474 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2477 *btinfo->replay = *it;
2478 registers_changed_ptid (tp->ptid);
2481 /* Start anew from the new replay position. */
2482 record_btrace_clear_histories (btinfo);
2484 stop_pc = regcache_read_pc (get_current_regcache ());
2485 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2488 /* The to_goto_record_begin method of target record-btrace. */
2491 record_btrace_goto_begin (struct target_ops *self)
2493 struct thread_info *tp;
2494 struct btrace_insn_iterator begin;
2496 tp = require_btrace_thread ();
2498 btrace_insn_begin (&begin, &tp->btrace);
2499 record_btrace_set_replay (tp, &begin);
2502 /* The to_goto_record_end method of target record-btrace. */
2505 record_btrace_goto_end (struct target_ops *ops)
2507 struct thread_info *tp;
2509 tp = require_btrace_thread ();
/* NULL iterator means "stop replaying", i.e. go to the live position.  */
2511 record_btrace_set_replay (tp, NULL);
2514 /* The to_goto_record method of target record-btrace. */
2517 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2519 struct thread_info *tp;
2520 struct btrace_insn_iterator it;
2521 unsigned int number;
2526 /* Check for wrap-arounds. */
2528 error (_("Instruction number out of range."));
2530 tp = require_btrace_thread ();
2532 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2534 error (_("No such instruction."));
2536 record_btrace_set_replay (tp, &it);
2539 /* The to_execution_direction target method. */
2541 static enum exec_direction_kind
2542 record_btrace_execution_direction (struct target_ops *self)
2544 return record_btrace_resume_exec_dir;
2547 /* The to_prepare_to_generate_core target method. */
/* The generating-corefile flag gates memory access checks elsewhere.  */
2550 record_btrace_prepare_to_generate_core (struct target_ops *self)
2552 record_btrace_generating_corefile = 1;
2555 /* The to_done_generating_core target method. */
2558 record_btrace_done_generating_core (struct target_ops *self)
2560 record_btrace_generating_corefile = 0;
2563 /* Initialize the record-btrace target ops. */
2566 init_record_btrace_ops (void)
2568 struct target_ops *ops;
2570 ops = &record_btrace_ops;
2571 ops->to_shortname = "record-btrace";
2572 ops->to_longname = "Branch tracing target";
2573 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2574 ops->to_open = record_btrace_open;
2575 ops->to_close = record_btrace_close;
2576 ops->to_async = record_btrace_async;
2577 ops->to_detach = record_detach;
2578 ops->to_disconnect = record_disconnect;
2579 ops->to_mourn_inferior = record_mourn_inferior;
2580 ops->to_kill = record_kill;
2581 ops->to_stop_recording = record_btrace_stop_recording;
2582 ops->to_info_record = record_btrace_info;
2583 ops->to_insn_history = record_btrace_insn_history;
2584 ops->to_insn_history_from = record_btrace_insn_history_from;
2585 ops->to_insn_history_range = record_btrace_insn_history_range;
2586 ops->to_call_history = record_btrace_call_history;
2587 ops->to_call_history_from = record_btrace_call_history_from;
2588 ops->to_call_history_range = record_btrace_call_history_range;
2589 ops->to_record_is_replaying = record_btrace_is_replaying;
2590 ops->to_xfer_partial = record_btrace_xfer_partial;
2591 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2592 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2593 ops->to_fetch_registers = record_btrace_fetch_registers;
2594 ops->to_store_registers = record_btrace_store_registers;
2595 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2596 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2597 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2598 ops->to_resume = record_btrace_resume;
2599 ops->to_wait = record_btrace_wait;
2600 ops->to_stop = record_btrace_stop;
2601 ops->to_update_thread_list = record_btrace_update_thread_list;
2602 ops->to_thread_alive = record_btrace_thread_alive;
2603 ops->to_goto_record_begin = record_btrace_goto_begin;
2604 ops->to_goto_record_end = record_btrace_goto_end;
2605 ops->to_goto_record = record_btrace_goto;
2606 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2607 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2608 ops->to_supports_stopped_by_sw_breakpoint
2609 = record_btrace_supports_stopped_by_sw_breakpoint;
2610 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2611 ops->to_supports_stopped_by_hw_breakpoint
2612 = record_btrace_supports_stopped_by_hw_breakpoint;
2613 ops->to_execution_direction = record_btrace_execution_direction;
2614 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2615 ops->to_done_generating_core = record_btrace_done_generating_core;
2616 ops->to_stratum = record_stratum;
2617 ops->to_magic = OPS_MAGIC;
2620 /* Start recording in BTS format. */
2623 cmd_record_btrace_bts_start (char *args, int from_tty)
2625 if (args != NULL && *args != 0)
2626 error (_("Invalid argument."));
2628 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2632 execute_command ("target record-btrace", from_tty);
/* On failure, reset the requested format before re-throwing.  */
2634 CATCH (exception, RETURN_MASK_ALL)
2636 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2637 throw_exception (exception);
2642 /* Start recording Intel(R) Processor Trace. */
2645 cmd_record_btrace_pt_start (char *args, int from_tty)
2647 if (args != NULL && *args != 0)
2648 error (_("Invalid argument."));
2650 record_btrace_conf.format = BTRACE_FORMAT_PT;
2654 execute_command ("target record-btrace", from_tty);
2656 CATCH (exception, RETURN_MASK_ALL)
2658 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2659 throw_exception (exception);
2664 /* Alias for "target record". */
2667 cmd_record_btrace_start (char *args, int from_tty)
2669 if (args != NULL && *args != 0)
2670 error (_("Invalid argument."));
/* Try PT first; if that fails, fall back to BTS before giving up.  */
2672 record_btrace_conf.format = BTRACE_FORMAT_PT;
2676 execute_command ("target record-btrace", from_tty);
2678 CATCH (exception, RETURN_MASK_ALL)
2680 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2684 execute_command ("target record-btrace", from_tty);
2686 CATCH (exception, RETURN_MASK_ALL)
2688 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2689 throw_exception (exception);
2696 /* The "set record btrace" command. */
/* Bare prefix command: just list the subcommands and their values.  */
2699 cmd_set_record_btrace (char *args, int from_tty)
2701 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2704 /* The "show record btrace" command. */
2707 cmd_show_record_btrace (char *args, int from_tty)
2709 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2712 /* The "show record btrace replay-memory-access" command. */
2715 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2716 struct cmd_list_element *c, const char *value)
2718 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2719 replay_memory_access);
2722 /* The "set record btrace bts" command. */
/* Prefix commands for the bts/pt sub-settings: print help or list values.  */
2725 cmd_set_record_btrace_bts (char *args, int from_tty)
2727 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2728 "by an appropriate subcommand.\n"));
2729 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2730 all_commands, gdb_stdout);
2733 /* The "show record btrace bts" command. */
2736 cmd_show_record_btrace_bts (char *args, int from_tty)
2738 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2741 /* The "set record btrace pt" command. */
2744 cmd_set_record_btrace_pt (char *args, int from_tty)
2746 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2747 "by an appropriate subcommand.\n"));
2748 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2749 all_commands, gdb_stdout);
2752 /* The "show record btrace pt" command. */
2755 cmd_show_record_btrace_pt (char *args, int from_tty)
2757 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2760 /* The "record bts buffer-size" show value function. */
2763 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2764 struct cmd_list_element *c,
2767 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2771 /* The "record pt buffer-size" show value function. */
2774 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2775 struct cmd_list_element *c,
2778 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2782 void _initialize_record_btrace (void);
2784 /* Initialize btrace commands. */
2787 _initialize_record_btrace (void)
2789 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2790 _("Start branch trace recording."), &record_btrace_cmdlist,
2791 "record btrace ", 0, &record_cmdlist);
2792 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2794 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2796 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2797 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2798 This format may not be available on all processors."),
2799 &record_btrace_cmdlist);
2800 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2802 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2804 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2805 This format may not be available on all processors."),
2806 &record_btrace_cmdlist);
2807 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2809 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2810 _("Set record options"), &set_record_btrace_cmdlist,
2811 "set record btrace ", 0, &set_record_cmdlist);
2813 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2814 _("Show record options"), &show_record_btrace_cmdlist,
2815 "show record btrace ", 0, &show_record_cmdlist);
2817 add_setshow_enum_cmd ("replay-memory-access", no_class,
2818 replay_memory_access_types, &replay_memory_access, _("\
2819 Set what memory accesses are allowed during replay."), _("\
2820 Show what memory accesses are allowed during replay."),
2821 _("Default is READ-ONLY.\n\n\
2822 The btrace record target does not trace data.\n\
2823 The memory therefore corresponds to the live target and not \
2824 to the current replay position.\n\n\
2825 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2826 When READ-WRITE, allow accesses to read-only and read-write memory during \
2828 NULL, cmd_show_replay_memory_access,
2829 &set_record_btrace_cmdlist,
2830 &show_record_btrace_cmdlist);
2832 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2833 _("Set record btrace bts options"),
2834 &set_record_btrace_bts_cmdlist,
2835 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2837 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2838 _("Show record btrace bts options"),
2839 &show_record_btrace_bts_cmdlist,
2840 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2842 add_setshow_uinteger_cmd ("buffer-size", no_class,
2843 &record_btrace_conf.bts.size,
2844 _("Set the record/replay bts buffer size."),
2845 _("Show the record/replay bts buffer size."), _("\
2846 When starting recording request a trace buffer of this size. \
2847 The actual buffer size may differ from the requested size. \
2848 Use \"info record\" to see the actual buffer size.\n\n\
2849 Bigger buffers allow longer recording but also take more time to process \
2850 the recorded execution trace.\n\n\
2851 The trace buffer size may not be changed while recording."), NULL,
2852 show_record_bts_buffer_size_value,
2853 &set_record_btrace_bts_cmdlist,
2854 &show_record_btrace_bts_cmdlist);
2856 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2857 _("Set record btrace pt options"),
2858 &set_record_btrace_pt_cmdlist,
2859 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2861 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2862 _("Show record btrace pt options"),
2863 &show_record_btrace_pt_cmdlist,
2864 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2866 add_setshow_uinteger_cmd ("buffer-size", no_class,
2867 &record_btrace_conf.pt.size,
2868 _("Set the record/replay pt buffer size."),
2869 _("Show the record/replay pt buffer size."), _("\
2870 Bigger buffers allow longer recording but also take more time to process \
2871 the recorded execution.\n\
2872 The actual buffer size may differ from the requested size. Use \"info record\" \
2873 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2874 &set_record_btrace_pt_cmdlist,
2875 &show_record_btrace_pt_cmdlist);
2877 init_record_btrace_ops ();
2878 add_target (&record_btrace_ops);
2880 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2883 record_btrace_conf.bts.size = 64 * 1024;
2884 record_btrace_conf.pt.size = 16 * 1024;