1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops;
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer *record_btrace_thread_observer;
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only[] = "read-only";
50 static const char replay_memory_access_read_write[] = "read-write";
51 static const char *const replay_memory_access_types[] =
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
/* NOTE(review): the array's braces/terminator lines are elided in this
   excerpt; confirm against the full file.  */
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access = replay_memory_access_read_only;
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element *set_record_btrace_cmdlist;
63 static struct cmd_list_element *show_record_btrace_cmdlist;
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler *record_btrace_async_inferior_event_handler;
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile;
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf;
77 /* Command list for "record btrace". */
78 static struct cmd_list_element *record_btrace_cmdlist;
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
88 /* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
/* Prints only when "set debug record" is non-zero.  The GNU ##args
   extension swallows the comma when no variadic arguments are given.  */
91 #define DEBUG(msg, args...) \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
101 /* Update the branch trace for the current thread and return a pointer to its
104 Throws an error if there is no thread or no trace. This function never
107 static struct thread_info *
108 require_btrace_thread (void)
110 struct thread_info *tp;
/* Look up the thread for inferior_ptid; errors out when there is none.
   (NOTE(review): the NULL check line is elided in this excerpt.)  */
114 tp = find_thread_ptid (inferior_ptid);
116 error (_("No thread."));
/* An empty trace is treated the same as having no trace at all.  */
120 if (btrace_is_empty (tp))
121 error (_("No trace."));
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
129 Throws an error if there is no thread or no trace. This function never
132 static struct btrace_thread_info *
133 require_btrace (void)
135 struct thread_info *tp;
/* Reuse the thread-level helper; it performs all validation.  */
137 tp = require_btrace_thread ();
142 /* Enable branch tracing for one thread. Warn on errors. */
145 record_btrace_enable_warn (struct thread_info *tp)
/* Errors while enabling are demoted to warnings so that one failing
   thread does not abort tracing for the others.  */
149 btrace_enable (tp, &record_btrace_conf);
151 CATCH (error, RETURN_MASK_ERROR)
153 warning ("%s", error.message);
158 /* Callback function to disable branch tracing for one thread. */
161 record_btrace_disable_callback (void *arg)
163 struct thread_info *tp;
170 /* Enable automatic tracing of new threads. */
173 record_btrace_auto_enable (void)
175 DEBUG ("attach thread observer");
/* The observer fires for every newly created thread and enables
   tracing for it via record_btrace_enable_warn.  */
177 record_btrace_thread_observer
178 = observer_attach_new_thread (record_btrace_enable_warn);
181 /* Disable automatic tracing of new threads. */
184 record_btrace_auto_disable (void)
186 /* The observer may have been detached, already. */
187 if (record_btrace_thread_observer == NULL)
190 DEBUG ("detach thread observer");
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
196 /* The record-btrace async event handler function. */
199 record_btrace_handle_async_inferior_event (gdb_client_data data)
201 inferior_event_handler (INF_REG_EVENT, NULL);
204 /* The to_open method of target record-btrace. */
207 record_btrace_open (const char *args, int from_tty)
209 struct cleanup *disable_chain;
210 struct thread_info *tp;
216 if (!target_has_execution)
217 error (_("The program is not being run."));
220 error (_("Record btrace can't debug inferior in non-stop mode."));
222 gdb_assert (record_btrace_thread_observer == NULL);
/* Build a cleanup chain so tracing is disabled again for every
   already-enabled thread if enabling a later one throws.  */
224 disable_chain = make_cleanup (null_cleanup, NULL);
225 ALL_NON_EXITED_THREADS (tp)
/* A NULL or empty ARGS means "all threads"; otherwise ARGS is a
   thread-number list.  */
226 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
228 btrace_enable (tp, &record_btrace_conf);
230 make_cleanup (record_btrace_disable_callback, tp);
233 record_btrace_auto_enable ();
235 push_target (&record_btrace_ops);
237 record_btrace_async_inferior_event_handler
238 = create_async_event_handler (record_btrace_handle_async_inferior_event,
240 record_btrace_generating_corefile = 0;
242 observer_notify_record_changed (current_inferior (), 1);
/* All threads enabled successfully; keep tracing on.  */
244 discard_cleanups (disable_chain);
247 /* The to_stop_recording method of target record-btrace. */
250 record_btrace_stop_recording (struct target_ops *self)
252 struct thread_info *tp;
254 DEBUG ("stop recording");
256 record_btrace_auto_disable ();
/* Only threads that actually have tracing enabled need disabling.  */
258 ALL_NON_EXITED_THREADS (tp)
259 if (tp->btrace.target != NULL)
263 /* The to_close method of target record-btrace. */
266 record_btrace_close (struct target_ops *self)
268 struct thread_info *tp;
270 if (record_btrace_async_inferior_event_handler != NULL)
271 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
273 /* Make sure automatic recording gets disabled even if we did not stop
274 recording before closing the record-btrace target. */
275 record_btrace_auto_disable ();
277 /* We should have already stopped recording.
278 Tear down btrace in case we have not. */
279 ALL_NON_EXITED_THREADS (tp)
280 btrace_teardown (tp);
283 /* The to_async method of target record-btrace. */
286 record_btrace_async (struct target_ops *ops, int enable)
/* ENABLE selects between arming and clearing our event handler (the
   guarding if/else lines are elided in this excerpt).  */
289 mark_async_event_handler (record_btrace_async_inferior_event_handler);
291 clear_async_event_handler (record_btrace_async_inferior_event_handler);
/* Also propagate the async setting to the target beneath.  */
293 ops->beneath->to_async (ops->beneath, enable);
296 /* Adjusts the size and returns a human readable size suffix. */
299 record_btrace_adjust_size (unsigned int *size)
/* Scale *SIZE down when it is an exact multiple of 1 GiB / 1 MiB /
   1 KiB and return the matching suffix (shift/return lines elided).  */
305 if ((sz & ((1u << 30) - 1)) == 0)
310 else if ((sz & ((1u << 20) - 1)) == 0)
315 else if ((sz & ((1u << 10) - 1)) == 0)
324 /* Print a BTS configuration. */
327 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
335 suffix = record_btrace_adjust_size (&size);
336 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
340 /* Print an Intel(R) Processor Trace configuration. */
343 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
351 suffix = record_btrace_adjust_size (&size);
352 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
356 /* Print a branch tracing configuration. */
359 record_btrace_print_conf (const struct btrace_config *conf)
361 printf_unfiltered (_("Recording format: %s.\n"),
362 btrace_format_string (conf->format));
/* Dispatch per-format detail printing; NONE has nothing extra.  */
364 switch (conf->format)
366 case BTRACE_FORMAT_NONE:
369 case BTRACE_FORMAT_BTS:
370 record_btrace_print_bts_conf (&conf->bts);
373 case BTRACE_FORMAT_PT:
374 record_btrace_print_pt_conf (&conf->pt);
/* Fix spelling in the internal-error message: "Unkown" -> "Unknown".  */
378 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
381 /* The to_info_record method of target record-btrace. */
384 record_btrace_info (struct target_ops *self)
386 struct btrace_thread_info *btinfo;
387 const struct btrace_config *conf;
388 struct thread_info *tp;
389 unsigned int insns, calls, gaps;
393 tp = find_thread_ptid (inferior_ptid);
395 error (_("No thread."));
397 btinfo = &tp->btrace;
399 conf = btrace_conf (btinfo);
401 record_btrace_print_conf (conf);
/* Count instructions and calls only when there is trace to count.  */
409 if (!btrace_is_empty (tp))
411 struct btrace_call_iterator call;
412 struct btrace_insn_iterator insn;
/* The last call/insn are found by stepping back from the end.  */
414 btrace_call_end (&call, btinfo);
415 btrace_call_prev (&call, 1);
416 calls = btrace_call_number (&call);
418 btrace_insn_end (&insn, btinfo);
420 insns = btrace_insn_number (&insn);
423 /* The last instruction does not really belong to the trace. */
430 /* Skip gaps at the end. */
433 steps = btrace_insn_prev (&insn, 1);
437 insns = btrace_insn_number (&insn);
442 gaps = btinfo->ngaps;
445 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
446 "for thread %d (%s).\n"), insns, calls, gaps,
447 tp->num, target_pid_to_str (tp->ptid));
449 if (btrace_is_replaying (tp))
450 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
451 btrace_insn_number (btinfo->replay));
454 /* Print a decode error. */
457 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
458 enum btrace_format format)
/* Fallback when ERRCODE is not recognized for FORMAT.  */
463 errstr = _("unknown");
471 case BTRACE_FORMAT_BTS:
477 case BDE_BTS_OVERFLOW:
478 errstr = _("instruction overflow");
481 case BDE_BTS_INSN_SIZE:
482 errstr = _("unknown instruction");
487 #if defined (HAVE_LIBIPT)
488 case BTRACE_FORMAT_PT:
491 case BDE_PT_USER_QUIT:
493 errstr = _("trace decode cancelled");
496 case BDE_PT_DISABLED:
498 errstr = _("disabled");
501 case BDE_PT_OVERFLOW:
503 errstr = _("overflow");
/* Other PT errcodes come from libipt; let it provide the text.  */
508 errstr = pt_errstr (pt_errcode (errcode));
512 #endif /* defined (HAVE_LIBIPT) */
/* Emit "[decode error (<code>): <text>]" through the ui_out.  */
515 ui_out_text (uiout, _("["));
518 ui_out_text (uiout, _("decode error ("));
519 ui_out_field_int (uiout, "errcode", errcode);
520 ui_out_text (uiout, _("): "));
522 ui_out_text (uiout, errstr);
523 ui_out_text (uiout, _("]\n"));
526 /* Print an unsigned int. */
529 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
531 ui_out_field_fmt (uiout, fld, "%u", val);
534 /* Disassemble a section of the recorded instruction trace. */
537 btrace_insn_history (struct ui_out *uiout,
538 const struct btrace_thread_info *btinfo,
539 const struct btrace_insn_iterator *begin,
540 const struct btrace_insn_iterator *end, int flags)
542 struct gdbarch *gdbarch;
543 struct btrace_insn_iterator it;
545 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
546 btrace_insn_number (end));
548 gdbarch = target_gdbarch ();
/* Walk the half-open range [BEGIN; END) one instruction at a time.  */
550 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
552 const struct btrace_insn *insn;
554 insn = btrace_insn_get (&it);
556 /* A NULL instruction indicates a gap in the trace. */
559 const struct btrace_config *conf;
561 conf = btrace_conf (btinfo);
563 /* We have trace so we must have a configuration. */
564 gdb_assert (conf != NULL);
566 btrace_ui_out_decode_error (uiout, it.function->errcode,
573 /* We may add a speculation prefix later. We use the same space
574 that is used for the pc prefix. */
575 if ((flags & DISASSEMBLY_OMIT_PC) == 0)
576 strncpy (prefix, pc_prefix (insn->pc), 3);
585 /* Print the instruction index. */
586 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
587 ui_out_text (uiout, "\t");
589 /* Indicate speculative execution by a leading '?'. */
590 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
593 /* Print the prefix; we tell gdb_disassembly below to omit it. */
594 ui_out_field_fmt (uiout, "prefix", "%s", prefix);
596 /* Disassembly with '/m' flag may not produce the expected result.
/* Disassemble exactly one instruction at INSN->pc.  */
598 gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
599 1, insn->pc, insn->pc + 1);
604 /* The to_insn_history method of target record-btrace. */
607 record_btrace_insn_history (struct target_ops *self, int size, int flags)
609 struct btrace_thread_info *btinfo;
610 struct btrace_insn_history *history;
611 struct btrace_insn_iterator begin, end;
612 struct cleanup *uiout_cleanup;
613 struct ui_out *uiout;
614 unsigned int context, covered;
616 uiout = current_uiout;
617 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* SIZE's sign selects the direction, its magnitude the number of
   instructions to show.  */
619 context = abs (size);
621 error (_("Bad record instruction-history-size."));
623 btinfo = require_btrace ();
624 history = btinfo->insn_history;
627 struct btrace_insn_iterator *replay;
629 DEBUG ("insn-history (0x%x): %d", flags, size);
631 /* If we're replaying, we start at the replay position. Otherwise, we
632 start at the tail of the trace. */
633 replay = btinfo->replay;
637 btrace_insn_end (&begin, btinfo);
639 /* We start from here and expand in the requested direction. Then we
640 expand in the other direction, as well, to fill up any remaining
645 /* We want the current position covered, as well. */
646 covered = btrace_insn_next (&end, 1);
647 covered += btrace_insn_prev (&begin, context - covered);
648 covered += btrace_insn_next (&end, context - covered);
652 covered = btrace_insn_next (&end, context);
653 covered += btrace_insn_prev (&begin, context - covered);
/* A previous "record instruction-history" exists; continue from its
   boundary in the requested direction.  */
658 begin = history->begin;
661 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
662 btrace_insn_number (&begin), btrace_insn_number (&end));
667 covered = btrace_insn_prev (&begin, context);
672 covered = btrace_insn_next (&end, context);
677 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
/* COVERED == 0 means we could not move in the requested direction.  */
681 printf_unfiltered (_("At the start of the branch trace record.\n"));
683 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the printed range for a subsequent history command.  */
686 btrace_set_insn_history (btinfo, &begin, &end);
687 do_cleanups (uiout_cleanup);
690 /* The to_insn_history_range method of target record-btrace. */
693 record_btrace_insn_history_range (struct target_ops *self,
694 ULONGEST from, ULONGEST to, int flags)
696 struct btrace_thread_info *btinfo;
697 struct btrace_insn_history *history;
698 struct btrace_insn_iterator begin, end;
699 struct cleanup *uiout_cleanup;
700 struct ui_out *uiout;
701 unsigned int low, high;
704 uiout = current_uiout;
705 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
710 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
712 /* Check for wrap-arounds. */
/* LOW/HIGH are FROM/TO truncated to unsigned int; a mismatch means
   the value did not fit.  */
713 if (low != from || high != to)
714 error (_("Bad range."));
717 error (_("Bad range."));
719 btinfo = require_btrace ();
721 found = btrace_find_insn_by_number (&begin, btinfo, low);
723 error (_("Range out of bounds."));
725 found = btrace_find_insn_by_number (&end, btinfo, high);
728 /* Silently truncate the range. */
729 btrace_insn_end (&end, btinfo);
733 /* We want both begin and end to be inclusive. */
734 btrace_insn_next (&end, 1);
737 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
738 btrace_set_insn_history (btinfo, &begin, &end);
740 do_cleanups (uiout_cleanup);
743 /* The to_insn_history_from method of target record-btrace. */
746 record_btrace_insn_history_from (struct target_ops *self,
747 ULONGEST from, int size, int flags)
749 ULONGEST begin, end, context;
751 context = abs (size);
753 error (_("Bad record instruction-history-size."));
/* Negative SIZE: show CONTEXT instructions ending at FROM.  */
762 begin = from - context + 1;
/* Positive SIZE: show CONTEXT instructions starting at FROM.  */
767 end = from + context - 1;
769 /* Check for wrap-around. */
774 record_btrace_insn_history_range (self, begin, end, flags);
777 /* Print the instruction number range for a function call history line. */
780 btrace_call_history_insn_range (struct ui_out *uiout,
781 const struct btrace_function *bfun)
783 unsigned int begin, end, size;
785 size = VEC_length (btrace_insn_s, bfun->insn);
786 gdb_assert (size > 0);
/* Instruction numbers start at BFUN->insn_offset; END is inclusive.  */
788 begin = bfun->insn_offset;
789 end = begin + size - 1;
791 ui_out_field_uint (uiout, "insn begin", begin);
792 ui_out_text (uiout, ",");
793 ui_out_field_uint (uiout, "insn end", end);
796 /* Compute the lowest and highest source line for the instructions in BFUN
797 and return them in PBEGIN and PEND.
798 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
799 result from inlining or macro expansion. */
802 btrace_compute_src_line_range (const struct btrace_function *bfun,
803 int *pbegin, int *pend)
805 struct btrace_insn *insn;
806 struct symtab *symtab;
818 symtab = symbol_symtab (sym);
820 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
822 struct symtab_and_line sal;
824 sal = find_pc_line (insn->pc, 0);
/* Skip lines that belong to a different symtab (inlining/macros).  */
825 if (sal.symtab != symtab || sal.line == 0)
828 begin = min (begin, sal.line);
829 end = max (end, sal.line);
837 /* Print the source line information for a function call history line. */
840 btrace_call_history_src_line (struct ui_out *uiout,
841 const struct btrace_function *bfun)
850 ui_out_field_string (uiout, "file",
851 symtab_to_filename_for_display (symbol_symtab (sym)));
853 btrace_compute_src_line_range (bfun, &begin, &end);
/* Print "file:min" and, when the range spans lines, ",max".  */
857 ui_out_text (uiout, ":");
858 ui_out_field_int (uiout, "min line", begin);
863 ui_out_text (uiout, ",");
864 ui_out_field_int (uiout, "max line", end);
867 /* Get the name of a branch trace function. */
870 btrace_get_bfun_name (const struct btrace_function *bfun)
872 struct minimal_symbol *msym;
/* Prefer the full symbol's name, then fall back to the msymbol's.  */
882 return SYMBOL_PRINT_NAME (sym);
883 else if (msym != NULL)
884 return MSYMBOL_PRINT_NAME (msym);
889 /* Disassemble a section of the recorded function trace. */
892 btrace_call_history (struct ui_out *uiout,
893 const struct btrace_thread_info *btinfo,
894 const struct btrace_call_iterator *begin,
895 const struct btrace_call_iterator *end,
896 enum record_print_flag flags)
898 struct btrace_call_iterator it;
900 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
901 btrace_call_number (end));
/* Walk the half-open range [BEGIN; END) one call segment at a time.  */
903 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
905 const struct btrace_function *bfun;
906 struct minimal_symbol *msym;
909 bfun = btrace_call_get (&it);
913 /* Print the function index. */
914 ui_out_field_uint (uiout, "index", bfun->number);
915 ui_out_text (uiout, "\t");
917 /* Indicate gaps in the trace. */
918 if (bfun->errcode != 0)
920 const struct btrace_config *conf;
922 conf = btrace_conf (btinfo);
924 /* We have trace so we must have a configuration. */
925 gdb_assert (conf != NULL);
927 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
/* Indent by call depth when requested.  */
932 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
934 int level = bfun->level + btinfo->level, i;
936 for (i = 0; i < level; ++i)
937 ui_out_text (uiout, " ");
/* Prefer the full symbol name, then the msymbol, then "??" (CLI only).  */
941 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
942 else if (msym != NULL)
943 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
944 else if (!ui_out_is_mi_like_p (uiout))
945 ui_out_field_string (uiout, "function", "??");
947 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
949 ui_out_text (uiout, _("\tinst "));
950 btrace_call_history_insn_range (uiout, bfun);
953 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
955 ui_out_text (uiout, _("\tat "));
956 btrace_call_history_src_line (uiout, bfun);
959 ui_out_text (uiout, "\n");
963 /* The to_call_history method of target record-btrace. */
966 record_btrace_call_history (struct target_ops *self, int size, int flags)
968 struct btrace_thread_info *btinfo;
969 struct btrace_call_history *history;
970 struct btrace_call_iterator begin, end;
971 struct cleanup *uiout_cleanup;
972 struct ui_out *uiout;
973 unsigned int context, covered;
975 uiout = current_uiout;
976 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* SIZE's sign selects the direction, its magnitude the call count.  */
978 context = abs (size);
980 error (_("Bad record function-call-history-size."));
982 btinfo = require_btrace ();
983 history = btinfo->call_history;
986 struct btrace_insn_iterator *replay;
988 DEBUG ("call-history (0x%x): %d", flags, size);
990 /* If we're replaying, we start at the replay position. Otherwise, we
991 start at the tail of the trace. */
992 replay = btinfo->replay;
995 begin.function = replay->function;
996 begin.btinfo = btinfo;
999 btrace_call_end (&begin, btinfo);
1001 /* We start from here and expand in the requested direction. Then we
1002 expand in the other direction, as well, to fill up any remaining
1007 /* We want the current position covered, as well. */
1008 covered = btrace_call_next (&end, 1);
1009 covered += btrace_call_prev (&begin, context - covered);
1010 covered += btrace_call_next (&end, context - covered);
1014 covered = btrace_call_next (&end, context);
/* NOTE(review): "context- covered" below is a spacing nit only.  */
1015 covered += btrace_call_prev (&begin, context- covered);
/* Continue from the previously shown range.  */
1020 begin = history->begin;
1023 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1024 btrace_call_number (&begin), btrace_call_number (&end));
1029 covered = btrace_call_prev (&begin, context);
1034 covered = btrace_call_next (&end, context);
1039 btrace_call_history (uiout, btinfo, &begin, &end, flags);
/* COVERED == 0 means we could not move in the requested direction.  */
1043 printf_unfiltered (_("At the start of the branch trace record.\n"));
1045 printf_unfiltered (_("At the end of the branch trace record.\n"));
1048 btrace_set_call_history (btinfo, &begin, &end);
1049 do_cleanups (uiout_cleanup);
1052 /* The to_call_history_range method of target record-btrace. */
1055 record_btrace_call_history_range (struct target_ops *self,
1056 ULONGEST from, ULONGEST to, int flags)
1058 struct btrace_thread_info *btinfo;
1059 struct btrace_call_history *history;
1060 struct btrace_call_iterator begin, end;
1061 struct cleanup *uiout_cleanup;
1062 struct ui_out *uiout;
1063 unsigned int low, high;
1066 uiout = current_uiout;
1067 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1072 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1074 /* Check for wrap-arounds. */
/* LOW/HIGH are FROM/TO truncated to unsigned int; a mismatch means
   the value did not fit.  */
1075 if (low != from || high != to)
1076 error (_("Bad range."));
1079 error (_("Bad range."));
1081 btinfo = require_btrace ();
1083 found = btrace_find_call_by_number (&begin, btinfo, low)
1085 error (_("Range out of bounds."));
1087 found = btrace_find_call_by_number (&end, btinfo, high);
1090 /* Silently truncate the range. */
1091 btrace_call_end (&end, btinfo);
1095 /* We want both begin and end to be inclusive. */
1096 btrace_call_next (&end, 1);
1099 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1100 btrace_set_call_history (btinfo, &begin, &end);
1102 do_cleanups (uiout_cleanup);
1105 /* The to_call_history_from method of target record-btrace. */
1108 record_btrace_call_history_from (struct target_ops *self,
1109 ULONGEST from, int size, int flags)
1111 ULONGEST begin, end, context;
1113 context = abs (size);
1115 error (_("Bad record function-call-history-size."));
/* Negative SIZE: end the range at FROM; positive SIZE: start at FROM.  */
1124 begin = from - context + 1;
1129 end = from + context - 1;
1131 /* Check for wrap-around. */
1136 record_btrace_call_history_range (self, begin, end, flags);
1139 /* The to_record_is_replaying method of target record-btrace. */
1142 record_btrace_is_replaying (struct target_ops *self)
1144 struct thread_info *tp;
/* We are replaying if any live thread is currently replaying.  */
1146 ALL_NON_EXITED_THREADS (tp)
1147 if (btrace_is_replaying (tp))
1153 /* The to_xfer_partial method of target record-btrace. */
1155 static enum target_xfer_status
1156 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1157 const char *annex, gdb_byte *readbuf,
1158 const gdb_byte *writebuf, ULONGEST offset,
1159 ULONGEST len, ULONGEST *xfered_len)
1161 struct target_ops *t;
1163 /* Filter out requests that don't make sense during replay. */
1164 if (replay_memory_access == replay_memory_access_read_only
1165 && !record_btrace_generating_corefile
1166 && record_btrace_is_replaying (ops))
1170 case TARGET_OBJECT_MEMORY:
1172 struct target_section *section;
1174 /* We do not allow writing memory in general. */
1175 if (writebuf != NULL)
1178 return TARGET_XFER_UNAVAILABLE;
1181 /* We allow reading readonly memory. */
1182 section = target_section_by_addr (ops, offset);
1183 if (section != NULL)
1185 /* Check if the section we found is readonly. */
1186 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1187 section->the_bfd_section)
1188 & SEC_READONLY) != 0)
1190 /* Truncate the request to fit into this section. */
1191 len = min (len, section->endaddr - offset);
/* Anything not read-only is unavailable while replaying.  */
1197 return TARGET_XFER_UNAVAILABLE;
1202 /* Forward the request. */
/* NOTE(review): the excerpt elides the loop that walks to the first
   target beneath implementing to_xfer_partial.  */
1204 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1205 offset, len, xfered_len);
1208 /* The to_insert_breakpoint method of target record-btrace. */
1211 record_btrace_insert_breakpoint (struct target_ops *ops,
1212 struct gdbarch *gdbarch,
1213 struct bp_target_info *bp_tgt)
1218 /* Inserting breakpoints requires accessing memory. Allow it for the
1219 duration of this function. */
1220 old = replay_memory_access;
1221 replay_memory_access = replay_memory_access_read_write;
1226 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
/* Restore the access mode even when the target beneath throws.  */
1228 CATCH (except, RETURN_MASK_ALL)
1230 replay_memory_access = old;
1231 throw_exception (except);
1234 replay_memory_access = old;
1239 /* The to_remove_breakpoint method of target record-btrace. */
1242 record_btrace_remove_breakpoint (struct target_ops *ops,
1243 struct gdbarch *gdbarch,
1244 struct bp_target_info *bp_tgt)
1249 /* Removing breakpoints requires accessing memory. Allow it for the
1250 duration of this function. */
1251 old = replay_memory_access;
1252 replay_memory_access = replay_memory_access_read_write;
1257 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
/* Restore the access mode even when the target beneath throws.  */
1259 CATCH (except, RETURN_MASK_ALL)
1261 replay_memory_access = old;
1262 throw_exception (except);
1265 replay_memory_access = old;
1270 /* The to_fetch_registers method of target record-btrace. */
1273 record_btrace_fetch_registers (struct target_ops *ops,
1274 struct regcache *regcache, int regno)
1276 struct btrace_insn_iterator *replay;
1277 struct thread_info *tp;
1279 tp = find_thread_ptid (inferior_ptid);
1280 gdb_assert (tp != NULL);
1282 replay = tp->btrace.replay;
/* While replaying (and not writing a core file), registers come from
   the recorded trace; only the PC can be supplied.  */
1283 if (replay != NULL && !record_btrace_generating_corefile)
1285 const struct btrace_insn *insn;
1286 struct gdbarch *gdbarch;
1289 gdbarch = get_regcache_arch (regcache);
1290 pcreg = gdbarch_pc_regnum (gdbarch);
1294 /* We can only provide the PC register. */
1295 if (regno >= 0 && regno != pcreg)
1298 insn = btrace_insn_get (replay);
1299 gdb_assert (insn != NULL);
1301 regcache_raw_supply (regcache, regno, &insn->pc);
/* Not replaying: forward to the target beneath.  */
1305 struct target_ops *t = ops->beneath;
1307 t->to_fetch_registers (t, regcache, regno);
1311 /* The to_store_registers method of target record-btrace. */
1314 record_btrace_store_registers (struct target_ops *ops,
1315 struct regcache *regcache, int regno)
1317 struct target_ops *t;
/* Writing registers would diverge from the recorded history.  */
1319 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1320 error (_("This record target does not allow writing registers."));
1322 gdb_assert (may_write_registers != 0);
1325 t->to_store_registers (t, regcache, regno);
1328 /* The to_prepare_to_store method of target record-btrace. */
1331 record_btrace_prepare_to_store (struct target_ops *ops,
1332 struct regcache *regcache)
1334 struct target_ops *t;
/* During replay there is nothing to prepare; stores are rejected.  */
1336 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1340 t->to_prepare_to_store (t, regcache);
1343 /* The branch trace frame cache. */
1345 struct btrace_frame_cache
/* The thread this frame belongs to.  */
1348 struct thread_info *tp;
1350 /* The frame info. */
1351 struct frame_info *frame;
1353 /* The branch trace function segment. */
1354 const struct btrace_function *bfun;
1357 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1359 static htab_t bfcache;
1361 /* hash_f for htab_create_alloc of bfcache. */
1364 bfcache_hash (const void *arg)
1366 const struct btrace_frame_cache *cache = arg;
/* Hash on the frame pointer's identity.  */
1368 return htab_hash_pointer (cache->frame);
1371 /* eq_f for htab_create_alloc of bfcache. */
1374 bfcache_eq (const void *arg1, const void *arg2)
1376 const struct btrace_frame_cache *cache1 = arg1;
1377 const struct btrace_frame_cache *cache2 = arg2;
1379 return cache1->frame == cache2->frame;
1382 /* Create a new btrace frame cache. */
1384 static struct btrace_frame_cache *
1385 bfcache_new (struct frame_info *frame)
1387 struct btrace_frame_cache *cache;
/* Allocate on the frame obstack so the cache lives as long as FRAME.  */
1390 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1391 cache->frame = frame;
/* FRAME must not already be in the table.  */
1393 slot = htab_find_slot (bfcache, cache, INSERT);
1394 gdb_assert (*slot == NULL);
1400 /* Extract the branch trace function from a branch trace frame. */
1402 static const struct btrace_function *
1403 btrace_get_frame_function (struct frame_info *frame)
1405 const struct btrace_frame_cache *cache;
1406 const struct btrace_function *bfun;
1407 struct btrace_frame_cache pattern;
/* Look up FRAME in the cache without inserting.  */
1410 pattern.frame = frame;
1412 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1420 /* Implement stop_reason method for record_btrace_frame_unwind. */
1422 static enum unwind_stop_reason
1423 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1426 const struct btrace_frame_cache *cache;
1427 const struct btrace_function *bfun;
1429 cache = *this_cache;
1431 gdb_assert (bfun != NULL);
/* No caller segment recorded: the unwind cannot continue.  */
1433 if (bfun->up == NULL)
1434 return UNWIND_UNAVAILABLE;
1436 return UNWIND_NO_REASON;
1439 /* Implement this_id method for record_btrace_frame_unwind. */
1442 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1443 struct frame_id *this_id)
1445 const struct btrace_frame_cache *cache;
1446 const struct btrace_function *bfun;
1447 CORE_ADDR code, special;
1449 cache = *this_cache;
1452 gdb_assert (bfun != NULL);
/* Use the first segment of this function for the frame id.  */
1454 while (bfun->segment.prev != NULL)
1455 bfun = bfun->segment.prev;
1457 code = get_frame_func (this_frame);
1458 special = bfun->number;
/* The stack itself is not available in btrace history.  */
1460 *this_id = frame_id_build_unavailable_stack_special (code, special);
1462 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1463 btrace_get_bfun_name (cache->bfun),
1464 core_addr_to_string_nz (this_id->code_addr),
1465 core_addr_to_string_nz (this_id->special_addr));
1468 /* Implement prev_register method for record_btrace_frame_unwind. */
1470 static struct value *
1471 record_btrace_frame_prev_register (struct frame_info *this_frame,
1475 const struct btrace_frame_cache *cache;
1476 const struct btrace_function *bfun, *caller;
1477 const struct btrace_insn *insn;
1478 struct gdbarch *gdbarch;
1482 gdbarch = get_frame_arch (this_frame);
1483 pcreg = gdbarch_pc_regnum (gdbarch);
/* Only the PC can be unwound from branch trace.  */
1484 if (pcreg < 0 || regnum != pcreg)
1485 throw_error (NOT_AVAILABLE_ERROR,
1486 _("Registers are not available in btrace record history"));
1488 cache = *this_cache;
1490 gdb_assert (bfun != NULL);
1494 throw_error (NOT_AVAILABLE_ERROR,
1495 _("No caller in btrace record history"));
/* Pick the resume insn in the caller; which end of the caller's insn
   vector applies depends on whether the up link is a return link
   (NOTE(review): surrounding lines are elided — confirm in full file).  */
1497 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1499 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1504 insn = VEC_last (btrace_insn_s, caller->insn);
1507 pc += gdb_insn_length (gdbarch, pc);
1510 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1511 btrace_get_bfun_name (bfun), bfun->level,
1512 core_addr_to_string_nz (pc));
1514 return frame_unwind_got_address (this_frame, regnum, pc);
1517 /* Implement sniffer method for record_btrace_frame_unwind. */
1520 record_btrace_frame_sniffer (const struct frame_unwind *self,
1521 struct frame_info *this_frame,
1524 const struct btrace_function *bfun;
1525 struct btrace_frame_cache *cache;
1526 struct thread_info *tp;
1527 struct frame_info *next;
1529 /* THIS_FRAME does not contain a reference to its thread. */
1530 tp = find_thread_ptid (inferior_ptid);
1531 gdb_assert (tp != NULL);
1534 next = get_next_frame (this_frame);
/* For the innermost frame (no next), use the replay position.  */
1537 const struct btrace_insn_iterator *replay;
1539 replay = tp->btrace.replay;
1541 bfun = replay->function;
/* Otherwise derive this frame from the callee's btrace segment.  */
1545 const struct btrace_function *callee;
1547 callee = btrace_get_frame_function (next);
1548 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1555 DEBUG ("[frame] sniffed frame for %s on level %d",
1556 btrace_get_bfun_name (bfun), bfun->level);
1558 /* This is our frame. Initialize the frame cache. */
1559 cache = bfcache_new (this_frame);
1563 *this_cache = cache;
1567 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1570 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1571 struct frame_info *this_frame,
1574 const struct btrace_function *bfun, *callee;
1575 struct btrace_frame_cache *cache;
1576 struct frame_info *next;
1578 next = get_next_frame (this_frame);
1582 callee = btrace_get_frame_function (next);
/* Only claim the frame when the callee was reached via tail call.  */
1586 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1593 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1594 btrace_get_bfun_name (bfun), bfun->level);
1596 /* This is our frame. Initialize the frame cache. */
1597 cache = bfcache_new (this_frame);
1598 cache->tp = find_thread_ptid (inferior_ptid);
1601 *this_cache = cache;
/* Implement dealloc_cache: remove the entry from the bfcache hash
   table when the frame is destroyed.  */
1606 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1608 struct btrace_frame_cache *cache;
1613 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1614 gdb_assert (slot != NULL);
1616 htab_remove_elt (bfcache, cache);
1619 /* btrace recording does not store previous memory content, nor the stack
1620 frame's content. Any unwinding would return erroneous results as the stack
1621 contents no longer match the changed PC value restored from history.
1622 Therefore this unwinder reports any possibly unwound registers as
/* Unwinder for normal call frames while replaying from branch trace.  */
1625 const struct frame_unwind record_btrace_frame_unwind =
1628 record_btrace_frame_unwind_stop_reason,
1629 record_btrace_frame_this_id,
1630 record_btrace_frame_prev_register,
1632 record_btrace_frame_sniffer,
1633 record_btrace_frame_dealloc_cache
/* Unwinder for tail-call frames while replaying from branch trace; it
   differs from the normal unwinder only in its sniffer.  */
1636 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1639 record_btrace_frame_unwind_stop_reason,
1640 record_btrace_frame_this_id,
1641 record_btrace_frame_prev_register,
1643 record_btrace_tailcall_frame_sniffer,
1644 record_btrace_frame_dealloc_cache
1647 /* Implement the to_get_unwinder method. */
1649 static const struct frame_unwind *
1650 record_btrace_to_get_unwinder (struct target_ops *self)
1652 return &record_btrace_frame_unwind;
1655 /* Implement the to_get_tailcall_unwinder method. */
1657 static const struct frame_unwind *
1658 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1660 return &record_btrace_tailcall_frame_unwind;
1663 /* Return a human-readable string for FLAG.  Used for DEBUG output only.  */
1666 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1674 return "reverse-step";
1680 return "reverse-cont";
1689 /* Indicate that TP should be resumed according to FLAG. */
1692 record_btrace_resume_thread (struct thread_info *tp,
1693 enum btrace_thread_flag flag)
1695 struct btrace_thread_info *btinfo;
1697 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1698 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1700 btinfo = &tp->btrace;
1702 /* Fetch the latest branch trace. */
1705 /* A resume request overwrites a preceding resume or stop request. */
1706 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1707 btinfo->flags |= flag;
1710 /* Get the current frame for TP. */
/* The frame is computed in TP's context by temporarily switching
   INFERIOR_PTID and clearing TP's executing flag; both are restored on
   all paths, including on exception.  */
1712 static struct frame_info *
1713 get_thread_current_frame (struct thread_info *tp)
1715 struct frame_info *frame;
1716 ptid_t old_inferior_ptid;
1719 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1720 old_inferior_ptid = inferior_ptid;
1721 inferior_ptid = tp->ptid;
1723 /* Clear the executing flag to allow changes to the current frame.
1724 We are not actually running, yet. We just started a reverse execution
1725 command or a record goto command.
1726 For the latter, EXECUTING is false and this has no effect.
1727 For the former, EXECUTING is true and we're in to_wait, about to
1728 move the thread. Since we need to recompute the stack, we temporarily
1729 set EXECUTING to false. */
1730 executing = is_executing (inferior_ptid);
1731 set_executing (inferior_ptid, 0);
1736 frame = get_current_frame ();
1738 CATCH (except, RETURN_MASK_ALL)
1740 /* Restore the previous execution state. */
1741 set_executing (inferior_ptid, executing);
1743 /* Restore the previous inferior_ptid. */
1744 inferior_ptid = old_inferior_ptid;
1746 throw_exception (except);
1750 /* Restore the previous execution state. */
1751 set_executing (inferior_ptid, executing);
1753 /* Restore the previous inferior_ptid. */
1754 inferior_ptid = old_inferior_ptid;
1759 /* Start replaying a thread. */
/* Returns the new replay iterator (also stored in TP's btrace info), or
   NULL when there is no trace to replay.  */
1761 static struct btrace_insn_iterator *
1762 record_btrace_start_replaying (struct thread_info *tp)
1764 struct btrace_insn_iterator *replay;
1765 struct btrace_thread_info *btinfo;
1767 btinfo = &tp->btrace;
1770 /* We can't start replaying without trace. */
1771 if (btinfo->begin == NULL)
1774 /* GDB stores the current frame_id when stepping in order to detect steps
1776 Since frames are computed differently when we're replaying, we need to
1777 recompute those stored frames and fix them up so we can still detect
1778 subroutines after we started replaying. */
1781 struct frame_info *frame;
1782 struct frame_id frame_id;
1783 int upd_step_frame_id, upd_step_stack_frame_id;
1785 /* The current frame without replaying - computed via normal unwind. */
1786 frame = get_thread_current_frame (tp);
1787 frame_id = get_frame_id (frame);
1789 /* Check if we need to update any stepping-related frame id's. */
1790 upd_step_frame_id = frame_id_eq (frame_id,
1791 tp->control.step_frame_id);
1792 upd_step_stack_frame_id = frame_id_eq (frame_id,
1793 tp->control.step_stack_frame_id);
1795 /* We start replaying at the end of the branch trace. This corresponds
1796 to the current instruction. */
1797 replay = XNEW (struct btrace_insn_iterator);
1798 btrace_insn_end (replay, btinfo);
1800 /* Skip gaps at the end of the trace. */
1801 while (btrace_insn_get (replay) == NULL)
1805 steps = btrace_insn_prev (replay, 1);
1807 error (_("No trace."));
1810 /* We're not replaying, yet. */
1811 gdb_assert (btinfo->replay == NULL);
1812 btinfo->replay = replay;
1814 /* Make sure we're not using any stale registers. */
1815 registers_changed_ptid (tp->ptid);
1817 /* The current frame with replaying - computed via btrace unwind. */
1818 frame = get_thread_current_frame (tp);
1819 frame_id = get_frame_id (frame);
1821 /* Replace stepping related frames where necessary. */
1822 if (upd_step_frame_id)
1823 tp->control.step_frame_id = frame_id;
1824 if (upd_step_stack_frame_id)
1825 tp->control.step_stack_frame_id = frame_id;
/* On error, undo the partial replay setup and re-throw.  */
1827 CATCH (except, RETURN_MASK_ALL)
1829 xfree (btinfo->replay);
1830 btinfo->replay = NULL;
1832 registers_changed_ptid (tp->ptid);
1834 throw_exception (except);
1841 /* Stop replaying a thread. */
1844 record_btrace_stop_replaying (struct thread_info *tp)
1846 struct btrace_thread_info *btinfo;
1848 btinfo = &tp->btrace;
1850 xfree (btinfo->replay);
1851 btinfo->replay = NULL;
1853 /* Make sure we're not leaving any stale registers. */
1854 registers_changed_ptid (tp->ptid);
1857 /* Stop replaying TP if it is at the end of its execution history. */
1860 record_btrace_stop_replaying_at_end (struct thread_info *tp)
1862 struct btrace_insn_iterator *replay, end;
1863 struct btrace_thread_info *btinfo;
1865 btinfo = &tp->btrace;
1866 replay = btinfo->replay;
1871 btrace_insn_end (&end, btinfo);
1873 if (btrace_insn_cmp (replay, &end) == 0)
1874 record_btrace_stop_replaying (tp);
1877 /* The to_resume method of target record-btrace. */
1880 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1881 enum gdb_signal signal)
1883 struct thread_info *tp;
1884 enum btrace_thread_flag flag;
1887 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
1888 execution_direction == EXEC_REVERSE ? "reverse-" : "",
1889 step ? "step" : "cont");
1893 /* Store the execution direction of the last resume.
1895 If there is more than one to_resume call, we have to rely on infrun
1896 to not change the execution direction in-between. */
1897 record_btrace_resume_exec_dir = execution_direction;
1899 /* For all-stop targets... */
1900 if (!target_is_non_stop_p ())
1902 /* ...we pick the current thread when asked to resume an entire process
1904 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1905 ptid = inferior_ptid;
1907 tp = find_thread_ptid (ptid);
1909 error (_("Cannot find thread to resume."));
1911 /* ...and we stop replaying other threads if the thread to resume is not
1913 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1914 ALL_NON_EXITED_THREADS (tp)
1915 record_btrace_stop_replaying (tp);
1918 /* As long as we're not replaying, just forward the request.
1920 For non-stop targets this means that no thread is replaying. In order to
1921 make progress, we may need to explicitly move replaying threads to the end
1922 of their execution history. */
1923 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1926 return ops->to_resume (ops, orig_ptid, step, signal);
1929 /* Compute the btrace thread flag for the requested move. */
1931 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1933 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1935 /* We just indicate the resume intent here. The actual stepping happens in
1936 record_btrace_wait below. */
1937 ALL_NON_EXITED_THREADS (tp)
1938 if (ptid_match (tp->ptid, ptid))
1939 record_btrace_resume_thread (tp, flag);
1941 /* Async support. */
1942 if (target_can_async_p ())
1945 mark_async_event_handler (record_btrace_async_inferior_event_handler);
/* Cancel resuming TP: clear its pending move/stop request and stop
   replaying it if it already reached the end of its history.  */
1949 /* Cancel resuming TP. */
1952 record_btrace_cancel_resume (struct thread_info *tp)
1954 enum btrace_thread_flag flags;
1956 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1960 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1961 target_pid_to_str (tp->ptid), flags,
1962 btrace_thread_flag_to_str (flags));
1964 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
1965 record_btrace_stop_replaying_at_end (tp);
/* The following helpers construct the target_waitstatus values that
   record_btrace_step_thread and record_btrace_wait report.  */
1968 /* Return a target_waitstatus indicating that we ran out of history. */
1970 static struct target_waitstatus
1971 btrace_step_no_history (void)
1973 struct target_waitstatus status;
1975 status.kind = TARGET_WAITKIND_NO_HISTORY;
1980 /* Return a target_waitstatus indicating that a step finished. */
1982 static struct target_waitstatus
1983 btrace_step_stopped (void)
1985 struct target_waitstatus status;
1987 status.kind = TARGET_WAITKIND_STOPPED;
1988 status.value.sig = GDB_SIGNAL_TRAP;
1993 /* Return a target_waitstatus indicating that a thread was stopped as
1996 static struct target_waitstatus
1997 btrace_step_stopped_on_request (void)
1999 struct target_waitstatus status;
2001 status.kind = TARGET_WAITKIND_STOPPED;
2002 status.value.sig = GDB_SIGNAL_0;
2007 /* Return a target_waitstatus indicating a spurious stop. */
2009 static struct target_waitstatus
2010 btrace_step_spurious (void)
2012 struct target_waitstatus status;
2014 status.kind = TARGET_WAITKIND_SPURIOUS;
2019 /* Return a target_waitstatus indicating that the thread was not resumed. */
2021 static struct target_waitstatus
2022 btrace_step_no_resumed (void)
2024 struct target_waitstatus status;
2026 status.kind = TARGET_WAITKIND_NO_RESUMED;
2031 /* Return a target_waitstatus indicating that we should wait again. */
2033 static struct target_waitstatus
2034 btrace_step_again (void)
2036 struct target_waitstatus status;
2038 status.kind = TARGET_WAITKIND_IGNORE;
2043 /* Clear the record histories. */
/* Invalidates the cached insn/call history iterators so they are rebuilt
   from the new replay position.  */
2046 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2048 xfree (btinfo->insn_history);
2049 xfree (btinfo->call_history);
2051 btinfo->insn_history = NULL;
2052 btinfo->call_history = NULL;
2055 /* Check whether TP's current replay position is at a breakpoint. */
2058 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2060 struct btrace_insn_iterator *replay;
2061 struct btrace_thread_info *btinfo;
2062 const struct btrace_insn *insn;
2063 struct inferior *inf;
2065 btinfo = &tp->btrace;
2066 replay = btinfo->replay;
2071 insn = btrace_insn_get (replay);
2075 inf = find_inferior_ptid (tp->ptid);
/* Also records the stop reason (sw/hw breakpoint) in BTINFO for the
   stopped_by_*_breakpoint target methods.  */
2079 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2080 &btinfo->stop_reason);
2083 /* Step one instruction in forward direction. */
2085 static struct target_waitstatus
2086 record_btrace_single_step_forward (struct thread_info *tp)
2088 struct btrace_insn_iterator *replay, end;
2089 struct btrace_thread_info *btinfo;
2091 btinfo = &tp->btrace;
2092 replay = btinfo->replay;
2094 /* We're done if we're not replaying. */
2096 return btrace_step_no_history ();
2098 /* Check if we're stepping a breakpoint. */
2099 if (record_btrace_replay_at_breakpoint (tp))
2100 return btrace_step_stopped ();
2102 /* Skip gaps during replay. */
2107 /* We will bail out here if we continue stepping after reaching the end
2108 of the execution history. */
2109 steps = btrace_insn_next (replay, 1);
2111 return btrace_step_no_history ();
2113 while (btrace_insn_get (replay) == NULL);
2115 /* Determine the end of the instruction trace. */
2116 btrace_insn_end (&end, btinfo);
2118 /* The execution trace contains (and ends with) the current instruction.
2119 This instruction has not been executed, yet, so the trace really ends
2120 one instruction earlier. */
2121 if (btrace_insn_cmp (replay, &end) == 0)
2122 return btrace_step_no_history ();
2124 return btrace_step_spurious ();
2127 /* Step one instruction in backward direction. */
2129 static struct target_waitstatus
2130 record_btrace_single_step_backward (struct thread_info *tp)
2132 struct btrace_insn_iterator *replay;
2133 struct btrace_thread_info *btinfo;
2135 btinfo = &tp->btrace;
2136 replay = btinfo->replay;
2138 /* Start replaying if we're not already doing so. */
2140 replay = record_btrace_start_replaying (tp);
2142 /* If we can't step any further, we reached the end of the history.
2143 Skip gaps during replay. */
2148 steps = btrace_insn_prev (replay, 1);
2150 return btrace_step_no_history ();
2152 while (btrace_insn_get (replay) == NULL);
2154 /* Check if we're stepping a breakpoint.
2156 For reverse-stepping, this check is after the step. There is logic in
2157 infrun.c that handles reverse-stepping separately. See, for example,
2158 proceed and adjust_pc_after_break.
2160 This code assumes that for reverse-stepping, PC points to the last
2161 de-executed instruction, whereas for forward-stepping PC points to the
2162 next to-be-executed instruction. */
2163 if (record_btrace_replay_at_breakpoint (tp))
2164 return btrace_step_stopped ();
2166 return btrace_step_spurious ();
2169 /* Step a single thread. */
/* Dispatches on the thread's pending BTHR_* request: stop on request,
   single step once, or keep stepping (continue) until a non-spurious
   event occurs.  */
2171 static struct target_waitstatus
2172 record_btrace_step_thread (struct thread_info *tp)
2174 struct btrace_thread_info *btinfo;
2175 struct target_waitstatus status;
2176 enum btrace_thread_flag flags;
2178 btinfo = &tp->btrace;
/* Consume the pending request; it is re-armed below for continues.  */
2180 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2181 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2183 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2184 target_pid_to_str (tp->ptid), flags,
2185 btrace_thread_flag_to_str (flags));
2187 /* We can't step without an execution history. */
2188 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2189 return btrace_step_no_history ();
2194 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2197 return btrace_step_stopped_on_request ();
2200 status = record_btrace_single_step_forward (tp);
2201 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2204 return btrace_step_stopped ();
2207 status = record_btrace_single_step_backward (tp);
2208 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2211 return btrace_step_stopped ();
/* For continues, a spurious step re-arms the request and asks the
   caller to step this thread again.  */
2214 status = record_btrace_single_step_forward (tp);
2215 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2218 btinfo->flags |= flags;
2219 return btrace_step_again ();
2222 status = record_btrace_single_step_backward (tp);
2223 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2226 btinfo->flags |= flags;
2227 return btrace_step_again ();
2230 /* We keep threads moving at the end of their execution history. The to_wait
2231 method will stop the thread for whom the event is reported. */
2232 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2233 btinfo->flags |= flags;
2238 /* A vector of threads. */
2240 typedef struct thread_info * tp_t;
2243 /* Announce further events if necessary. */
/* If any thread is still moving or still waiting to report "no history",
   re-arm the async event handler so to_wait is called again.  */
2246 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2247 const VEC (tp_t) *no_history)
2249 int more_moving, more_no_history;
2251 more_moving = !VEC_empty (tp_t, moving);
2252 more_no_history = !VEC_empty (tp_t, no_history);
2254 if (!more_moving && !more_no_history)
2258 DEBUG ("movers pending");
2260 if (more_no_history)
2261 DEBUG ("no-history pending");
2263 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2266 /* The to_wait method of target record-btrace. */
2269 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2270 struct target_waitstatus *status, int options)
2272 VEC (tp_t) *moving, *no_history;
2273 struct thread_info *tp, *eventing;
2274 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2276 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2278 /* As long as we're not replaying, just forward the request. */
2279 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2282 return ops->to_wait (ops, ptid, status, options);
2288 make_cleanup (VEC_cleanup (tp_t), &moving);
2289 make_cleanup (VEC_cleanup (tp_t), &no_history);
2291 /* Keep a work list of moving threads. */
2292 ALL_NON_EXITED_THREADS (tp)
2293 if (ptid_match (tp->ptid, ptid)
2294 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2295 VEC_safe_push (tp_t, moving, tp);
2297 if (VEC_empty (tp_t, moving))
2299 *status = btrace_step_no_resumed ();
2301 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2302 target_waitstatus_to_string (status));
2304 do_cleanups (cleanups);
2308 /* Step moving threads one by one, one step each, until either one thread
2309 reports an event or we run out of threads to step.
2311 When stepping more than one thread, chances are that some threads reach
2312 the end of their execution history earlier than others. If we reported
2313 this immediately, all-stop on top of non-stop would stop all threads and
2314 resume the same threads next time. And we would report the same thread
2315 having reached the end of its execution history again.
2317 In the worst case, this would starve the other threads. But even if other
2318 threads would be allowed to make progress, this would result in far too
2319 many intermediate stops.
2321 We therefore delay the reporting of "no execution history" until we have
2322 nothing else to report. By this time, all threads should have moved to
2323 either the beginning or the end of their execution history. There will
2324 be a single user-visible stop. */
2326 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2331 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2333 *status = record_btrace_step_thread (tp);
2335 switch (status->kind)
2337 case TARGET_WAITKIND_IGNORE:
2341 case TARGET_WAITKIND_NO_HISTORY:
2342 VEC_safe_push (tp_t, no_history,
2343 VEC_ordered_remove (tp_t, moving, ix));
2347 eventing = VEC_unordered_remove (tp_t, moving, ix);
2353 if (eventing == NULL)
2355 /* We started with at least one moving thread. This thread must have
2356 either stopped or reached the end of its execution history.
2358 In the former case, EVENTING must not be NULL.
2359 In the latter case, NO_HISTORY must not be empty. */
2360 gdb_assert (!VEC_empty (tp_t, no_history))
2362 /* We kept threads moving at the end of their execution history. Stop
2363 EVENTING now that we are going to report its stop. */
2364 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2365 eventing->btrace.flags &= ~BTHR_MOVE;
2367 *status = btrace_step_no_history ();
2370 gdb_assert (eventing != NULL);
2372 /* We kept threads replaying at the end of their execution history. Stop
2373 replaying EVENTING now that we are going to report its stop. */
2374 record_btrace_stop_replaying_at_end (eventing);
2376 /* Stop all other threads. */
2377 if (!target_is_non_stop_p ())
2378 ALL_NON_EXITED_THREADS (tp)
2379 record_btrace_cancel_resume (tp);
2381 /* In async mode, we need to announce further events. */
2382 if (target_is_async_p ())
2383 record_btrace_maybe_mark_async_event (moving, no_history);
2385 /* Start record histories anew from the current position. */
2386 record_btrace_clear_histories (&eventing->btrace);
2388 /* We moved the replay position but did not update registers. */
2389 registers_changed_ptid (eventing->ptid);
2391 DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
2392 target_pid_to_str (eventing->ptid),
2393 target_waitstatus_to_string (status));
2395 do_cleanups (cleanups);
2396 return eventing->ptid;
2399 /* The to_stop method of target record-btrace. */
2402 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2404 DEBUG ("stop %s", target_pid_to_str (ptid));
2406 /* As long as we're not replaying, just forward the request. */
2407 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2410 ops->to_stop (ops, ptid);
2414 struct thread_info *tp;
/* While replaying, convert any pending move request into a stop
   request; to_wait will report the stop.  */
2416 ALL_NON_EXITED_THREADS (tp)
2417 if (ptid_match (tp->ptid, ptid))
2419 tp->btrace.flags &= ~BTHR_MOVE;
2420 tp->btrace.flags |= BTHR_STOP;
2425 /* The to_can_execute_reverse method of target record-btrace. */
2428 record_btrace_can_execute_reverse (struct target_ops *self)
2433 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2436 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2438 if (record_btrace_is_replaying (ops))
2440 struct thread_info *tp = inferior_thread ();
2442 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2445 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2448 /* The to_supports_stopped_by_sw_breakpoint method of target
2452 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2454 if (record_btrace_is_replaying (ops))
2457 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2460 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2463 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2465 if (record_btrace_is_replaying (ops))
2467 struct thread_info *tp = inferior_thread ();
2469 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2472 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2475 /* The to_supports_stopped_by_hw_breakpoint method of target
2479 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2481 if (record_btrace_is_replaying (ops))
2484 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2487 /* The to_update_thread_list method of target record-btrace. */
2490 record_btrace_update_thread_list (struct target_ops *ops)
2492 /* We don't add or remove threads during replay. */
2493 if (record_btrace_is_replaying (ops))
2496 /* Forward the request. */
2498 ops->to_update_thread_list (ops);
2501 /* The to_thread_alive method of target record-btrace. */
2504 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2506 /* We don't add or remove threads during replay. */
2507 if (record_btrace_is_replaying (ops))
2508 return find_thread_ptid (ptid) != NULL;
2510 /* Forward the request. */
2512 return ops->to_thread_alive (ops, ptid);
2515 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2519 record_btrace_set_replay (struct thread_info *tp,
2520 const struct btrace_insn_iterator *it)
2522 struct btrace_thread_info *btinfo;
2524 btinfo = &tp->btrace;
2526 if (it == NULL || it->function == NULL)
2527 record_btrace_stop_replaying (tp);
2530 if (btinfo->replay == NULL)
2531 record_btrace_start_replaying (tp);
/* Nothing to do if we're already at the requested position.  */
2532 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2535 *btinfo->replay = *it;
2536 registers_changed_ptid (tp->ptid);
2539 /* Start anew from the new replay position. */
2540 record_btrace_clear_histories (btinfo);
2542 stop_pc = regcache_read_pc (get_current_regcache ());
2543 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2546 /* The to_goto_record_begin method of target record-btrace. */
2549 record_btrace_goto_begin (struct target_ops *self)
2551 struct thread_info *tp;
2552 struct btrace_insn_iterator begin;
2554 tp = require_btrace_thread ();
2556 btrace_insn_begin (&begin, &tp->btrace);
2557 record_btrace_set_replay (tp, &begin);
2560 /* The to_goto_record_end method of target record-btrace. */
2563 record_btrace_goto_end (struct target_ops *ops)
2565 struct thread_info *tp;
2567 tp = require_btrace_thread ();
2569 record_btrace_set_replay (tp, NULL);
2572 /* The to_goto_record method of target record-btrace. */
2575 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2577 struct thread_info *tp;
2578 struct btrace_insn_iterator it;
2579 unsigned int number;
/* INSN is a ULONGEST but instruction numbers are unsigned int; reject
   values that would be truncated.  */
2584 /* Check for wrap-arounds. */
2586 error (_("Instruction number out of range."));
2588 tp = require_btrace_thread ();
2590 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2592 error (_("No such instruction."));
2594 record_btrace_set_replay (tp, &it);
2597 /* The to_execution_direction target method. */
2599 static enum exec_direction_kind
2600 record_btrace_execution_direction (struct target_ops *self)
2602 return record_btrace_resume_exec_dir;
2605 /* The to_prepare_to_generate_core target method. */
2608 record_btrace_prepare_to_generate_core (struct target_ops *self)
2610 record_btrace_generating_corefile = 1;
2613 /* The to_done_generating_core target method. */
2616 record_btrace_done_generating_core (struct target_ops *self)
2618 record_btrace_generating_corefile = 0;
2621 /* Initialize the record-btrace target ops. */
/* Fills in the record_btrace_ops target vector.  Methods not set here
   are inherited from the target beneath via the target delegation
   machinery.  */
2624 init_record_btrace_ops (void)
2626 struct target_ops *ops;
2628 ops = &record_btrace_ops;
2629 ops->to_shortname = "record-btrace";
2630 ops->to_longname = "Branch tracing target";
2631 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2632 ops->to_open = record_btrace_open;
2633 ops->to_close = record_btrace_close;
2634 ops->to_async = record_btrace_async;
2635 ops->to_detach = record_detach;
2636 ops->to_disconnect = record_disconnect;
2637 ops->to_mourn_inferior = record_mourn_inferior;
2638 ops->to_kill = record_kill;
2639 ops->to_stop_recording = record_btrace_stop_recording;
2640 ops->to_info_record = record_btrace_info;
2641 ops->to_insn_history = record_btrace_insn_history;
2642 ops->to_insn_history_from = record_btrace_insn_history_from;
2643 ops->to_insn_history_range = record_btrace_insn_history_range;
2644 ops->to_call_history = record_btrace_call_history;
2645 ops->to_call_history_from = record_btrace_call_history_from;
2646 ops->to_call_history_range = record_btrace_call_history_range;
2647 ops->to_record_is_replaying = record_btrace_is_replaying;
2648 ops->to_xfer_partial = record_btrace_xfer_partial;
2649 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2650 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2651 ops->to_fetch_registers = record_btrace_fetch_registers;
2652 ops->to_store_registers = record_btrace_store_registers;
2653 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2654 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2655 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2656 ops->to_resume = record_btrace_resume;
2657 ops->to_wait = record_btrace_wait;
2658 ops->to_stop = record_btrace_stop;
2659 ops->to_update_thread_list = record_btrace_update_thread_list;
2660 ops->to_thread_alive = record_btrace_thread_alive;
2661 ops->to_goto_record_begin = record_btrace_goto_begin;
2662 ops->to_goto_record_end = record_btrace_goto_end;
2663 ops->to_goto_record = record_btrace_goto;
2664 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2665 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2666 ops->to_supports_stopped_by_sw_breakpoint
2667 = record_btrace_supports_stopped_by_sw_breakpoint;
2668 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2669 ops->to_supports_stopped_by_hw_breakpoint
2670 = record_btrace_supports_stopped_by_hw_breakpoint;
2671 ops->to_execution_direction = record_btrace_execution_direction;
2672 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2673 ops->to_done_generating_core = record_btrace_done_generating_core;
2674 ops->to_stratum = record_stratum;
2675 ops->to_magic = OPS_MAGIC;
2678 /* Start recording in BTS format. */
2681 cmd_record_btrace_bts_start (char *args, int from_tty)
2683 if (args != NULL && *args != 0)
2684 error (_("Invalid argument."));
/* Select the format before pushing the target; reset it on failure so a
   later "record btrace" does not pick up a stale format.  */
2686 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2690 execute_command ("target record-btrace", from_tty);
2692 CATCH (exception, RETURN_MASK_ALL)
2694 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2695 throw_exception (exception);
2700 /* Start recording Intel(R) Processor Trace. */
2703 cmd_record_btrace_pt_start (char *args, int from_tty)
2705 if (args != NULL && *args != 0)
2706 error (_("Invalid argument."));
2708 record_btrace_conf.format = BTRACE_FORMAT_PT;
2712 execute_command ("target record-btrace", from_tty);
2714 CATCH (exception, RETURN_MASK_ALL)
2716 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2717 throw_exception (exception);
2722 /* Alias for "target record". */
/* Tries the PT format first and falls back to BTS; only if both fail is
   the original exception propagated.  */
2725 cmd_record_btrace_start (char *args, int from_tty)
2727 if (args != NULL && *args != 0)
2728 error (_("Invalid argument."));
2730 record_btrace_conf.format = BTRACE_FORMAT_PT;
2734 execute_command ("target record-btrace", from_tty);
2736 CATCH (exception, RETURN_MASK_ALL)
2738 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2742 execute_command ("target record-btrace", from_tty);
2744 CATCH (exception, RETURN_MASK_ALL)
2746 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2747 throw_exception (exception);
2754 /* The "set record btrace" command. */
2757 cmd_set_record_btrace (char *args, int from_tty)
2759 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2762 /* The "show record btrace" command. */
2765 cmd_show_record_btrace (char *args, int from_tty)
2767 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2770 /* The "show record btrace replay-memory-access" command. */
2773 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2774 struct cmd_list_element *c, const char *value)
2776 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2777 replay_memory_access);
2780 /* The "set record btrace bts" command. */
/* Prefix command: prints help when invoked without a subcommand.  */
2783 cmd_set_record_btrace_bts (char *args, int from_tty)
2785 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2786 "by an appropriate subcommand.\n"));
2787 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2788 all_commands, gdb_stdout);
2791 /* The "show record btrace bts" command. */
2794 cmd_show_record_btrace_bts (char *args, int from_tty)
2796 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2799 /* The "set record btrace pt" command. */
2802 cmd_set_record_btrace_pt (char *args, int from_tty)
2804 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2805 "by an appropriate subcommand.\n"));
2806 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2807 all_commands, gdb_stdout);
2810 /* The "show record btrace pt" command. */
2813 cmd_show_record_btrace_pt (char *args, int from_tty)
2815 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2818 /* The "record bts buffer-size" show value function. */
2821 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2822 struct cmd_list_element *c,
2825 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2829 /* The "record pt buffer-size" show value function. */
2832 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2833 struct cmd_list_element *c,
2836 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2840 void _initialize_record_btrace (void);
2842 /* Initialize btrace commands. */
2845 _initialize_record_btrace (void)
2847 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2848 _("Start branch trace recording."), &record_btrace_cmdlist,
2849 "record btrace ", 0, &record_cmdlist);
2850 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2852 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2854 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2855 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2856 This format may not be available on all processors."),
2857 &record_btrace_cmdlist);
2858 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2860 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2862 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2863 This format may not be available on all processors."),
2864 &record_btrace_cmdlist);
2865 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2867 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2868 _("Set record options"), &set_record_btrace_cmdlist,
2869 "set record btrace ", 0, &set_record_cmdlist);
2871 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2872 _("Show record options"), &show_record_btrace_cmdlist,
2873 "show record btrace ", 0, &show_record_cmdlist);
2875 add_setshow_enum_cmd ("replay-memory-access", no_class,
2876 replay_memory_access_types, &replay_memory_access, _("\
2877 Set what memory accesses are allowed during replay."), _("\
2878 Show what memory accesses are allowed during replay."),
2879 _("Default is READ-ONLY.\n\n\
2880 The btrace record target does not trace data.\n\
2881 The memory therefore corresponds to the live target and not \
2882 to the current replay position.\n\n\
2883 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2884 When READ-WRITE, allow accesses to read-only and read-write memory during \
2886 NULL, cmd_show_replay_memory_access,
2887 &set_record_btrace_cmdlist,
2888 &show_record_btrace_cmdlist);
2890 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2891 _("Set record btrace bts options"),
2892 &set_record_btrace_bts_cmdlist,
2893 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2895 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2896 _("Show record btrace bts options"),
2897 &show_record_btrace_bts_cmdlist,
2898 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2900 add_setshow_uinteger_cmd ("buffer-size", no_class,
2901 &record_btrace_conf.bts.size,
2902 _("Set the record/replay bts buffer size."),
2903 _("Show the record/replay bts buffer size."), _("\
2904 When starting recording request a trace buffer of this size. \
2905 The actual buffer size may differ from the requested size. \
2906 Use \"info record\" to see the actual buffer size.\n\n\
2907 Bigger buffers allow longer recording but also take more time to process \
2908 the recorded execution trace.\n\n\
2909 The trace buffer size may not be changed while recording."), NULL,
2910 show_record_bts_buffer_size_value,
2911 &set_record_btrace_bts_cmdlist,
2912 &show_record_btrace_bts_cmdlist);
2914 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2915 _("Set record btrace pt options"),
2916 &set_record_btrace_pt_cmdlist,
2917 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2919 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2920 _("Show record btrace pt options"),
2921 &show_record_btrace_pt_cmdlist,
2922 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2924 add_setshow_uinteger_cmd ("buffer-size", no_class,
2925 &record_btrace_conf.pt.size,
2926 _("Set the record/replay pt buffer size."),
2927 _("Show the record/replay pt buffer size."), _("\
2928 Bigger buffers allow longer recording but also take more time to process \
2929 the recorded execution.\n\
2930 The actual buffer size may differ from the requested size. Use \"info record\" \
2931 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2932 &set_record_btrace_pt_cmdlist,
2933 &show_record_btrace_pt_cmdlist);
2935 init_record_btrace_ops ();
2936 add_target (&record_btrace_ops);
2938 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2941 record_btrace_conf.bts.size = 64 * 1024;
2942 record_btrace_conf.pt.size = 16 * 1024;