1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
/* NOTE(review): the embedded original line numbers below have gaps, so this
   listing elides source lines; declarations and the macro body are incomplete
   as displayed.  */
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer *record_btrace_thread_observer;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
60 /* The currently allowed replay memory access type. */
/* Defaults to read-only; temporarily switched to read-write while inserting
   or removing breakpoints (see the breakpoint methods below).  */
61 static const char *replay_memory_access = replay_memory_access_read_only;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
/* GCC-style variadic macro; when record_debug is set it prints a
   "[record-btrace]"-prefixed line to gdb_stdlog.  */
93 #define DEBUG(msg, args...) \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace. This function never
109 static struct thread_info *
110 require_btrace_thread (void)
112 struct thread_info *tp;
116 tp = find_thread_ptid (inferior_ptid);
/* Reached when the lookup fails; the NULL test itself is elided in this
   listing.  */
118 error (_("No thread."));
122 if (btrace_is_empty (tp))
123 error (_("No trace."));
128 /* Update the branch trace for the current thread and return a pointer to its
129 branch trace information struct.
131 Throws an error if there is no thread or no trace. This function never
134 static struct btrace_thread_info *
135 require_btrace (void)
137 struct thread_info *tp;
/* Delegates thread lookup and validation; presumably returns &tp->btrace
   (return statement elided in listing).  */
139 tp = require_btrace_thread ();
144 /* Enable branch tracing for one thread. Warn on errors. */
/* Used as a new-thread observer callback (attached below); errors are
   downgraded to warnings so thread creation is never blocked.  */
147 record_btrace_enable_warn (struct thread_info *tp)
151 btrace_enable (tp, &record_btrace_conf);
153 CATCH (error, RETURN_MASK_ERROR)
155 warning ("%s", error.message);
160 /* Callback function to disable branch tracing for one thread. */
/* Takes void * so it can serve as a cleanup callback (see make_cleanup
   usage in record_btrace_open).  */
163 record_btrace_disable_callback (void *arg)
165 struct thread_info *tp = (struct thread_info *) arg;
170 /* Enable automatic tracing of new threads. */
173 record_btrace_auto_enable (void)
175 DEBUG ("attach thread observer");
177 record_btrace_thread_observer
178 = observer_attach_new_thread (record_btrace_enable_warn)
181 /* Disable automatic tracing of new threads. */
184 record_btrace_auto_disable (void)
186 /* The observer may have been detached, already. */
187 if (record_btrace_thread_observer == NULL)
190 DEBUG ("detach thread observer");
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
196 /* The record-btrace async event handler function. */
199 record_btrace_handle_async_inferior_event (gdb_client_data data)
201 inferior_event_handler (INF_REG_EVENT, NULL);
204 /* See record-btrace.h. */
/* Installs the record-btrace target on the target stack, sets up automatic
   tracing of new threads and the async event handler, and notifies
   observers that recording started.  */
207 record_btrace_push_target (void)
211 record_btrace_auto_enable ();
213 push_target (&record_btrace_ops);
215 record_btrace_async_inferior_event_handler
216 = create_async_event_handler (record_btrace_handle_async_inferior_event,
218 record_btrace_generating_corefile = 0;
220 format = btrace_format_short_string (record_btrace_conf.format);
221 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224 /* The to_open method of target record-btrace. */
/* ARGS, if non-empty, is a thread-number list selecting which threads to
   trace; otherwise all non-exited threads are enabled.  A cleanup chain
   undoes the per-thread enabling if pushing the target throws.  */
227 record_btrace_open (const char *args, int from_tty)
229 struct cleanup *disable_chain;
230 struct thread_info *tp;
236 if (!target_has_execution)
237 error (_("The program is not being run."));
239 gdb_assert (record_btrace_thread_observer == NULL);
241 disable_chain = make_cleanup (null_cleanup, NULL);
242 ALL_NON_EXITED_THREADS (tp)
243 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
245 btrace_enable (tp, &record_btrace_conf);
247 make_cleanup (record_btrace_disable_callback, tp);
250 record_btrace_push_target ();
/* Success: keep tracing enabled; drop the disable cleanups.  */
252 discard_cleanups (disable_chain);
255 /* The to_stop_recording method of target record-btrace. */
258 record_btrace_stop_recording (struct target_ops *self)
260 struct thread_info *tp;
262 DEBUG ("stop recording");
264 record_btrace_auto_disable ();
/* Disable tracing only for threads we actually enabled it on.  */
266 ALL_NON_EXITED_THREADS (tp)
267 if (tp->btrace.target != NULL)
271 /* The to_disconnect method of target record-btrace. */
274 record_btrace_disconnect (struct target_ops *self, const char *args,
277 struct target_ops *beneath = self->beneath;
279 /* Do not stop recording, just clean up GDB side. */
280 unpush_target (self);
282 /* Forward disconnect. */
283 beneath->to_disconnect (beneath, args, from_tty);
286 /* The to_close method of target record-btrace. */
289 record_btrace_close (struct target_ops *self)
291 struct thread_info *tp;
293 if (record_btrace_async_inferior_event_handler != NULL)
294 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
296 /* Make sure automatic recording gets disabled even if we did not stop
297 recording before closing the record-btrace target. */
298 record_btrace_auto_disable ();
300 /* We should have already stopped recording.
301 Tear down btrace in case we have not. */
302 ALL_NON_EXITED_THREADS (tp)
303 btrace_teardown (tp);
306 /* The to_async method of target record-btrace. */
/* ENABLE non-zero arms the async event handler, zero clears it; the request
   is always forwarded to the target beneath.  */
309 record_btrace_async (struct target_ops *ops, int enable)
312 mark_async_event_handler (record_btrace_async_inferior_event_handler);
314 clear_async_event_handler (record_btrace_async_inferior_event_handler);
316 ops->beneath->to_async (ops->beneath, enable);
319 /* Adjusts the size and returns a human readable size suffix. */
/* *SIZE is scaled down in place when it is an exact multiple of 1GB/1MB/1KB
   (checked via the power-of-two masks below); the matching suffix string is
   returned.  The shift/return lines are elided in this listing.  */
322 record_btrace_adjust_size (unsigned int *size)
328 if ((sz & ((1u << 30) - 1)) == 0)
333 else if ((sz & ((1u << 20) - 1)) == 0)
338 else if ((sz & ((1u << 10) - 1)) == 0)
347 /* Print a BTS configuration. */
350 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
358 suffix = record_btrace_adjust_size (&size);
359 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
363 /* Print an Intel Processor Trace configuration. */
366 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
374 suffix = record_btrace_adjust_size (&size);
375 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
379 /* Print a branch tracing configuration. */
382 record_btrace_print_conf (const struct btrace_config *conf)
384 printf_unfiltered (_("Recording format: %s.\n"),
385 btrace_format_string (conf->format));
387 switch (conf->format)
389 case BTRACE_FORMAT_NONE:
392 case BTRACE_FORMAT_BTS:
393 record_btrace_print_bts_conf (&conf->bts);
396 case BTRACE_FORMAT_PT:
397 record_btrace_print_pt_conf (&conf->pt);
401 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
404 /* The to_info_record method of target record-btrace. */
/* Implements "info record": prints the trace configuration, then counts the
   recorded instructions/functions/gaps for the current thread and reports
   the replay position if replaying.  */
407 record_btrace_info (struct target_ops *self)
409 struct btrace_thread_info *btinfo;
410 const struct btrace_config *conf;
411 struct thread_info *tp;
412 unsigned int insns, calls, gaps;
416 tp = find_thread_ptid (inferior_ptid);
418 error (_("No thread."));
420 btinfo = &tp->btrace;
422 conf = btrace_conf (btinfo);
424 record_btrace_print_conf (conf);
432 if (!btrace_is_empty (tp))
434 struct btrace_call_iterator call;
435 struct btrace_insn_iterator insn;
/* Walk back from the trace end to number the last call/instruction.  */
437 btrace_call_end (&call, btinfo);
438 btrace_call_prev (&call, 1);
439 calls = btrace_call_number (&call);
441 btrace_insn_end (&insn, btinfo);
443 insns = btrace_insn_number (&insn);
446 /* The last instruction does not really belong to the trace. */
453 /* Skip gaps at the end. */
456 steps = btrace_insn_prev (&insn, 1);
460 insns = btrace_insn_number (&insn);
465 gaps = btinfo->ngaps;
468 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
469 "for thread %s (%s).\n"), insns, calls, gaps,
470 print_thread_id (tp), target_pid_to_str (tp->ptid));
472 if (btrace_is_replaying (tp))
473 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
474 btrace_insn_number (btinfo->replay));
477 /* Print a decode error. */
/* Maps ERRCODE to a human-readable string per trace FORMAT and emits a
   bracketed "[decode error (N): ...]" marker via UIOUT.  PT error codes are
   decoded with libipt when available.  */
480 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
481 enum btrace_format format)
486 errstr = _("unknown");
494 case BTRACE_FORMAT_BTS:
500 case BDE_BTS_OVERFLOW:
501 errstr = _("instruction overflow");
504 case BDE_BTS_INSN_SIZE:
505 errstr = _("unknown instruction");
510 #if defined (HAVE_LIBIPT)
511 case BTRACE_FORMAT_PT:
514 case BDE_PT_USER_QUIT:
516 errstr = _("trace decode cancelled");
519 case BDE_PT_DISABLED:
521 errstr = _("disabled");
524 case BDE_PT_OVERFLOW:
526 errstr = _("overflow");
/* Fall back to libipt's own error description for other PT codes.  */
531 errstr = pt_errstr (pt_errcode (errcode));
535 #endif /* defined (HAVE_LIBIPT) */
538 ui_out_text (uiout, _("["));
541 ui_out_text (uiout, _("decode error ("));
542 ui_out_field_int (uiout, "errcode", errcode);
543 ui_out_text (uiout, _("): "));
545 ui_out_text (uiout, errstr);
546 ui_out_text (uiout, _("]\n"));
549 /* Print an unsigned int. */
/* ui_out has no unsigned field helper, hence this "%u" wrapper.  */
552 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
554 ui_out_field_fmt (uiout, fld, "%u", val);
557 /* A range of source lines. */
/* Half-open interval [begin, end) of lines within one symtab; an empty
   range has end <= begin (see btrace_line_range_is_empty).  */
559 struct btrace_line_range
561 /* The symtab this line is from. */
562 struct symtab *symtab;
564 /* The first line (inclusive). */
567 /* The last line (exclusive). */
571 /* Construct a line range. */
573 static struct btrace_line_range
574 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
576 struct btrace_line_range range;
578 range.symtab = symtab;
585 /* Add a line to a line range. */
/* Grows RANGE minimally so it covers LINE; the begin/end adjustments for
   the two extension branches are elided in this listing.  */
587 static struct btrace_line_range
588 btrace_line_range_add (struct btrace_line_range range, int line)
590 if (range.end <= range.begin)
592 /* This is the first entry. */
594 range.end = line + 1;
596 else if (line < range.begin)
598 else if (range.end < line)
604 /* Return non-zero if RANGE is empty, zero otherwise. */
607 btrace_line_range_is_empty (struct btrace_line_range range)
609 return range.end <= range.begin;
612 /* Return non-zero if LHS contains RHS, zero otherwise. */
/* Containment requires identical symtabs, not just overlapping lines.  */
615 btrace_line_range_contains_range (struct btrace_line_range lhs,
616 struct btrace_line_range rhs)
618 return ((lhs.symtab == rhs.symtab)
619 && (lhs.begin <= rhs.begin)
620 && (rhs.end <= lhs.end));
623 /* Find the line range associated with PC. */
/* Scans the symtab's linetable and accumulates every entry whose pc equals
   PC (several entries can share an address, e.g. for macro expansion).
   Returns an empty range when there is no symtab/linetable for PC.  */
625 static struct btrace_line_range
626 btrace_find_line_range (CORE_ADDR pc)
628 struct btrace_line_range range;
629 struct linetable_entry *lines;
630 struct linetable *ltable;
631 struct symtab *symtab;
634 symtab = find_pc_line_symtab (pc);
636 return btrace_mk_line_range (NULL, 0, 0);
638 ltable = SYMTAB_LINETABLE (symtab);
640 return btrace_mk_line_range (symtab, 0, 0);
642 nlines = ltable->nitems;
643 lines = ltable->item;
645 return btrace_mk_line_range (symtab, 0, 0);
647 range = btrace_mk_line_range (symtab, 0, 0);
648 for (i = 0; i < nlines - 1; i++)
/* Line 0 entries mark compiler-generated code; skip them.  */
650 if ((lines[i].pc == pc) && (lines[i].line != 0))
651 range = btrace_line_range_add (range, lines[i].line);
657 /* Print source lines in LINES to UIOUT.
659 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
660 instructions corresponding to that source line. When printing a new source
661 line, we do the cleanups for the open chain and open a new cleanup chain for
662 the new source line. If the source line range in LINES is not empty, this
663 function will leave the cleanup chain for the last printed source line open
664 so instructions can be added to it. */
667 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
668 struct cleanup **ui_item_chain, int flags)
670 print_source_lines_flags psl_flags;
674 if (flags & DISASSEMBLY_FILENAME)
675 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
676 for (line = lines.begin; line < lines.end; ++line)
679 if (*ui_item_chain != NULL)
680 do_cleanups (*ui_item_chain);
/* Open a new src_and_asm_line tuple for this source line.  */
683 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
685 print_source_lines (lines.symtab, line, line + 1, psl_flags);
687 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
691 /* Disassemble a section of the recorded instruction trace. */
/* Walks [BEGIN, END) of BTINFO's instruction trace, printing decode-error
   markers for gaps and disassembled instructions (optionally interleaved
   with source lines when DISASSEMBLY_SOURCE is set) to UIOUT.  */
694 btrace_insn_history (struct ui_out *uiout,
695 const struct btrace_thread_info *btinfo,
696 const struct btrace_insn_iterator *begin,
697 const struct btrace_insn_iterator *end, int flags)
700 struct cleanup *cleanups, *ui_item_chain;
701 struct disassemble_info di;
702 struct gdbarch *gdbarch;
703 struct btrace_insn_iterator it;
704 struct btrace_line_range last_lines;
706 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
707 btrace_insn_number (end));
709 flags |= DISASSEMBLY_SPECULATIVE;
711 gdbarch = target_gdbarch ();
712 stb = mem_fileopen ();
713 cleanups = make_cleanup_ui_file_delete (stb);
714 di = gdb_disassemble_info (gdbarch, stb);
715 last_lines = btrace_mk_line_range (NULL, 0, 0);
717 make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
719 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
720 instructions corresponding to that line. */
721 ui_item_chain = NULL;
723 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
725 const struct btrace_insn *insn;
727 insn = btrace_insn_get (&it);
729 /* A NULL instruction indicates a gap in the trace. */
732 const struct btrace_config *conf;
734 conf = btrace_conf (btinfo);
736 /* We have trace so we must have a configuration. */
737 gdb_assert (conf != NULL);
739 btrace_ui_out_decode_error (uiout, it.function->errcode,
744 struct disasm_insn dinsn;
746 if ((flags & DISASSEMBLY_SOURCE) != 0)
748 struct btrace_line_range lines;
750 lines = btrace_find_line_range (insn->pc);
/* Only print source lines not already covered by the previous ones.  */
751 if (!btrace_line_range_is_empty (lines)
752 && !btrace_line_range_contains_range (last_lines, lines))
754 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
757 else if (ui_item_chain == NULL)
760 = make_cleanup_ui_out_tuple_begin_end (uiout,
762 /* No source information. */
763 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
766 gdb_assert (ui_item_chain != NULL);
769 memset (&dinsn, 0, sizeof (dinsn));
770 dinsn.number = btrace_insn_number (&it);
771 dinsn.addr = insn->pc;
773 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
774 dinsn.is_speculative = 1;
776 gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
780 do_cleanups (cleanups);
783 /* The to_insn_history method of target record-btrace. */
/* Implements "record instruction-history": shows SIZE instructions around
   the current position (replay position if replaying, trace tail otherwise)
   or continues from the previous history window in the direction given by
   SIZE's sign.  */
786 record_btrace_insn_history (struct target_ops *self, int size, int flags)
788 struct btrace_thread_info *btinfo;
789 struct btrace_insn_history *history;
790 struct btrace_insn_iterator begin, end;
791 struct cleanup *uiout_cleanup;
792 struct ui_out *uiout;
793 unsigned int context, covered;
795 uiout = current_uiout;
796 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
798 context = abs (size);
800 error (_("Bad record instruction-history-size."));
802 btinfo = require_btrace ();
803 history = btinfo->insn_history;
806 struct btrace_insn_iterator *replay;
808 DEBUG ("insn-history (0x%x): %d", flags, size);
810 /* If we're replaying, we start at the replay position. Otherwise, we
811 start at the tail of the trace. */
812 replay = btinfo->replay;
816 btrace_insn_end (&begin, btinfo);
818 /* We start from here and expand in the requested direction. Then we
819 expand in the other direction, as well, to fill up any remaining
824 /* We want the current position covered, as well. */
825 covered = btrace_insn_next (&end, 1);
826 covered += btrace_insn_prev (&begin, context - covered);
827 covered += btrace_insn_next (&end, context - covered);
831 covered = btrace_insn_next (&end, context);
832 covered += btrace_insn_prev (&begin, context - covered);
/* A previous history window exists: continue from its boundary.  */
837 begin = history->begin;
840 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
841 btrace_insn_number (&begin), btrace_insn_number (&end));
846 covered = btrace_insn_prev (&begin, context);
851 covered = btrace_insn_next (&end, context);
856 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
860 printf_unfiltered (_("At the start of the branch trace record.\n"));
862 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the printed window for a subsequent +/- continuation.  */
865 btrace_set_insn_history (btinfo, &begin, &end);
866 do_cleanups (uiout_cleanup);
869 /* The to_insn_history_range method of target record-btrace. */
/* Prints the instruction history for [FROM, TO] (both inclusive); the end
   is silently truncated to the trace end if TO is out of bounds.  */
872 record_btrace_insn_history_range (struct target_ops *self,
873 ULONGEST from, ULONGEST to, int flags)
875 struct btrace_thread_info *btinfo;
876 struct btrace_insn_history *history;
877 struct btrace_insn_iterator begin, end;
878 struct cleanup *uiout_cleanup;
879 struct ui_out *uiout;
880 unsigned int low, high;
883 uiout = current_uiout;
884 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
889 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
891 /* Check for wrap-arounds. */
892 if (low != from || high != to)
893 error (_("Bad range."));
896 error (_("Bad range."));
898 btinfo = require_btrace ();
900 found = btrace_find_insn_by_number (&begin, btinfo, low);
902 error (_("Range out of bounds."));
904 found = btrace_find_insn_by_number (&end, btinfo, high);
907 /* Silently truncate the range. */
908 btrace_insn_end (&end, btinfo);
912 /* We want both begin and end to be inclusive. */
913 btrace_insn_next (&end, 1);
916 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
917 btrace_set_insn_history (btinfo, &begin, &end);
919 do_cleanups (uiout_cleanup);
922 /* The to_insn_history_from method of target record-btrace. */
/* Computes a [begin, end] window of SIZE instructions anchored at FROM and
   delegates to record_btrace_insn_history_range; negative SIZE selects the
   window ending at FROM.  */
925 record_btrace_insn_history_from (struct target_ops *self,
926 ULONGEST from, int size, int flags)
928 ULONGEST begin, end, context;
930 context = abs (size);
932 error (_("Bad record instruction-history-size."));
941 begin = from - context + 1;
946 end = from + context - 1;
948 /* Check for wrap-around. */
953 record_btrace_insn_history_range (self, begin, end, flags);
956 /* Print the instruction number range for a function call history line. */
/* Emits "begin,end" (inclusive) for the instructions of BFUN.  */
959 btrace_call_history_insn_range (struct ui_out *uiout,
960 const struct btrace_function *bfun)
962 unsigned int begin, end, size;
964 size = VEC_length (btrace_insn_s, bfun->insn);
965 gdb_assert (size > 0);
967 begin = bfun->insn_offset;
968 end = begin + size - 1;
970 ui_out_field_uint (uiout, "insn begin", begin);
971 ui_out_text (uiout, ",");
972 ui_out_field_uint (uiout, "insn end", end);
975 /* Compute the lowest and highest source line for the instructions in BFUN
976 and return them in PBEGIN and PEND.
977 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
978 result from inlining or macro expansion. */
981 btrace_compute_src_line_range (const struct btrace_function *bfun,
982 int *pbegin, int *pend)
984 struct btrace_insn *insn;
985 struct symtab *symtab;
997 symtab = symbol_symtab (sym);
999 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
1001 struct symtab_and_line sal;
1003 sal = find_pc_line (insn->pc, 0);
/* Only lines from BFUN's own symtab count; line 0 means no source info.  */
1004 if (sal.symtab != symtab || sal.line == 0)
1007 begin = std::min (begin, sal.line);
1008 end = std::max (end, sal.line);
1016 /* Print the source line information for a function call history line. */
/* Prints "file:min" or "file:min,max" depending on whether the function's
   instructions span more than one source line.  */
1019 btrace_call_history_src_line (struct ui_out *uiout,
1020 const struct btrace_function *bfun)
1029 ui_out_field_string (uiout, "file",
1030 symtab_to_filename_for_display (symbol_symtab (sym)));
1032 btrace_compute_src_line_range (bfun, &begin, &end);
1036 ui_out_text (uiout, ":");
1037 ui_out_field_int (uiout, "min line", begin);
1042 ui_out_text (uiout, ",");
1043 ui_out_field_int (uiout, "max line", end);
1046 /* Get the name of a branch trace function. */
/* Prefers the full symbol's print name, falls back to the minimal symbol;
   the final fallback for neither being available is elided in this
   listing.  */
1049 btrace_get_bfun_name (const struct btrace_function *bfun)
1051 struct minimal_symbol *msym;
1061 return SYMBOL_PRINT_NAME (sym);
1062 else if (msym != NULL)
1063 return MSYMBOL_PRINT_NAME (msym);
1068 /* Disassemble a section of the recorded function trace. */
/* Walks [BEGIN, END) of BTINFO's call trace and prints one line per
   function segment: index, optional call-depth indentation, name (or a
   decode-error marker for gaps), and optional instruction-range and
   source-line columns controlled by FLAGS.  */
1071 btrace_call_history (struct ui_out *uiout,
1072 const struct btrace_thread_info *btinfo,
1073 const struct btrace_call_iterator *begin,
1074 const struct btrace_call_iterator *end,
1077 struct btrace_call_iterator it;
1078 record_print_flags flags = (enum record_print_flag) int_flags;
1080 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1081 btrace_call_number (end));
1083 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1085 const struct btrace_function *bfun;
1086 struct minimal_symbol *msym;
1089 bfun = btrace_call_get (&it);
1093 /* Print the function index. */
1094 ui_out_field_uint (uiout, "index", bfun->number);
1095 ui_out_text (uiout, "\t");
1097 /* Indicate gaps in the trace. */
1098 if (bfun->errcode != 0)
1100 const struct btrace_config *conf;
1102 conf = btrace_conf (btinfo);
1104 /* We have trace so we must have a configuration. */
1105 gdb_assert (conf != NULL);
1107 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1112 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
/* Indent proportionally to the call depth relative to the trace level.  */
1114 int level = bfun->level + btinfo->level, i;
1116 for (i = 0; i < level; ++i)
1117 ui_out_text (uiout, "  ");
1121 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
1122 else if (msym != NULL)
1123 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
1124 else if (!ui_out_is_mi_like_p (uiout))
1125 ui_out_field_string (uiout, "function", "??");
1127 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1129 ui_out_text (uiout, _("\tinst "));
1130 btrace_call_history_insn_range (uiout, bfun);
1133 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1135 ui_out_text (uiout, _("\tat "));
1136 btrace_call_history_src_line (uiout, bfun);
1139 ui_out_text (uiout, "\n");
1143 /* The to_call_history method of target record-btrace. */
/* Implements "record function-call-history"; mirrors the structure of
   record_btrace_insn_history but iterates over call segments.  */
1146 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1148 struct btrace_thread_info *btinfo;
1149 struct btrace_call_history *history;
1150 struct btrace_call_iterator begin, end;
1151 struct cleanup *uiout_cleanup;
1152 struct ui_out *uiout;
1153 unsigned int context, covered;
1154 record_print_flags flags = (enum record_print_flag) int_flags;
1156 uiout = current_uiout;
1157 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1159 context = abs (size);
1161 error (_("Bad record function-call-history-size."));
1163 btinfo = require_btrace ();
1164 history = btinfo->call_history;
1165 if (history == NULL)
1167 struct btrace_insn_iterator *replay;
1169 DEBUG ("call-history (0x%x): %d", int_flags, size);
1171 /* If we're replaying, we start at the replay position. Otherwise, we
1172 start at the tail of the trace. */
1173 replay = btinfo->replay;
1176 begin.function = replay->function;
1177 begin.btinfo = btinfo;
1180 btrace_call_end (&begin, btinfo);
1182 /* We start from here and expand in the requested direction. Then we
1183 expand in the other direction, as well, to fill up any remaining
1188 /* We want the current position covered, as well. */
1189 covered = btrace_call_next (&end, 1);
1190 covered += btrace_call_prev (&begin, context - covered);
1191 covered += btrace_call_next (&end, context - covered);
1195 covered = btrace_call_next (&end, context);
1196 covered += btrace_call_prev (&begin, context- covered);
/* Continue from the previous history window's boundary.  */
1201 begin = history->begin;
1204 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1205 btrace_call_number (&begin), btrace_call_number (&end));
1210 covered = btrace_call_prev (&begin, context);
1215 covered = btrace_call_next (&end, context);
1220 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1224 printf_unfiltered (_("At the start of the branch trace record.\n"));
1226 printf_unfiltered (_("At the end of the branch trace record.\n"));
1229 btrace_set_call_history (btinfo, &begin, &end);
1230 do_cleanups (uiout_cleanup);
1233 /* The to_call_history_range method of target record-btrace. */
/* Prints the call history for [FROM, TO] (both inclusive); the end is
   silently truncated to the trace end if TO is out of bounds.  */
1236 record_btrace_call_history_range (struct target_ops *self,
1237 ULONGEST from, ULONGEST to,
1240 struct btrace_thread_info *btinfo;
1241 struct btrace_call_history *history;
1242 struct btrace_call_iterator begin, end;
1243 struct cleanup *uiout_cleanup;
1244 struct ui_out *uiout;
1245 unsigned int low, high;
1247 record_print_flags flags = (enum record_print_flag) int_flags;
1249 uiout = current_uiout;
1250 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1255 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1257 /* Check for wrap-arounds. */
1258 if (low != from || high != to)
1259 error (_("Bad range."));
1262 error (_("Bad range."));
1264 btinfo = require_btrace ();
1266 found = btrace_find_call_by_number (&begin, btinfo, low);
1268 error (_("Range out of bounds."));
1270 found = btrace_find_call_by_number (&end, btinfo, high);
1273 /* Silently truncate the range. */
1274 btrace_call_end (&end, btinfo);
1278 /* We want both begin and end to be inclusive. */
1279 btrace_call_next (&end, 1);
1282 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1283 btrace_set_call_history (btinfo, &begin, &end);
1285 do_cleanups (uiout_cleanup);
1288 /* The to_call_history_from method of target record-btrace. */
/* Computes a [begin, end] window of SIZE calls anchored at FROM and
   delegates to record_btrace_call_history_range; negative SIZE selects the
   window ending at FROM.  */
1291 record_btrace_call_history_from (struct target_ops *self,
1292 ULONGEST from, int size,
1295 ULONGEST begin, end, context;
1296 record_print_flags flags = (enum record_print_flag) int_flags;
1298 context = abs (size);
1300 error (_("Bad record function-call-history-size."));
1309 begin = from - context + 1;
1314 end = from + context - 1;
1316 /* Check for wrap-around. */
1321 record_btrace_call_history_range (self, begin, end, flags);
1324 /* The to_record_is_replaying method of target record-btrace. */
/* Returns non-zero if any non-exited thread matching PTID is replaying.  */
1327 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1329 struct thread_info *tp;
1331 ALL_NON_EXITED_THREADS (tp)
1332 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1338 /* The to_record_will_replay method of target record-btrace. */
/* Reverse execution always replays; forward execution replays only if a
   matching thread is already replaying.  */
1341 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1343 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1346 /* The to_xfer_partial method of target record-btrace. */
/* While replaying with read-only replay memory access (and not writing a
   core file), memory writes are refused and reads are restricted to
   read-only sections; everything else is forwarded to the target
   beneath.  */
1348 static enum target_xfer_status
1349 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1350 const char *annex, gdb_byte *readbuf,
1351 const gdb_byte *writebuf, ULONGEST offset,
1352 ULONGEST len, ULONGEST *xfered_len)
1354 struct target_ops *t;
1356 /* Filter out requests that don't make sense during replay. */
1357 if (replay_memory_access == replay_memory_access_read_only
1358 && !record_btrace_generating_corefile
1359 && record_btrace_is_replaying (ops, inferior_ptid))
1363 case TARGET_OBJECT_MEMORY:
1365 struct target_section *section;
1367 /* We do not allow writing memory in general. */
1368 if (writebuf != NULL)
1371 return TARGET_XFER_UNAVAILABLE;
1374 /* We allow reading readonly memory. */
1375 section = target_section_by_addr (ops, offset);
1376 if (section != NULL)
1378 /* Check if the section we found is readonly. */
1379 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1380 section->the_bfd_section)
1381 & SEC_READONLY) != 0)
1383 /* Truncate the request to fit into this section. */
1384 len = std::min (len, section->endaddr - offset);
/* Not a readonly section: the read is unavailable during replay.  */
1390 return TARGET_XFER_UNAVAILABLE;
1395 /* Forward the request. */
/* NOTE(review): the target-beneath iteration lines are elided; ops here is
   presumably already advanced to a target implementing to_xfer_partial.  */
1397 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1398 offset, len, xfered_len);
1401 /* The to_insert_breakpoint method of target record-btrace. */
/* Temporarily switches replay memory access to read-write so the beneath
   target can patch in the breakpoint; the old setting is restored on both
   the normal and the exception path.  */
1404 record_btrace_insert_breakpoint (struct target_ops *ops,
1405 struct gdbarch *gdbarch,
1406 struct bp_target_info *bp_tgt)
1411 /* Inserting breakpoints requires accessing memory. Allow it for the
1412 duration of this function. */
1413 old = replay_memory_access;
1414 replay_memory_access = replay_memory_access_read_write;
1419 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1421 CATCH (except, RETURN_MASK_ALL)
/* Restore the setting before re-throwing.  */
1423 replay_memory_access = old;
1424 throw_exception (except);
1427 replay_memory_access = old;
1432 /* The to_remove_breakpoint method of target record-btrace. */
/* Mirror image of record_btrace_insert_breakpoint for removal.  */
1435 record_btrace_remove_breakpoint (struct target_ops *ops,
1436 struct gdbarch *gdbarch,
1437 struct bp_target_info *bp_tgt,
1438 enum remove_bp_reason reason)
1443 /* Removing breakpoints requires accessing memory. Allow it for the
1444 duration of this function. */
1445 old = replay_memory_access;
1446 replay_memory_access = replay_memory_access_read_write;
1451 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1454 CATCH (except, RETURN_MASK_ALL)
1456 replay_memory_access = old;
1457 throw_exception (except);
1460 replay_memory_access = old;
1465 /* The to_fetch_registers method of target record-btrace. */
/* While replaying (and not writing a core file) only the PC can be
   provided -- it is reconstructed from the current replay instruction.
   Otherwise the request is forwarded to the target beneath.  */
1468 record_btrace_fetch_registers (struct target_ops *ops,
1469 struct regcache *regcache, int regno)
1471 struct btrace_insn_iterator *replay;
1472 struct thread_info *tp;
1474 tp = find_thread_ptid (inferior_ptid);
1475 gdb_assert (tp != NULL);
1477 replay = tp->btrace.replay;
1478 if (replay != NULL && !record_btrace_generating_corefile)
1480 const struct btrace_insn *insn;
1481 struct gdbarch *gdbarch;
1484 gdbarch = get_regcache_arch (regcache);
1485 pcreg = gdbarch_pc_regnum (gdbarch);
1489 /* We can only provide the PC register. */
1490 if (regno >= 0 && regno != pcreg)
1493 insn = btrace_insn_get (replay);
1494 gdb_assert (insn != NULL);
1496 regcache_raw_supply (regcache, regno, &insn->pc);
1500 struct target_ops *t = ops->beneath;
1502 t->to_fetch_registers (t, regcache, regno);
1506 /* The to_store_registers method of target record-btrace. */
/* Register writes are refused during replay; otherwise forwarded.  */
1509 record_btrace_store_registers (struct target_ops *ops,
1510 struct regcache *regcache, int regno)
1512 struct target_ops *t;
1514 if (!record_btrace_generating_corefile
1515 && record_btrace_is_replaying (ops, inferior_ptid))
1516 error (_("Cannot write registers while replaying."));
1518 gdb_assert (may_write_registers != 0);
1521 t->to_store_registers (t, regcache, regno);
1524 /* The to_prepare_to_store method of target record-btrace. */
/* A no-op during replay (stores will be refused anyway); otherwise
   forwarded to the target beneath.  */
1527 record_btrace_prepare_to_store (struct target_ops *ops,
1528 struct regcache *regcache)
1530 struct target_ops *t;
1532 if (!record_btrace_generating_corefile
1533 && record_btrace_is_replaying (ops, inferior_ptid))
1537 t->to_prepare_to_store (t, regcache);
1540 /* The branch trace frame cache. */
1542 struct btrace_frame_cache
/* The thread this frame belongs to. */
1545 struct thread_info *tp;
1547 /* The frame info. */
1548 struct frame_info *frame;
1550 /* The branch trace function segment. */
1551 const struct btrace_function *bfun;
1554 /* A struct btrace_frame_cache hash table indexed by NEXT. */
/* Maps a frame_info pointer to its btrace_frame_cache entry; entries are
   removed again in record_btrace_frame_dealloc_cache.  */
1556 static htab_t bfcache;
1558 /* hash_f for htab_create_alloc of bfcache. */
/* The cache is keyed by the frame pointer value itself.  */
1561 bfcache_hash (const void *arg)
1563 const struct btrace_frame_cache *cache
1564 = (const struct btrace_frame_cache *) arg;
1566 return htab_hash_pointer (cache->frame);
1569 /* eq_f for htab_create_alloc of bfcache. */
/* Two cache entries are equal iff they describe the same frame.  */
1572 bfcache_eq (const void *arg1, const void *arg2)
1574 const struct btrace_frame_cache *cache1
1575 = (const struct btrace_frame_cache *) arg1;
1576 const struct btrace_frame_cache *cache2
1577 = (const struct btrace_frame_cache *) arg2;
1579 return cache1->frame == cache2->frame;
1582 /* Create a new btrace frame cache. */
/* The cache entry is allocated on the frame obstack and registered in
   the BFCACHE hash table; FRAME must not already have an entry.  */
1584 static struct btrace_frame_cache *
1585 bfcache_new (struct frame_info *frame)
1587 struct btrace_frame_cache *cache;
1590 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1591 cache->frame = frame;
1593 slot = htab_find_slot (bfcache, cache, INSERT);
1594 gdb_assert (*slot == NULL);
1600 /* Extract the branch trace function from a branch trace frame. */
/* Looks FRAME up in BFCACHE using a temporary pattern entry keyed on
   the frame pointer.  */
1602 static const struct btrace_function *
1603 btrace_get_frame_function (struct frame_info *frame)
1605 const struct btrace_frame_cache *cache;
1606 const struct btrace_function *bfun;
1607 struct btrace_frame_cache pattern;
1610 pattern.frame = frame;
1612 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1616 cache = (const struct btrace_frame_cache *) *slot;
1620 /* Implement stop_reason method for record_btrace_frame_unwind. */
/* Unwinding stops (UNWIND_UNAVAILABLE) when the function segment has
   no caller link (BFUN->UP is NULL), i.e. the trace does not record
   who called this function.  */
1622 static enum unwind_stop_reason
1623 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1626 const struct btrace_frame_cache *cache;
1627 const struct btrace_function *bfun;
1629 cache = (const struct btrace_frame_cache *) *this_cache;
1631 gdb_assert (bfun != NULL);
1633 if (bfun->up == NULL)
1634 return UNWIND_UNAVAILABLE;
1636 return UNWIND_NO_REASON;
1639 /* Implement this_id method for record_btrace_frame_unwind. */
/* Builds an unavailable-stack frame id from the frame's function start
   address (code) and the number of the function's first segment
   (special), so all frames of one btrace function share an id.  */
1642 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1643 struct frame_id *this_id)
1645 const struct btrace_frame_cache *cache;
1646 const struct btrace_function *bfun;
1647 CORE_ADDR code, special;
1649 cache = (const struct btrace_frame_cache *) *this_cache;
1652 gdb_assert (bfun != NULL);
/* Walk back to the first segment of this function.  */
1654 while (bfun->segment.prev != NULL)
1655 bfun = bfun->segment.prev;
1657 code = get_frame_func (this_frame);
1658 special = bfun->number;
1660 *this_id = frame_id_build_unavailable_stack_special (code, special);
1662 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1663 btrace_get_bfun_name (cache->bfun),
1664 core_addr_to_string_nz (this_id->code_addr),
1665 core_addr_to_string_nz (this_id->special_addr));
1668 /* Implement prev_register method for record_btrace_frame_unwind. */
/* Only the PC can be unwound; any other register throws
   NOT_AVAILABLE_ERROR.  The caller's PC is derived from the UP-linked
   function segment recorded in the trace.  */
1670 static struct value *
1671 record_btrace_frame_prev_register (struct frame_info *this_frame,
1675 const struct btrace_frame_cache *cache;
1676 const struct btrace_function *bfun, *caller;
1677 const struct btrace_insn *insn;
1678 struct gdbarch *gdbarch;
1682 gdbarch = get_frame_arch (this_frame);
1683 pcreg = gdbarch_pc_regnum (gdbarch);
1684 if (pcreg < 0 || regnum != pcreg)
1685 throw_error (NOT_AVAILABLE_ERROR,
1686 _("Registers are not available in btrace record history"));
1688 cache = (const struct btrace_frame_cache *) *this_cache;
1690 gdb_assert (bfun != NULL);
1694 throw_error (NOT_AVAILABLE_ERROR,
1695 _("No caller in btrace record history"));
/* UP links to a return target: the unwound PC is the first instruction
   of the linked segment.  */
1697 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1699 insn = VEC_index (btrace_insn_s, caller->insn, 0);
/* Otherwise the caller's last instruction is the call; the unwound PC
   is the instruction following it.  */
1704 insn = VEC_last (btrace_insn_s, caller->insn);
1707 pc += gdb_insn_length (gdbarch, pc);
1710 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1711 btrace_get_bfun_name (bfun), bfun->level,
1712 core_addr_to_string_nz (pc));
1714 return frame_unwind_got_address (this_frame, regnum, pc);
1717 /* Implement sniffer method for record_btrace_frame_unwind. */
/* Claim THIS_FRAME only while the thread is replaying: for the
   innermost frame the current replay position provides the function
   segment; for outer frames it is derived from the callee's UP link
   (unless the callee was entered via tail call).  */
1720 record_btrace_frame_sniffer (const struct frame_unwind *self,
1721 struct frame_info *this_frame,
1724 const struct btrace_function *bfun;
1725 struct btrace_frame_cache *cache;
1726 struct thread_info *tp;
1727 struct frame_info *next;
1729 /* THIS_FRAME does not contain a reference to its thread. */
1730 tp = find_thread_ptid (inferior_ptid);
1731 gdb_assert (tp != NULL);
1734 next = get_next_frame (this_frame);
1737 const struct btrace_insn_iterator *replay;
1739 replay = tp->btrace.replay;
1741 bfun = replay->function;
1745 const struct btrace_function *callee;
1747 callee = btrace_get_frame_function (next);
1748 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1755 DEBUG ("[frame] sniffed frame for %s on level %d",
1756 btrace_get_bfun_name (bfun), bfun->level);
1758 /* This is our frame. Initialize the frame cache. */
1759 cache = bfcache_new (this_frame);
1763 *this_cache = cache;
1767 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
/* Claim THIS_FRAME only when the next (inner) frame's function segment
   was entered via a tail call (BFUN_UP_LINKS_TO_TAILCALL set).  */
1770 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1771 struct frame_info *this_frame,
1774 const struct btrace_function *bfun, *callee;
1775 struct btrace_frame_cache *cache;
1776 struct frame_info *next;
1778 next = get_next_frame (this_frame);
1782 callee = btrace_get_frame_function (next);
1786 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1793 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1794 btrace_get_bfun_name (bfun), bfun->level);
1796 /* This is our frame. Initialize the frame cache. */
1797 cache = bfcache_new (this_frame);
1798 cache->tp = find_thread_ptid (inferior_ptid);
1801 *this_cache = cache;
/* Implement dealloc_cache for both btrace unwinders: remove the
   frame's entry from the BFCACHE hash table.  */
1806 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1808 struct btrace_frame_cache *cache;
1811 cache = (struct btrace_frame_cache *) this_cache;
1813 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1814 gdb_assert (slot != NULL);
1816 htab_remove_elt (bfcache, cache);
1819 /* btrace recording does not store previous memory content, nor the stack
1820 frames' content. Any unwinding would return erroneous results as the stack
1821 contents no longer match the changed PC value restored from history.
1822 Therefore this unwinder reports any possibly unwound registers as
1825 const struct frame_unwind record_btrace_frame_unwind =
1828 record_btrace_frame_unwind_stop_reason,
1829 record_btrace_frame_this_id,
1830 record_btrace_frame_prev_register,
1832 record_btrace_frame_sniffer,
1833 record_btrace_frame_dealloc_cache
/* The btrace unwinder for frames reached via tail call; shares all
   callbacks with record_btrace_frame_unwind except the sniffer.  */
1836 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1839 record_btrace_frame_unwind_stop_reason,
1840 record_btrace_frame_this_id,
1841 record_btrace_frame_prev_register,
1843 record_btrace_tailcall_frame_sniffer,
1844 record_btrace_frame_dealloc_cache
1847 /* Implement the to_get_unwinder method. */
/* Hands GDB the btrace unwinder while this target is pushed.  */
1849 static const struct frame_unwind *
1850 record_btrace_to_get_unwinder (struct target_ops *self)
1852 return &record_btrace_frame_unwind;
1855 /* Implement the to_get_tailcall_unwinder method. */
/* Hands GDB the btrace tail-call unwinder while this target is
   pushed.  */
1857 static const struct frame_unwind *
1858 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1860 return &record_btrace_tailcall_frame_unwind;
1863 /* Return a human-readable string for FLAG. */
/* Used for DEBUG output only.  NOTE(review): only the reverse cases
   are visible in this fragment; the forward-step/cont cases are
   elided.  */
1866 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1874 return "reverse-step";
1880 return "reverse-cont";
1889 /* Indicate that TP should be resumed according to FLAG. */
/* Only records the intent in TP's btrace flags; the actual stepping
   happens later, in record_btrace_wait.  */
1892 record_btrace_resume_thread (struct thread_info *tp,
1893 enum btrace_thread_flag flag)
1895 struct btrace_thread_info *btinfo;
1897 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1898 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1900 btinfo = &tp->btrace;
1902 /* Fetch the latest branch trace. */
1905 /* A resume request overwrites a preceding resume or stop request. */
1906 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1907 btinfo->flags |= flag;
1910 /* Get the current frame for TP. */
/* Temporarily switches INFERIOR_PTID to TP and clears its EXECUTING
   flag so get_current_frame can compute the stack; both are restored
   on all paths, including when an exception is rethrown.  */
1912 static struct frame_info *
1913 get_thread_current_frame (struct thread_info *tp)
1915 struct frame_info *frame;
1916 ptid_t old_inferior_ptid;
1919 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1920 old_inferior_ptid = inferior_ptid;
1921 inferior_ptid = tp->ptid;
1923 /* Clear the executing flag to allow changes to the current frame.
1924 We are not actually running, yet. We just started a reverse execution
1925 command or a record goto command.
1926 For the latter, EXECUTING is false and this has no effect.
1927 For the former, EXECUTING is true and we're in to_wait, about to
1928 move the thread. Since we need to recompute the stack, we temporarily
1929 set EXECUTING to false. */
1930 executing = is_executing (inferior_ptid);
1931 set_executing (inferior_ptid, 0);
1936 frame = get_current_frame ();
1938 CATCH (except, RETURN_MASK_ALL)
1940 /* Restore the previous execution state. */
1941 set_executing (inferior_ptid, executing);
1943 /* Restore the previous inferior_ptid. */
1944 inferior_ptid = old_inferior_ptid;
1946 throw_exception (except);
1950 /* Restore the previous execution state. */
1951 set_executing (inferior_ptid, executing);
1953 /* Restore the previous inferior_ptid. */
1954 inferior_ptid = old_inferior_ptid;
1959 /* Start replaying a thread. */
/* Allocates and installs TP's replay iterator positioned at the end of
   the trace (the current instruction), then fixes up the stepping
   frame ids, which are computed differently while replaying.  On
   error the iterator is freed and the exception rethrown.  */
1961 static struct btrace_insn_iterator *
1962 record_btrace_start_replaying (struct thread_info *tp)
1964 struct btrace_insn_iterator *replay;
1965 struct btrace_thread_info *btinfo;
1967 btinfo = &tp->btrace;
1970 /* We can't start replaying without trace. */
1971 if (btinfo->begin == NULL)
1974 /* GDB stores the current frame_id when stepping in order to detect steps
1976 Since frames are computed differently when we're replaying, we need to
1977 recompute those stored frames and fix them up so we can still detect
1978 subroutines after we started replaying. */
1981 struct frame_info *frame;
1982 struct frame_id frame_id;
1983 int upd_step_frame_id, upd_step_stack_frame_id;
1985 /* The current frame without replaying - computed via normal unwind. */
1986 frame = get_thread_current_frame (tp);
1987 frame_id = get_frame_id (frame);
1989 /* Check if we need to update any stepping-related frame id's. */
1990 upd_step_frame_id = frame_id_eq (frame_id,
1991 tp->control.step_frame_id);
1992 upd_step_stack_frame_id = frame_id_eq (frame_id,
1993 tp->control.step_stack_frame_id);
1995 /* We start replaying at the end of the branch trace. This corresponds
1996 to the current instruction. */
1997 replay = XNEW (struct btrace_insn_iterator);
1998 btrace_insn_end (replay, btinfo);
2000 /* Skip gaps at the end of the trace. */
2001 while (btrace_insn_get (replay) == NULL)
2005 steps = btrace_insn_prev (replay, 1);
2007 error (_("No trace."));
2010 /* We're not replaying, yet. */
2011 gdb_assert (btinfo->replay == NULL);
2012 btinfo->replay = replay;
2014 /* Make sure we're not using any stale registers. */
2015 registers_changed_ptid (tp->ptid);
2017 /* The current frame with replaying - computed via btrace unwind. */
2018 frame = get_thread_current_frame (tp);
2019 frame_id = get_frame_id (frame);
2021 /* Replace stepping related frames where necessary. */
2022 if (upd_step_frame_id)
2023 tp->control.step_frame_id = frame_id;
2024 if (upd_step_stack_frame_id)
2025 tp->control.step_stack_frame_id = frame_id;
2027 CATCH (except, RETURN_MASK_ALL)
2029 xfree (btinfo->replay);
2030 btinfo->replay = NULL;
2032 registers_changed_ptid (tp->ptid);
2034 throw_exception (except);
2041 /* Stop replaying a thread. */
/* Frees TP's replay iterator and flushes its cached registers.  */
2044 record_btrace_stop_replaying (struct thread_info *tp)
2046 struct btrace_thread_info *btinfo;
2048 btinfo = &tp->btrace;
2050 xfree (btinfo->replay);
2051 btinfo->replay = NULL;
2053 /* Make sure we're not leaving any stale registers. */
2054 registers_changed_ptid (tp->ptid);
2057 /* Stop replaying TP if it is at the end of its execution history. */
/* No-op when TP is not replaying; otherwise compares the replay
   position against the trace end.  */
2060 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2062 struct btrace_insn_iterator *replay, end;
2063 struct btrace_thread_info *btinfo;
2065 btinfo = &tp->btrace;
2066 replay = btinfo->replay;
2071 btrace_insn_end (&end, btinfo);
2073 if (btrace_insn_cmp (replay, &end) == 0)
2074 record_btrace_stop_replaying (tp);
2077 /* The to_resume method of target record-btrace. */
/* When moving forward without any replaying thread, the request is
   forwarded to the target beneath.  Otherwise this only records the
   BTHR_* intent per thread; record_btrace_wait does the stepping.  */
2080 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2081 enum gdb_signal signal)
2083 struct thread_info *tp;
2084 enum btrace_thread_flag flag, cflag;
2086 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2087 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2088 step ? "step" : "cont");
2090 /* Store the execution direction of the last resume.
2092 If there is more than one to_resume call, we have to rely on infrun
2093 to not change the execution direction in-between. */
2094 record_btrace_resume_exec_dir = execution_direction;
2096 /* As long as we're not replaying, just forward the request.
2098 For non-stop targets this means that no thread is replaying. In order to
2099 make progress, we may need to explicitly move replaying threads to the end
2100 of their execution history. */
2101 if ((execution_direction != EXEC_REVERSE)
2102 && !record_btrace_is_replaying (ops, minus_one_ptid))
2105 ops->to_resume (ops, ptid, step, signal);
2109 /* Compute the btrace thread flag for the requested move. */
2110 if (execution_direction == EXEC_REVERSE)
2112 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2117 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2121 /* We just indicate the resume intent here. The actual stepping happens in
2122 record_btrace_wait below.
2124 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2125 if (!target_is_non_stop_p ())
2127 gdb_assert (ptid_match (inferior_ptid, ptid));
2129 ALL_NON_EXITED_THREADS (tp)
2130 if (ptid_match (tp->ptid, ptid))
2132 if (ptid_match (tp->ptid, inferior_ptid))
2133 record_btrace_resume_thread (tp, flag);
2135 record_btrace_resume_thread (tp, cflag);
2140 ALL_NON_EXITED_THREADS (tp)
2141 if (ptid_match (tp->ptid, ptid))
2142 record_btrace_resume_thread (tp, flag);
2145 /* Async support. */
2146 if (target_can_async_p ())
2149 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2153 /* The to_commit_resume method of target record-btrace. */
/* Forwarded only when moving forward with no replaying thread, i.e.
   only when to_resume was forwarded too.  */
2156 record_btrace_commit_resume (struct target_ops *ops)
2158 if ((execution_direction != EXEC_REVERSE)
2159 && !record_btrace_is_replaying (ops, minus_one_ptid))
2160 ops->beneath->to_commit_resume (ops->beneath);
2163 /* Cancel resuming TP. */
/* Clears any pending BTHR_MOVE/BTHR_STOP intent and stops replaying if
   TP already reached the end of its history.  */
2166 record_btrace_cancel_resume (struct thread_info *tp)
2168 enum btrace_thread_flag flags;
2170 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2174 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2175 print_thread_id (tp),
2176 target_pid_to_str (tp->ptid), flags,
2177 btrace_thread_flag_to_str (flags));
2179 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2180 record_btrace_stop_replaying_at_end (tp);
2183 /* Return a target_waitstatus indicating that we ran out of history. */
2185 static struct target_waitstatus
2186 btrace_step_no_history (void)
2188 struct target_waitstatus status;
/* NO_HISTORY tells infrun the replay position hit a trace boundary.  */
2190 status.kind = TARGET_WAITKIND_NO_HISTORY;
2195 /* Return a target_waitstatus indicating that a step finished. */
/* SIGTRAP mirrors what a real single-step would report.  */
2197 static struct target_waitstatus
2198 btrace_step_stopped (void)
2200 struct target_waitstatus status;
2202 status.kind = TARGET_WAITKIND_STOPPED;
2203 status.value.sig = GDB_SIGNAL_TRAP;
2208 /* Return a target_waitstatus indicating that a thread was stopped as
/* GDB_SIGNAL_0 distinguishes an explicit stop request from a
   completed step (which reports SIGTRAP).  */
2211 static struct target_waitstatus
2212 btrace_step_stopped_on_request (void)
2214 struct target_waitstatus status;
2216 status.kind = TARGET_WAITKIND_STOPPED;
2217 status.value.sig = GDB_SIGNAL_0;
2222 /* Return a target_waitstatus indicating a spurious stop. */
/* Used by the single-step helpers to say "moved, nothing to report".  */
2224 static struct target_waitstatus
2225 btrace_step_spurious (void)
2227 struct target_waitstatus status;
2229 status.kind = TARGET_WAITKIND_SPURIOUS;
2234 /* Return a target_waitstatus indicating that the thread was not resumed. */
/* Reported by to_wait when no matching thread has a pending move.  */
2236 static struct target_waitstatus
2237 btrace_step_no_resumed (void)
2239 struct target_waitstatus status;
2241 status.kind = TARGET_WAITKIND_NO_RESUMED;
2246 /* Return a target_waitstatus indicating that we should wait again. */
/* IGNORE keeps the thread on the to_wait work list for another step.  */
2248 static struct target_waitstatus
2249 btrace_step_again (void)
2251 struct target_waitstatus status;
2253 status.kind = TARGET_WAITKIND_IGNORE;
2258 /* Clear the record histories. */
/* Frees and resets BTINFO's insn and call history so they are rebuilt
   from the new replay position.  */
2261 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2263 xfree (btinfo->insn_history);
2264 xfree (btinfo->call_history);
2266 btinfo->insn_history = NULL;
2267 btinfo->call_history = NULL;
2270 /* Check whether TP's current replay position is at a breakpoint. */
/* Also records the stop reason in TP's btrace info via
   record_check_stopped_by_breakpoint.  */
2273 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2275 struct btrace_insn_iterator *replay;
2276 struct btrace_thread_info *btinfo;
2277 const struct btrace_insn *insn;
2278 struct inferior *inf;
2280 btinfo = &tp->btrace;
2281 replay = btinfo->replay;
2286 insn = btrace_insn_get (replay);
2290 inf = find_inferior_ptid (tp->ptid);
2294 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2295 &btinfo->stop_reason);
2298 /* Step one instruction in forward direction. */
/* Returns NO_HISTORY when not replaying or when the step would pass
   the end of the trace; STOPPED when stepping onto a breakpoint;
   SPURIOUS after an uneventful step.  */
2300 static struct target_waitstatus
2301 record_btrace_single_step_forward (struct thread_info *tp)
2303 struct btrace_insn_iterator *replay, end, start;
2304 struct btrace_thread_info *btinfo;
2306 btinfo = &tp->btrace;
2307 replay = btinfo->replay;
2309 /* We're done if we're not replaying. */
2311 return btrace_step_no_history ();
2313 /* Check if we're stepping a breakpoint. */
2314 if (record_btrace_replay_at_breakpoint (tp))
2315 return btrace_step_stopped ();
2317 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2318 jump back to the instruction at which we started. */
2324 /* We will bail out here if we continue stepping after reaching the end
2325 of the execution history. */
2326 steps = btrace_insn_next (replay, 1);
2330 return btrace_step_no_history ();
2333 while (btrace_insn_get (replay) == NULL);
2335 /* Determine the end of the instruction trace. */
2336 btrace_insn_end (&end, btinfo);
2338 /* The execution trace contains (and ends with) the current instruction.
2339 This instruction has not been executed, yet, so the trace really ends
2340 one instruction earlier. */
2341 if (btrace_insn_cmp (replay, &end) == 0)
2342 return btrace_step_no_history ();
2344 return btrace_step_spurious ();
2347 /* Step one instruction in backward direction. */
/* Starts replaying if necessary.  Returns NO_HISTORY at the beginning
   of the trace, STOPPED on a breakpoint, SPURIOUS otherwise.  */
2349 static struct target_waitstatus
2350 record_btrace_single_step_backward (struct thread_info *tp)
2352 struct btrace_insn_iterator *replay, start;
2353 struct btrace_thread_info *btinfo;
2355 btinfo = &tp->btrace;
2356 replay = btinfo->replay;
2358 /* Start replaying if we're not already doing so. */
2360 replay = record_btrace_start_replaying (tp);
2362 /* If we can't step any further, we reached the end of the history.
2363 Skip gaps during replay. If we end up at a gap (at the beginning of
2364 the trace), jump back to the instruction at which we started. */
2370 steps = btrace_insn_prev (replay, 1);
2374 return btrace_step_no_history ();
2377 while (btrace_insn_get (replay) == NULL);
2379 /* Check if we're stepping a breakpoint.
2381 For reverse-stepping, this check is after the step. There is logic in
2382 infrun.c that handles reverse-stepping separately. See, for example,
2383 proceed and adjust_pc_after_break.
2385 This code assumes that for reverse-stepping, PC points to the last
2386 de-executed instruction, whereas for forward-stepping PC points to the
2387 next to-be-executed instruction. */
2388 if (record_btrace_replay_at_breakpoint (tp))
2389 return btrace_step_stopped ();
2391 return btrace_step_spurious ();
2394 /* Step a single thread. */
/* Consumes TP's pending BTHR_MOVE/BTHR_STOP flags and performs one
   step (or a continue iteration) in the requested direction.  For
   continue, SPURIOUS results re-arm the flags and return "again" so
   to_wait keeps iterating.  */
2396 static struct target_waitstatus
2397 record_btrace_step_thread (struct thread_info *tp)
2399 struct btrace_thread_info *btinfo;
2400 struct target_waitstatus status;
2401 enum btrace_thread_flag flags;
2403 btinfo = &tp->btrace;
2405 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2406 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2408 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2409 target_pid_to_str (tp->ptid), flags,
2410 btrace_thread_flag_to_str (flags));
2412 /* We can't step without an execution history. */
2413 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2414 return btrace_step_no_history ();
2419 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
/* BTHR_STOP: report an explicit stop request.  */
2422 return btrace_step_stopped_on_request ();
/* BTHR_STEP: a single forward step.  */
2425 status = record_btrace_single_step_forward (tp);
2426 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2429 return btrace_step_stopped ();
/* BTHR_RSTEP: a single backward step.  */
2432 status = record_btrace_single_step_backward (tp);
2433 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2436 return btrace_step_stopped ();
/* BTHR_CONT: keep stepping forward until something to report.  */
2439 status = record_btrace_single_step_forward (tp);
2440 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2443 btinfo->flags |= flags;
2444 return btrace_step_again ();
/* BTHR_RCONT: keep stepping backward until something to report.  */
2447 status = record_btrace_single_step_backward (tp);
2448 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2451 btinfo->flags |= flags;
2452 return btrace_step_again ();
2455 /* We keep threads moving at the end of their execution history. The to_wait
2456 method will stop the thread for whom the event is reported. */
2457 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2458 btinfo->flags |= flags;
2463 /* A vector of threads. */
2465 typedef struct thread_info * tp_t;
2468 /* Announce further events if necessary. */
/* If any thread is still moving or has a pending no-history status,
   mark the async event handler so to_wait is called again.  */
2471 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2472 const VEC (tp_t) *no_history)
2474 int more_moving, more_no_history;
2476 more_moving = !VEC_empty (tp_t, moving);
2477 more_no_history = !VEC_empty (tp_t, no_history);
2479 if (!more_moving && !more_no_history)
2483 DEBUG ("movers pending");
2485 if (more_no_history)
2486 DEBUG ("no-history pending");
2488 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2491 /* The to_wait method of target record-btrace. */
/* Forward to the target beneath when no thread is replaying.
   Otherwise, round-robin-step all matching threads with pending
   BTHR_MOVE/BTHR_STOP flags until one reports an event; "no history"
   reports are deferred until nothing else is left to report.  */
2494 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2495 struct target_waitstatus *status, int options)
2497 VEC (tp_t) *moving, *no_history;
2498 struct thread_info *tp, *eventing;
2499 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2501 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2503 /* As long as we're not replaying, just forward the request. */
2504 if ((execution_direction != EXEC_REVERSE)
2505 && !record_btrace_is_replaying (ops, minus_one_ptid))
2508 return ops->to_wait (ops, ptid, status, options);
2514 make_cleanup (VEC_cleanup (tp_t), &moving);
2515 make_cleanup (VEC_cleanup (tp_t), &no_history);
2517 /* Keep a work list of moving threads. */
2518 ALL_NON_EXITED_THREADS (tp)
2519 if (ptid_match (tp->ptid, ptid)
2520 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2521 VEC_safe_push (tp_t, moving, tp);
2523 if (VEC_empty (tp_t, moving))
2525 *status = btrace_step_no_resumed ();
2527 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2528 target_waitstatus_to_string (status));
2530 do_cleanups (cleanups);
2534 /* Step moving threads one by one, one step each, until either one thread
2535 reports an event or we run out of threads to step.
2537 When stepping more than one thread, chances are that some threads reach
2538 the end of their execution history earlier than others. If we reported
2539 this immediately, all-stop on top of non-stop would stop all threads and
2540 resume the same threads next time. And we would report the same thread
2541 having reached the end of its execution history again.
2543 In the worst case, this would starve the other threads. But even if other
2544 threads would be allowed to make progress, this would result in far too
2545 many intermediate stops.
2547 We therefore delay the reporting of "no execution history" until we have
2548 nothing else to report. By this time, all threads should have moved to
2549 either the beginning or the end of their execution history. There will
2550 be a single user-visible stop. */
2552 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2557 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2559 *status = record_btrace_step_thread (tp);
2561 switch (status->kind)
2563 case TARGET_WAITKIND_IGNORE:
2567 case TARGET_WAITKIND_NO_HISTORY:
2568 VEC_safe_push (tp_t, no_history,
2569 VEC_ordered_remove (tp_t, moving, ix));
2573 eventing = VEC_unordered_remove (tp_t, moving, ix);
2579 if (eventing == NULL)
2581 /* We started with at least one moving thread. This thread must have
2582 either stopped or reached the end of its execution history.
2584 In the former case, EVENTING must not be NULL.
2585 In the latter case, NO_HISTORY must not be empty. */
2586 gdb_assert (!VEC_empty (tp_t, no_history))
2588 /* We kept threads moving at the end of their execution history. Stop
2589 EVENTING now that we are going to report its stop. */
2590 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2591 eventing->btrace.flags &= ~BTHR_MOVE;
2593 *status = btrace_step_no_history ();
2596 gdb_assert (eventing != NULL);
2598 /* We kept threads replaying at the end of their execution history. Stop
2599 replaying EVENTING now that we are going to report its stop. */
2600 record_btrace_stop_replaying_at_end (eventing);
2602 /* Stop all other threads. */
2603 if (!target_is_non_stop_p ())
2604 ALL_NON_EXITED_THREADS (tp)
2605 record_btrace_cancel_resume (tp);
2607 /* In async mode, we need to announce further events. */
2608 if (target_is_async_p ())
2609 record_btrace_maybe_mark_async_event (moving, no_history);
2611 /* Start record histories anew from the current position. */
2612 record_btrace_clear_histories (&eventing->btrace);
2614 /* We moved the replay position but did not update registers. */
2615 registers_changed_ptid (eventing->ptid);
2617 DEBUG ("wait ended by thread %s (%s): %s",
2618 print_thread_id (eventing),
2619 target_pid_to_str (eventing->ptid),
2620 target_waitstatus_to_string (status));
2622 do_cleanups (cleanups);
2623 return eventing->ptid;
2626 /* The to_stop method of target record-btrace. */
/* While replaying (or moving in reverse), turn the stop request into a
   BTHR_STOP flag on each matching thread; otherwise forward it.  */
2629 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2631 DEBUG ("stop %s", target_pid_to_str (ptid));
2633 /* As long as we're not replaying, just forward the request. */
2634 if ((execution_direction != EXEC_REVERSE)
2635 && !record_btrace_is_replaying (ops, minus_one_ptid))
2638 ops->to_stop (ops, ptid);
2642 struct thread_info *tp;
2644 ALL_NON_EXITED_THREADS (tp)
2645 if (ptid_match (tp->ptid, ptid))
2647 tp->btrace.flags &= ~BTHR_MOVE;
2648 tp->btrace.flags |= BTHR_STOP;
2653 /* The to_can_execute_reverse method of target record-btrace. */
/* NOTE(review): the body is elided in this fragment; presumably it
   returns nonzero since btrace supports reverse execution — confirm
   against the full source.  */
2656 record_btrace_can_execute_reverse (struct target_ops *self)
2661 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
/* During replay, answer from the recorded stop reason; otherwise ask
   the target beneath.  */
2664 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2666 if (record_btrace_is_replaying (ops, minus_one_ptid))
2668 struct thread_info *tp = inferior_thread ();
2670 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2673 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2676 /* The to_supports_stopped_by_sw_breakpoint method of target
/* Supported while replaying; otherwise defer to the target beneath.  */
2680 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2682 if (record_btrace_is_replaying (ops, minus_one_ptid))
2685 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2688 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
/* During replay, answer from the recorded stop reason; otherwise ask
   the target beneath.  */
2691 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2693 if (record_btrace_is_replaying (ops, minus_one_ptid))
2695 struct thread_info *tp = inferior_thread ();
2697 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2700 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2703 /* The to_supports_stopped_by_hw_breakpoint method of target
/* Supported while replaying; otherwise defer to the target beneath.  */
2707 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2709 if (record_btrace_is_replaying (ops, minus_one_ptid))
2712 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2715 /* The to_update_thread_list method of target record-btrace. */
/* The thread list is frozen during replay.  */
2718 record_btrace_update_thread_list (struct target_ops *ops)
2720 /* We don't add or remove threads during replay. */
2721 if (record_btrace_is_replaying (ops, minus_one_ptid))
2724 /* Forward the request. */
2726 ops->to_update_thread_list (ops);
2729 /* The to_thread_alive method of target record-btrace. */
/* During replay a thread is "alive" iff it is still in GDB's thread
   list; otherwise ask the target beneath.  */
2732 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2734 /* We don't add or remove threads during replay. */
2735 if (record_btrace_is_replaying (ops, minus_one_ptid))
2736 return find_thread_ptid (ptid) != NULL;
2738 /* Forward the request. */
2740 return ops->to_thread_alive (ops, ptid);
2743 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
/* Moving the replay position invalidates cached registers and the
   recorded insn/call histories, and reprints the current frame.  */
2747 record_btrace_set_replay (struct thread_info *tp,
2748 const struct btrace_insn_iterator *it)
2750 struct btrace_thread_info *btinfo;
2752 btinfo = &tp->btrace;
2754 if (it == NULL || it->function == NULL)
2755 record_btrace_stop_replaying (tp);
2758 if (btinfo->replay == NULL)
2759 record_btrace_start_replaying (tp);
2760 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2763 *btinfo->replay = *it;
2764 registers_changed_ptid (tp->ptid);
2767 /* Start anew from the new replay position. */
2768 record_btrace_clear_histories (btinfo);
2770 stop_pc = regcache_read_pc (get_current_regcache ());
2771 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2774 /* The to_goto_record_begin method of target record-btrace. */
/* Positions the replay iterator at the first real (non-gap)
   instruction of the trace.  */
2777 record_btrace_goto_begin (struct target_ops *self)
2779 struct thread_info *tp;
2780 struct btrace_insn_iterator begin;
2782 tp = require_btrace_thread ();
2784 btrace_insn_begin (&begin, &tp->btrace);
2786 /* Skip gaps at the beginning of the trace. */
2787 while (btrace_insn_get (&begin) == NULL)
2791 steps = btrace_insn_next (&begin, 1);
2793 error (_("No trace."));
2796 record_btrace_set_replay (tp, &begin);
2799 /* The to_goto_record_end method of target record-btrace. */
/* A NULL iterator makes record_btrace_set_replay stop replaying, i.e.
   return to the current (live) position.  */
2802 record_btrace_goto_end (struct target_ops *ops)
2804 struct thread_info *tp;
2806 tp = require_btrace_thread ();
2808 record_btrace_set_replay (tp, NULL);
2811 /* The to_goto_record method of target record-btrace. */
/* Jump to instruction number INSN in the recorded history.  INSN is
   narrowed to unsigned int; a wrap-around is rejected.  */
2814 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2816 struct thread_info *tp;
2817 struct btrace_insn_iterator it;
2818 unsigned int number;
2823 /* Check for wrap-arounds. */
2825 error (_("Instruction number out of range."));
2827 tp = require_btrace_thread ();
2829 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2831 error (_("No such instruction."));
2833 record_btrace_set_replay (tp, &it);
2836 /* The to_record_stop_replaying method of target record-btrace. */
/* Stops replaying for every live thread.  */
2839 record_btrace_stop_replaying_all (struct target_ops *self)
2841 struct thread_info *tp;
2843 ALL_NON_EXITED_THREADS (tp)
2844 record_btrace_stop_replaying (tp);
2847 /* The to_execution_direction target method. */
/* Reports the direction of the last resume, stored by
   record_btrace_resume.  */
2849 static enum exec_direction_kind
2850 record_btrace_execution_direction (struct target_ops *self)
2852 return record_btrace_resume_exec_dir;
2855 /* The to_prepare_to_generate_core target method. */
/* While this flag is set, register/memory accesses bypass the replay
   restrictions so a core file can be written.  */
2858 record_btrace_prepare_to_generate_core (struct target_ops *self)
2860 record_btrace_generating_corefile = 1;
2863 /* The to_done_generating_core target method. */
/* Clears the flag set by record_btrace_prepare_to_generate_core.  */
2866 record_btrace_done_generating_core (struct target_ops *self)
2868 record_btrace_generating_corefile = 0;
2871 /* Initialize the record-btrace target ops. */
2874 init_record_btrace_ops (void)
2876 struct target_ops *ops;
2878 ops = &record_btrace_ops;
2879 ops->to_shortname = "record-btrace";
2880 ops->to_longname = "Branch tracing target";
2881 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2882 ops->to_open = record_btrace_open;
2883 ops->to_close = record_btrace_close;
2884 ops->to_async = record_btrace_async;
2885 ops->to_detach = record_detach;
2886 ops->to_disconnect = record_btrace_disconnect;
2887 ops->to_mourn_inferior = record_mourn_inferior;
2888 ops->to_kill = record_kill;
2889 ops->to_stop_recording = record_btrace_stop_recording;
2890 ops->to_info_record = record_btrace_info;
2891 ops->to_insn_history = record_btrace_insn_history;
2892 ops->to_insn_history_from = record_btrace_insn_history_from;
2893 ops->to_insn_history_range = record_btrace_insn_history_range;
2894 ops->to_call_history = record_btrace_call_history;
2895 ops->to_call_history_from = record_btrace_call_history_from;
2896 ops->to_call_history_range = record_btrace_call_history_range;
2897 ops->to_record_is_replaying = record_btrace_is_replaying;
2898 ops->to_record_will_replay = record_btrace_will_replay;
2899 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2900 ops->to_xfer_partial = record_btrace_xfer_partial;
2901 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2902 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2903 ops->to_fetch_registers = record_btrace_fetch_registers;
2904 ops->to_store_registers = record_btrace_store_registers;
2905 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2906 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2907 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2908 ops->to_resume = record_btrace_resume;
2909 ops->to_commit_resume = record_btrace_commit_resume;
2910 ops->to_wait = record_btrace_wait;
2911 ops->to_stop = record_btrace_stop;
2912 ops->to_update_thread_list = record_btrace_update_thread_list;
2913 ops->to_thread_alive = record_btrace_thread_alive;
2914 ops->to_goto_record_begin = record_btrace_goto_begin;
2915 ops->to_goto_record_end = record_btrace_goto_end;
2916 ops->to_goto_record = record_btrace_goto;
2917 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2918 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2919 ops->to_supports_stopped_by_sw_breakpoint
2920 = record_btrace_supports_stopped_by_sw_breakpoint;
2921 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2922 ops->to_supports_stopped_by_hw_breakpoint
2923 = record_btrace_supports_stopped_by_hw_breakpoint;
2924 ops->to_execution_direction = record_btrace_execution_direction;
2925 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2926 ops->to_done_generating_core = record_btrace_done_generating_core;
2927 ops->to_stratum = record_stratum;
2928 ops->to_magic = OPS_MAGIC;
2931 /* Start recording in BTS format. */
2934 cmd_record_btrace_bts_start (char *args, int from_tty)
2936 if (args != NULL && *args != 0)
2937 error (_("Invalid argument."));
2939 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2943 execute_command ("target record-btrace", from_tty);
2945 CATCH (exception, RETURN_MASK_ALL)
2947 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2948 throw_exception (exception);
2953 /* Start recording in Intel Processor Trace format. */
2956 cmd_record_btrace_pt_start (char *args, int from_tty)
2958 if (args != NULL && *args != 0)
2959 error (_("Invalid argument."));
2961 record_btrace_conf.format = BTRACE_FORMAT_PT;
2965 execute_command ("target record-btrace", from_tty);
2967 CATCH (exception, RETURN_MASK_ALL)
2969 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2970 throw_exception (exception);
2975 /* Alias for "target record". */
2978 cmd_record_btrace_start (char *args, int from_tty)
2980 if (args != NULL && *args != 0)
2981 error (_("Invalid argument."));
2983 record_btrace_conf.format = BTRACE_FORMAT_PT;
2987 execute_command ("target record-btrace", from_tty);
2989 CATCH (exception, RETURN_MASK_ALL)
2991 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2995 execute_command ("target record-btrace", from_tty);
2997 CATCH (exception, RETURN_MASK_ALL)
2999 record_btrace_conf.format = BTRACE_FORMAT_NONE;
3000 throw_exception (exception);
3007 /* The "set record btrace" command. */
3010 cmd_set_record_btrace (char *args, int from_tty)
3012 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
3015 /* The "show record btrace" command. */
3018 cmd_show_record_btrace (char *args, int from_tty)
3020 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3023 /* The "show record btrace replay-memory-access" command. */
3026 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3027 struct cmd_list_element *c, const char *value)
3029 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3030 replay_memory_access);
3033 /* The "set record btrace bts" command. */
3036 cmd_set_record_btrace_bts (char *args, int from_tty)
3038 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3039 "by an appropriate subcommand.\n"));
3040 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3041 all_commands, gdb_stdout);
3044 /* The "show record btrace bts" command. */
3047 cmd_show_record_btrace_bts (char *args, int from_tty)
3049 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3052 /* The "set record btrace pt" command. */
3055 cmd_set_record_btrace_pt (char *args, int from_tty)
3057 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3058 "by an appropriate subcommand.\n"));
3059 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3060 all_commands, gdb_stdout);
3063 /* The "show record btrace pt" command. */
3066 cmd_show_record_btrace_pt (char *args, int from_tty)
3068 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3093 void _initialize_record_btrace (void);
3095 /* Initialize btrace commands. */
3098 _initialize_record_btrace (void)
3100 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3101 _("Start branch trace recording."), &record_btrace_cmdlist,
3102 "record btrace ", 0, &record_cmdlist);
3103 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3105 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3107 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3108 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3109 This format may not be available on all processors."),
3110 &record_btrace_cmdlist);
3111 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3113 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3115 Start branch trace recording in Intel Processor Trace format.\n\n\
3116 This format may not be available on all processors."),
3117 &record_btrace_cmdlist);
3118 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3120 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3121 _("Set record options"), &set_record_btrace_cmdlist,
3122 "set record btrace ", 0, &set_record_cmdlist);
3124 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3125 _("Show record options"), &show_record_btrace_cmdlist,
3126 "show record btrace ", 0, &show_record_cmdlist);
3128 add_setshow_enum_cmd ("replay-memory-access", no_class,
3129 replay_memory_access_types, &replay_memory_access, _("\
3130 Set what memory accesses are allowed during replay."), _("\
3131 Show what memory accesses are allowed during replay."),
3132 _("Default is READ-ONLY.\n\n\
3133 The btrace record target does not trace data.\n\
3134 The memory therefore corresponds to the live target and not \
3135 to the current replay position.\n\n\
3136 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3137 When READ-WRITE, allow accesses to read-only and read-write memory during \
3139 NULL, cmd_show_replay_memory_access,
3140 &set_record_btrace_cmdlist,
3141 &show_record_btrace_cmdlist);
3143 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3144 _("Set record btrace bts options"),
3145 &set_record_btrace_bts_cmdlist,
3146 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3148 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3149 _("Show record btrace bts options"),
3150 &show_record_btrace_bts_cmdlist,
3151 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3153 add_setshow_uinteger_cmd ("buffer-size", no_class,
3154 &record_btrace_conf.bts.size,
3155 _("Set the record/replay bts buffer size."),
3156 _("Show the record/replay bts buffer size."), _("\
3157 When starting recording request a trace buffer of this size. \
3158 The actual buffer size may differ from the requested size. \
3159 Use \"info record\" to see the actual buffer size.\n\n\
3160 Bigger buffers allow longer recording but also take more time to process \
3161 the recorded execution trace.\n\n\
3162 The trace buffer size may not be changed while recording."), NULL,
3163 show_record_bts_buffer_size_value,
3164 &set_record_btrace_bts_cmdlist,
3165 &show_record_btrace_bts_cmdlist);
3167 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3168 _("Set record btrace pt options"),
3169 &set_record_btrace_pt_cmdlist,
3170 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3172 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3173 _("Show record btrace pt options"),
3174 &show_record_btrace_pt_cmdlist,
3175 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3177 add_setshow_uinteger_cmd ("buffer-size", no_class,
3178 &record_btrace_conf.pt.size,
3179 _("Set the record/replay pt buffer size."),
3180 _("Show the record/replay pt buffer size."), _("\
3181 Bigger buffers allow longer recording but also take more time to process \
3182 the recorded execution.\n\
3183 The actual buffer size may differ from the requested size. Use \"info record\" \
3184 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3185 &set_record_btrace_pt_cmdlist,
3186 &show_record_btrace_pt_cmdlist);
3188 init_record_btrace_ops ();
3189 add_target (&record_btrace_ops);
3191 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3194 record_btrace_conf.bts.size = 64 * 1024;
3195 record_btrace_conf.pt.size = 16 * 1024;