1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
  replay_memory_access_read_only,
  replay_memory_access_read_write,

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.

   Only prints when the global "record_debug" setting is non-zero.  */

#define DEBUG(msg, args...) \
    if (record_debug != 0) \
      fprintf_unfiltered (gdb_stdlog, \
			  "[record-btrace] " msg "\n", ##args); \
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  /* No thread_info for INFERIOR_PTID means there is nothing to trace.  */
    error (_("No thread."));

  validate_registers_access ();

  /* An empty trace means there is nothing to inspect or replay.  */
  if (btrace_is_empty (tp))
    error (_("No trace."));
/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
  struct thread_info *tp;

  /* Delegates all validation (thread exists, trace non-empty) to
     require_btrace_thread.  */
  tp = require_btrace_thread ();
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback; a failure to enable tracing on
   one thread must not abort the caller, so errors are downgraded to
   warnings.  */

  btrace_enable (tp, &record_btrace_conf);
  CATCH (error, RETURN_MASK_ERROR)
      warning ("%s", error.message);
/* Callback function to disable branch tracing for one thread.
   ARG is the thread_info to act on (cleanup-callback signature).  */

record_btrace_disable_callback (void *arg)
  struct thread_info *tp = (struct thread_info *) arg;
/* Enable automatic tracing of new threads.  */

record_btrace_auto_enable (void)
  DEBUG ("attach thread observer");

  /* Remember the observer so record_btrace_auto_disable can detach it.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
/* Disable automatic tracing of new threads.  Safe to call more than once.  */

record_btrace_auto_disable (void)
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
/* The record-btrace async event handler function.
   DATA is unused; simply forwards to the generic inferior event handler.  */

record_btrace_handle_async_inferior_event (gdb_client_data data)
  inferior_event_handler (INF_REG_EVENT, NULL);
/* See record-btrace.h.  */

record_btrace_push_target (void)
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
  record_btrace_generating_corefile = 0;

  /* Notify observers (e.g. MI) that "record btrace" is now active.  */
  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
/* The to_open method of target record-btrace.
   ARGS optionally selects threads by number list; an empty list means all
   non-exited threads.  */

record_btrace_open (const char *args, int from_tty)
  struct cleanup *disable_chain;
  struct thread_info *tp;

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable btrace on every matching thread; register a cleanup per thread
     so tracing is disabled again if anything below throws.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);

  record_btrace_push_target ();

  /* Success: keep tracing enabled.  */
  discard_cleanups (disable_chain);
/* The to_stop_recording method of target record-btrace.  */

record_btrace_stop_recording (struct target_ops *self)
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  /* Disable tracing only on threads that are actually being traced.  */
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
/* The to_disconnect method of target record-btrace.  */

record_btrace_disconnect (struct target_ops *self, const char *args,
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
/* The to_close method of target record-btrace.  */

record_btrace_close (struct target_ops *self)
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
/* The to_async method of target record-btrace.
   Marks or clears our async event handler per ENABLE, then forwards to the
   target beneath.  */

record_btrace_async (struct target_ops *ops, int enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
/* Adjusts the size and returns a human readable size suffix.
   Scales *SIZE down by the largest power-of-two unit (GB, MB, KB) that
   divides it exactly.  */

record_btrace_adjust_size (unsigned int *size)
  if ((sz & ((1u << 30) - 1)) == 0)
  else if ((sz & ((1u << 20) - 1)) == 0)
  else if ((sz & ((1u << 10) - 1)) == 0)
/* Print a BTS configuration.  */

record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
  /* Scale the buffer size to a human-readable unit before printing.  */
  suffix = record_btrace_adjust_size (&size);
  printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
/* Print an Intel Processor Trace configuration.  */

record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
  /* Scale the buffer size to a human-readable unit before printing.  */
  suffix = record_btrace_adjust_size (&size);
  printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
381 /* Print a branch tracing configuration. */
384 record_btrace_print_conf (const struct btrace_config *conf)
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
389 switch (conf->format)
391 case BTRACE_FORMAT_NONE:
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* The to_info_record method of target record-btrace.
   Prints the recording configuration and per-thread trace statistics.  */

record_btrace_info (struct target_ops *self)
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  tp = find_thread_ptid (inferior_ptid);
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);

  record_btrace_print_conf (conf);

  if (!btrace_is_empty (tp))
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count calls from the end of the function-call trace.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* The last instruction does not really belong to the trace.  */

      /* Skip gaps at the end.  */
	  steps = btrace_insn_prev (&insn, 1);

	  insns = btrace_insn_number (&insn);

      gaps = btinfo->ngaps;

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
/* Print a decode error.
   Maps ERRCODE to a human-readable string for the given trace FORMAT and
   emits it to UIOUT as a bracketed "[decode error (N): ...]" annotation.  */

btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
  /* Fall-back description for codes we do not recognize.  */
  errstr = _("unknown");

    case BTRACE_FORMAT_BTS:
	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
	case BDE_PT_USER_QUIT:
	  errstr = _("trace decode cancelled");

	case BDE_PT_DISABLED:
	  errstr = _("disabled");

	case BDE_PT_OVERFLOW:
	  errstr = _("overflow");

	  /* Let the libipt decoder library describe its own error codes.  */
	  errstr = pt_errstr (pt_errcode (errcode));
#endif /* defined (HAVE_LIBIPT) */

  uiout->text (_("["));
  uiout->text (_("decode error ("));
  uiout->field_int ("errcode", errcode);
  uiout->text (_("): "));
  uiout->text (errstr);
  uiout->text (_("]\n"));
/* Print an unsigned int VAL to UIOUT under field name FLD.  */

ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
  uiout->field_fmt (fld, "%u", val);
/* A range of source lines.  */

struct btrace_line_range
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */

  /* The last line (exclusive).  */
/* Construct a line range covering [BEGIN, END) within SYMTAB.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
  struct btrace_line_range range;

  range.symtab = symtab;
/* Add a line to a line range, widening RANGE as needed to include LINE.
   Returns the (possibly widened) range by value.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
  if (range.end <= range.begin)
      /* This is the first entry.  */
      range.end = line + 1;
  else if (line < range.begin)
  else if (range.end < line)
/* Return non-zero if RANGE is empty, zero otherwise.
   END is exclusive, so an empty range has end <= begin.  */

btrace_line_range_is_empty (struct btrace_line_range range)
  return range.end <= range.begin;
/* Return non-zero if LHS contains RHS, zero otherwise.
   Both ranges must be in the same symtab for containment to hold.  */

btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
627 /* Find the line range associated with PC. */
629 static struct btrace_line_range
630 btrace_find_line_range (CORE_ADDR pc)
632 struct btrace_line_range range;
633 struct linetable_entry *lines;
634 struct linetable *ltable;
635 struct symtab *symtab;
638 symtab = find_pc_line_symtab (pc);
640 return btrace_mk_line_range (NULL, 0, 0);
642 ltable = SYMTAB_LINETABLE (symtab);
644 return btrace_mk_line_range (symtab, 0, 0);
646 nlines = ltable->nitems;
647 lines = ltable->item;
649 return btrace_mk_line_range (symtab, 0, 0);
651 range = btrace_mk_line_range (symtab, 0, 0);
652 for (i = 0; i < nlines - 1; i++)
654 if ((lines[i].pc == pc) && (lines[i].line != 0))
655 range = btrace_line_range_add (range, lines[i].line);
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
      /* Close the tuple of the previous source line, if any.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
      print_source_lines (lines.symtab, line, line + 1, psl_flags);
      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
/* Disassemble a section of the recorded instruction trace.
   Prints instructions in [BEGIN, END) to UIOUT, interleaving source lines
   when FLAGS requests it and annotating trace gaps.  */

btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  gdb_disassembler di (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	      struct btrace_line_range lines;

	      /* Only print source lines we have not printed yet.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
	  else if (ui_item_chain == NULL)
		= make_cleanup_ui_out_tuple_begin_end (uiout,
	      /* No source information.  */
	      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");

	  gdb_assert (ui_item_chain != NULL);

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  di.pretty_print_insn (uiout, &dinsn, flags);

  do_cleanups (cleanups);
/* The to_insn_history method of target record-btrace.
   SIZE's sign selects the direction; its magnitude is the number of
   instructions to print, continuing from the previous request if any.  */

record_btrace_insn_history (struct target_ops *self, int size, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
      begin = history->begin;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

	  covered = btrace_insn_prev (&begin, context);
	  covered = btrace_insn_next (&end, context);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the printed window for the next relative request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_insn_history_range method of target record-btrace.
   Prints instructions numbered [FROM, TO], both inclusive.  */

record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
/* The to_insn_history_from method of target record-btrace.
   Computes an inclusive [begin, end] window of |SIZE| instructions anchored
   at FROM (direction given by SIZE's sign) and delegates to the range
   method.  */

record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
  ULONGEST begin, end, context;

  context = abs (size);
    error (_("Bad record instruction-history-size."));

      begin = from - context + 1;

      end = from + context - 1;

      /* Check for wrap-around.  */

  record_btrace_insn_history_range (self, begin, end, flags);
/* Print the instruction number range for a function call history line.
   The range is inclusive at both ends.  */

btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_field_uint (uiout, "insn end", end);
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
  struct btrace_insn *insn;
  struct symtab *symtab;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip instructions whose line info maps outside BFUN's symtab.  */
      if (sal.symtab != symtab || sal.line == 0)

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
/* Print the source line information for a function call history line.
   Emits the file name plus the min/max source line of BFUN's instructions.  */

btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);

  uiout->field_int ("min line", begin);

  uiout->field_int ("max line", end);
/* Get the name of a branch trace function.
   Prefers the full symbol's print name, then the minimal symbol's.  */

btrace_get_bfun_name (const struct btrace_function *bfun)
  struct minimal_symbol *msym;

    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
/* Disassemble a section of the recorded function trace.
   Prints one line per function segment in [BEGIN, END) to UIOUT, honoring
   the RECORD_PRINT_* bits in INT_FLAGS.  */

btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;

      bfun = btrace_call_get (&it);

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	  /* Indent by the call depth of this function segment.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)

	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
/* The to_call_history method of target record-btrace.
   SIZE's sign selects the direction; its magnitude is the number of calls
   to print, continuing from the previous request if any.  */

record_btrace_call_history (struct target_ops *self, int size, int int_flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
      begin = history->begin;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

	  covered = btrace_call_prev (&begin, context);
	  covered = btrace_call_next (&end, context);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the printed window for the next relative request.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_call_history_range method of target record-btrace.
   Prints function segments numbered [FROM, TO], both inclusive.  */

record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
/* The to_call_history_from method of target record-btrace.
   Computes an inclusive [begin, end] window of |SIZE| calls anchored at
   FROM (direction given by SIZE's sign) and delegates to the range
   method.  */

record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
    error (_("Bad record function-call-history-size."));

      begin = from - context + 1;

      end = from + context - 1;

      /* Check for wrap-around.  */

  record_btrace_call_history_range (self, begin, end, flags);
/* The to_record_is_replaying method of target record-btrace.
   Returns non-zero if any non-exited thread matching PTID is replaying.  */

record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
/* The to_record_will_replay method of target record-btrace.
   Reverse execution always replays; forward execution replays only if we
   are already replaying.  */

record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
/* The to_xfer_partial method of target record-btrace.
   While replaying in read-only mode, memory writes are refused and reads
   are restricted to read-only sections; otherwise the request is forwarded
   to the target beneath.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
	case TARGET_OBJECT_MEMORY:
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
		return TARGET_XFER_UNAVAILABLE;

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);

	    return TARGET_XFER_UNAVAILABLE;

  /* Forward the request.  */
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
/* The to_insert_breakpoint method of target record-btrace.  */

record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
  CATCH (except, RETURN_MASK_ALL)
      /* Restore the original access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The to_remove_breakpoint method of target record-btrace.  */

record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
  CATCH (except, RETURN_MASK_ALL)
      /* Restore the original access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The to_fetch_registers method of target record-btrace.
   While replaying, only the PC register is available (supplied from the
   replay position); otherwise the request is forwarded beneath.  */

record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
/* The to_store_registers method of target record-btrace.
   Writing registers is refused while replaying; otherwise forwarded.  */

record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t->to_store_registers (t, regcache, regno);
/* The to_prepare_to_store method of target record-btrace.
   A no-op while replaying; otherwise forwarded to the target beneath.  */

record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))

  t->to_prepare_to_store (t, regcache);
1543 /* The branch trace frame cache.  */
1545 struct btrace_frame_cache
/* The thread this frame belongs to.  */
1548 struct thread_info *tp;
1550 /* The frame info.  */
1551 struct frame_info *frame;
1553 /* The branch trace function segment.  */
1554 const struct btrace_function *bfun;
1557 /* A struct btrace_frame_cache hash table indexed by NEXT.
   Maps a frame_info pointer to its btrace_frame_cache entry
   (see bfcache_hash/bfcache_eq below).  */
1559 static htab_t bfcache;
1561 /* hash_f for htab_create_alloc of bfcache.  */
1564 bfcache_hash (const void *arg)
1566 const struct btrace_frame_cache *cache
1567 = (const struct btrace_frame_cache *) arg;
/* Entries are identified solely by their frame pointer.  */
1569 return htab_hash_pointer (cache->frame);
1572 /* eq_f for htab_create_alloc of bfcache.  */
1575 bfcache_eq (const void *arg1, const void *arg2)
1577 const struct btrace_frame_cache *cache1
1578 = (const struct btrace_frame_cache *) arg1;
1579 const struct btrace_frame_cache *cache2
1580 = (const struct btrace_frame_cache *) arg2;
/* Two entries are equal iff they describe the same frame.  */
1582 return cache1->frame == cache2->frame;
1585 /* Create a new btrace frame cache.  */
1587 static struct btrace_frame_cache *
1588 bfcache_new (struct frame_info *frame)
1590 struct btrace_frame_cache *cache;
/* The cache lives on the frame obstack and is freed together with
   the frame; see record_btrace_frame_dealloc_cache for removal.  */
1593 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1594 cache->frame = frame;
/* Register the new entry; FRAME must not already be cached.  */
1596 slot = htab_find_slot (bfcache, cache, INSERT);
1597 gdb_assert (*slot == NULL);
1603 /* Extract the branch trace function from a branch trace frame.  */
1605 static const struct btrace_function *
1606 btrace_get_frame_function (struct frame_info *frame)
1608 const struct btrace_frame_cache *cache;
1609 const struct btrace_function *bfun;
1610 struct btrace_frame_cache pattern;
/* Look FRAME up in bfcache without inserting; only frames sniffed by
   our unwinders have an entry.  */
1613 pattern.frame = frame;
1615 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1619 cache = (const struct btrace_frame_cache *) *slot;
1623 /* Implement stop_reason method for record_btrace_frame_unwind.  */
1625 static enum unwind_stop_reason
1626 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1629 const struct btrace_frame_cache *cache;
1630 const struct btrace_function *bfun;
1632 cache = (const struct btrace_frame_cache *) *this_cache;
1634 gdb_assert (bfun != NULL);
/* Without a caller segment in the trace we cannot unwind further.  */
1636 if (bfun->up == NULL)
1637 return UNWIND_UNAVAILABLE;
1639 return UNWIND_NO_REASON;
1642 /* Implement this_id method for record_btrace_frame_unwind.  */
1645 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1646 struct frame_id *this_id)
1648 const struct btrace_frame_cache *cache;
1649 const struct btrace_function *bfun;
1650 CORE_ADDR code, special;
1652 cache = (const struct btrace_frame_cache *) *this_cache;
1655 gdb_assert (bfun != NULL);
/* Use the first segment of this function call so that all segments of
   the same call yield the same frame id.  */
1657 while (bfun->segment.prev != NULL)
1658 bfun = bfun->segment.prev;
1660 code = get_frame_func (this_frame);
1661 special = bfun->number;
/* The stack is not available during replay - build an id without a
   stack address, distinguished by the segment number.  */
1663 *this_id = frame_id_build_unavailable_stack_special (code, special);
1665 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1666 btrace_get_bfun_name (cache->bfun),
1667 core_addr_to_string_nz (this_id->code_addr),
1668 core_addr_to_string_nz (this_id->special_addr));
1671 /* Implement prev_register method for record_btrace_frame_unwind.  */
1673 static struct value *
1674 record_btrace_frame_prev_register (struct frame_info *this_frame,
1678 const struct btrace_frame_cache *cache;
1679 const struct btrace_function *bfun, *caller;
1680 const struct btrace_insn *insn;
1681 struct gdbarch *gdbarch;
1685 gdbarch = get_frame_arch (this_frame);
1686 pcreg = gdbarch_pc_regnum (gdbarch);
/* Only the PC can be reconstructed from the branch trace.  */
1687 if (pcreg < 0 || regnum != pcreg)
1688 throw_error (NOT_AVAILABLE_ERROR,
1689 _("Registers are not available in btrace record history"));
1691 cache = (const struct btrace_frame_cache *) *this_cache;
1693 gdb_assert (bfun != NULL);
1697 throw_error (NOT_AVAILABLE_ERROR,
1698 _("No caller in btrace record history"));
/* For a return link, the caller resumes at its first instruction;
   otherwise the caller's PC is past its last executed instruction.
   NOTE(review): some intermediate statements are not visible in this
   chunk - confirm against the full source.  */
1700 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1702 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1707 insn = VEC_last (btrace_insn_s, caller->insn);
1710 pc += gdb_insn_length (gdbarch, pc);
1713 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1714 btrace_get_bfun_name (bfun), bfun->level,
1715 core_addr_to_string_nz (pc));
1717 return frame_unwind_got_address (this_frame, regnum, pc);
1720 /* Implement sniffer method for record_btrace_frame_unwind.  */
1723 record_btrace_frame_sniffer (const struct frame_unwind *self,
1724 struct frame_info *this_frame,
1727 const struct btrace_function *bfun;
1728 struct btrace_frame_cache *cache;
1729 struct thread_info *tp;
1730 struct frame_info *next;
1732 /* THIS_FRAME does not contain a reference to its thread.  */
1733 tp = find_thread_ptid (inferior_ptid);
1734 gdb_assert (tp != NULL);
1737 next = get_next_frame (this_frame);
/* For the innermost frame, use the current replay position.  */
1740 const struct btrace_insn_iterator *replay;
1742 replay = tp->btrace.replay;
1744 bfun = replay->function;
/* Otherwise, claim the frame if the next (inner) frame is one of ours
   and was entered via a regular call (tail calls are handled by the
   tailcall sniffer below in the original file).  */
1748 const struct btrace_function *callee;
1750 callee = btrace_get_frame_function (next);
1751 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1758 DEBUG ("[frame] sniffed frame for %s on level %d",
1759 btrace_get_bfun_name (bfun), bfun->level);
1761 /* This is our frame. Initialize the frame cache.  */
1762 cache = bfcache_new (this_frame);
1766 *this_cache = cache;
1770 /* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */
1773 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1774 struct frame_info *this_frame,
1777 const struct btrace_function *bfun, *callee;
1778 struct btrace_frame_cache *cache;
1779 struct frame_info *next;
1781 next = get_next_frame (this_frame);
1785 callee = btrace_get_frame_function (next);
/* Only claim frames whose callee was entered via a tail call.  */
1789 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1796 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1797 btrace_get_bfun_name (bfun), bfun->level);
1799 /* This is our frame. Initialize the frame cache.  */
1800 cache = bfcache_new (this_frame);
1801 cache->tp = find_thread_ptid (inferior_ptid);
1804 *this_cache = cache;
/* Implement dealloc_cache method; undo the bfcache_new registration.  */
1809 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1811 struct btrace_frame_cache *cache;
1814 cache = (struct btrace_frame_cache *) this_cache;
/* The entry must have been registered by bfcache_new.  */
1816 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1817 gdb_assert (slot != NULL);
1819 htab_remove_elt (bfcache, cache);
1822 /* btrace recording does not store previous memory content, nor the stack
1823 frames content. Any unwinding would return erroneous results as the stack
1824 contents no longer match the changed PC value restored from history.
1825 Therefore this unwinder reports any possibly unwound registers as
1828 const struct frame_unwind record_btrace_frame_unwind =
1831 record_btrace_frame_unwind_stop_reason,
1832 record_btrace_frame_this_id,
1833 record_btrace_frame_prev_register,
1835 record_btrace_frame_sniffer,
1836 record_btrace_frame_dealloc_cache
/* The tail call unwinder shares its methods with
   record_btrace_frame_unwind, except for the sniffer.  */
1839 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1842 record_btrace_frame_unwind_stop_reason,
1843 record_btrace_frame_this_id,
1844 record_btrace_frame_prev_register,
1846 record_btrace_tailcall_frame_sniffer,
1847 record_btrace_frame_dealloc_cache
1850 /* Implement the to_get_unwinder method.  */
1852 static const struct frame_unwind *
1853 record_btrace_to_get_unwinder (struct target_ops *self)
/* Always use the btrace unwinder; it declines frames via its sniffer.  */
1855 return &record_btrace_frame_unwind;
1858 /* Implement the to_get_tailcall_unwinder method.  */
1860 static const struct frame_unwind *
1861 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1863 return &record_btrace_tailcall_frame_unwind;
1866 /* Return a human-readable string for FLAG.  Used in DEBUG output.  */
1869 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1877 return "reverse-step";
1883 return "reverse-cont";
1892 /* Indicate that TP should be resumed according to FLAG.  The actual
   stepping happens later, in record_btrace_wait.  */
1895 record_btrace_resume_thread (struct thread_info *tp,
1896 enum btrace_thread_flag flag)
1898 struct btrace_thread_info *btinfo;
1900 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1901 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1903 btinfo = &tp->btrace;
1905 /* Fetch the latest branch trace.  */
1908 /* A resume request overwrites a preceding resume or stop request.  */
1909 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1910 btinfo->flags |= flag;
1913 /* Get the current frame for TP.  */
1915 static struct frame_info *
1916 get_thread_current_frame (struct thread_info *tp)
1918 struct frame_info *frame;
1919 ptid_t old_inferior_ptid;
1922 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
1923 old_inferior_ptid = inferior_ptid;
1924 inferior_ptid = tp->ptid;
1926 /* Clear the executing flag to allow changes to the current frame.
1927 We are not actually running, yet. We just started a reverse execution
1928 command or a record goto command.
1929 For the latter, EXECUTING is false and this has no effect.
1930 For the former, EXECUTING is true and we're in to_wait, about to
1931 move the thread. Since we need to recompute the stack, we temporarily
1932 set EXECUTING to false.  */
1933 executing = is_executing (inferior_ptid);
1934 set_executing (inferior_ptid, 0);
1939 frame = get_current_frame ();
1941 CATCH (except, RETURN_MASK_ALL)
1943 /* Restore the previous execution state.  */
1944 set_executing (inferior_ptid, executing);
1946 /* Restore the previous inferior_ptid.  */
1947 inferior_ptid = old_inferior_ptid;
/* Re-raise after restoring the global state.  */
1949 throw_exception (except);
1953 /* Restore the previous execution state.  */
1954 set_executing (inferior_ptid, executing);
1956 /* Restore the previous inferior_ptid.  */
1957 inferior_ptid = old_inferior_ptid;
1962 /* Start replaying a thread.  Returns the new replay iterator, which is
   also stored in TP's btrace info.  */
1964 static struct btrace_insn_iterator *
1965 record_btrace_start_replaying (struct thread_info *tp)
1967 struct btrace_insn_iterator *replay;
1968 struct btrace_thread_info *btinfo;
1970 btinfo = &tp->btrace;
1973 /* We can't start replaying without trace.  */
1974 if (btinfo->begin == NULL)
1977 /* GDB stores the current frame_id when stepping in order to detect steps
1979 Since frames are computed differently when we're replaying, we need to
1980 recompute those stored frames and fix them up so we can still detect
1981 subroutines after we started replaying.  */
1984 struct frame_info *frame;
1985 struct frame_id frame_id;
1986 int upd_step_frame_id, upd_step_stack_frame_id;
1988 /* The current frame without replaying - computed via normal unwind.  */
1989 frame = get_thread_current_frame (tp);
1990 frame_id = get_frame_id (frame);
1992 /* Check if we need to update any stepping-related frame id's.  */
1993 upd_step_frame_id = frame_id_eq (frame_id,
1994 tp->control.step_frame_id);
1995 upd_step_stack_frame_id = frame_id_eq (frame_id,
1996 tp->control.step_stack_frame_id);
1998 /* We start replaying at the end of the branch trace. This corresponds
1999 to the current instruction.  */
2000 replay = XNEW (struct btrace_insn_iterator);
2001 btrace_insn_end (replay, btinfo);
2003 /* Skip gaps at the end of the trace.  */
2004 while (btrace_insn_get (replay) == NULL)
2008 steps = btrace_insn_prev (replay, 1);
2010 error (_("No trace."));
2013 /* We're not replaying, yet.  */
2014 gdb_assert (btinfo->replay == NULL);
2015 btinfo->replay = replay;
2017 /* Make sure we're not using any stale registers.  */
2018 registers_changed_ptid (tp->ptid);
2020 /* The current frame with replaying - computed via btrace unwind.  */
2021 frame = get_thread_current_frame (tp);
2022 frame_id = get_frame_id (frame);
2024 /* Replace stepping related frames where necessary.  */
2025 if (upd_step_frame_id)
2026 tp->control.step_frame_id = frame_id;
2027 if (upd_step_stack_frame_id)
2028 tp->control.step_stack_frame_id = frame_id;
/* On error, undo the partial start-replay and re-raise.  */
2030 CATCH (except, RETURN_MASK_ALL)
2032 xfree (btinfo->replay);
2033 btinfo->replay = NULL;
2035 registers_changed_ptid (tp->ptid);
2037 throw_exception (except);
2044 /* Stop replaying a thread.  */
2047 record_btrace_stop_replaying (struct thread_info *tp)
2049 struct btrace_thread_info *btinfo;
2051 btinfo = &tp->btrace;
/* A NULL replay iterator means "not replaying"; free (NULL) is a no-op,
   so this is safe to call unconditionally.  */
2053 xfree (btinfo->replay);
2054 btinfo->replay = NULL;
2056 /* Make sure we're not leaving any stale registers.  */
2057 registers_changed_ptid (tp->ptid);
2060 /* Stop replaying TP if it is at the end of its execution history.  */
2063 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2065 struct btrace_insn_iterator *replay, end;
2066 struct btrace_thread_info *btinfo;
2068 btinfo = &tp->btrace;
2069 replay = btinfo->replay;
2074 btrace_insn_end (&end, btinfo);
/* The end iterator corresponds to the current, not-yet-executed
   instruction; being there means the history is exhausted.  */
2076 if (btrace_insn_cmp (replay, &end) == 0)
2077 record_btrace_stop_replaying (tp);
2080 /* The to_resume method of target record-btrace.  */
2083 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2084 enum gdb_signal signal)
2086 struct thread_info *tp;
2087 enum btrace_thread_flag flag, cflag;
2089 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2090 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2091 step ? "step" : "cont");
2093 /* Store the execution direction of the last resume.
2095 If there is more than one to_resume call, we have to rely on infrun
2096 to not change the execution direction in-between.  */
2097 record_btrace_resume_exec_dir = execution_direction;
2099 /* As long as we're not replaying, just forward the request.
2101 For non-stop targets this means that no thread is replaying. In order to
2102 make progress, we may need to explicitly move replaying threads to the end
2103 of their execution history.  */
2104 if ((execution_direction != EXEC_REVERSE)
2105 && !record_btrace_is_replaying (ops, minus_one_ptid))
2108 ops->to_resume (ops, ptid, step, signal);
2112 /* Compute the btrace thread flag for the requested move.  */
2113 if (execution_direction == EXEC_REVERSE)
2115 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2120 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2124 /* We just indicate the resume intent here. The actual stepping happens in
2125 record_btrace_wait below.
2127 For all-stop targets, we only step INFERIOR_PTID and continue others.  */
2128 if (!target_is_non_stop_p ())
2130 gdb_assert (ptid_match (inferior_ptid, ptid));
2132 ALL_NON_EXITED_THREADS (tp)
2133 if (ptid_match (tp->ptid, ptid))
2135 if (ptid_match (tp->ptid, inferior_ptid))
2136 record_btrace_resume_thread (tp, flag);
/* Other matching threads get the continue flag (CFLAG).  */
2138 record_btrace_resume_thread (tp, cflag);
/* Non-stop: resume every matching thread with the requested flag.  */
2143 ALL_NON_EXITED_THREADS (tp)
2144 if (ptid_match (tp->ptid, ptid))
2145 record_btrace_resume_thread (tp, flag);
2148 /* Async support.  */
2149 if (target_can_async_p ())
2152 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2156 /* The to_commit_resume method of target record-btrace.  */
2159 record_btrace_commit_resume (struct target_ops *ops)
/* Only forward when we forwarded the resume itself (see
   record_btrace_resume); while replaying there is nothing to commit.  */
2161 if ((execution_direction != EXEC_REVERSE)
2162 && !record_btrace_is_replaying (ops, minus_one_ptid))
2163 ops->beneath->to_commit_resume (ops->beneath);
2166 /* Cancel resuming TP: clear its pending move/stop request and stop
   replaying it if it is at the end of its history.  */
2169 record_btrace_cancel_resume (struct thread_info *tp)
2171 enum btrace_thread_flag flags;
2173 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2177 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2178 print_thread_id (tp),
2179 target_pid_to_str (tp->ptid), flags,
2180 btrace_thread_flag_to_str (flags));
2182 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2183 record_btrace_stop_replaying_at_end (tp);
2186 /* Return a target_waitstatus indicating that we ran out of history.  */
2188 static struct target_waitstatus
2189 btrace_step_no_history (void)
2191 struct target_waitstatus status;
2193 status.kind = TARGET_WAITKIND_NO_HISTORY;
2198 /* Return a target_waitstatus indicating that a step finished.  */
2200 static struct target_waitstatus
2201 btrace_step_stopped (void)
2203 struct target_waitstatus status;
/* A completed step is reported as a SIGTRAP stop, like a live step.  */
2205 status.kind = TARGET_WAITKIND_STOPPED;
2206 status.value.sig = GDB_SIGNAL_TRAP;
2211 /* Return a target_waitstatus indicating that a thread was stopped as
2214 static struct target_waitstatus
2215 btrace_step_stopped_on_request (void)
2217 struct target_waitstatus status;
/* GDB_SIGNAL_0 - a requested stop, not a signal delivery.  */
2219 status.kind = TARGET_WAITKIND_STOPPED;
2220 status.value.sig = GDB_SIGNAL_0;
2225 /* Return a target_waitstatus indicating a spurious stop.  */
2227 static struct target_waitstatus
2228 btrace_step_spurious (void)
2230 struct target_waitstatus status;
2232 status.kind = TARGET_WAITKIND_SPURIOUS;
2237 /* Return a target_waitstatus indicating that the thread was not resumed.  */
2239 static struct target_waitstatus
2240 btrace_step_no_resumed (void)
2242 struct target_waitstatus status;
2244 status.kind = TARGET_WAITKIND_NO_RESUMED;
2249 /* Return a target_waitstatus indicating that we should wait again.  */
2251 static struct target_waitstatus
2252 btrace_step_again (void)
2254 struct target_waitstatus status;
2256 status.kind = TARGET_WAITKIND_IGNORE;
2261 /* Clear the record histories.  Frees the cached instruction and call
   histories so they are recomputed from the new replay position.  */
2264 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2266 xfree (btinfo->insn_history);
2267 xfree (btinfo->call_history);
2269 btinfo->insn_history = NULL;
2270 btinfo->call_history = NULL;
2273 /* Check whether TP's current replay position is at a breakpoint.  */
2276 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2278 struct btrace_insn_iterator *replay;
2279 struct btrace_thread_info *btinfo;
2280 const struct btrace_insn *insn;
2281 struct inferior *inf;
2283 btinfo = &tp->btrace;
2284 replay = btinfo->replay;
2289 insn = btrace_insn_get (replay);
2293 inf = find_inferior_ptid (tp->ptid);
/* Also records the stop reason (sw/hw breakpoint) in BTINFO.  */
2297 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2298 &btinfo->stop_reason);
2301 /* Step one instruction in forward direction.  */
2303 static struct target_waitstatus
2304 record_btrace_single_step_forward (struct thread_info *tp)
2306 struct btrace_insn_iterator *replay, end, start;
2307 struct btrace_thread_info *btinfo;
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2312 /* We're done if we're not replaying.  */
2314 return btrace_step_no_history ();
2316 /* Check if we're stepping a breakpoint.  */
2317 if (record_btrace_replay_at_breakpoint (tp))
2318 return btrace_step_stopped ();
2320 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2321 jump back to the instruction at which we started.  */
2327 /* We will bail out here if we continue stepping after reaching the end
2328 of the execution history.  */
2329 steps = btrace_insn_next (replay, 1);
2333 return btrace_step_no_history ();
2336 while (btrace_insn_get (replay) == NULL);
2338 /* Determine the end of the instruction trace.  */
2339 btrace_insn_end (&end, btinfo);
2341 /* The execution trace contains (and ends with) the current instruction.
2342 This instruction has not been executed, yet, so the trace really ends
2343 one instruction earlier.  */
2344 if (btrace_insn_cmp (replay, &end) == 0)
2345 return btrace_step_no_history ();
/* One instruction executed; callers decide whether to keep going.  */
2347 return btrace_step_spurious ();
2350 /* Step one instruction in backward direction.  */
2352 static struct target_waitstatus
2353 record_btrace_single_step_backward (struct thread_info *tp)
2355 struct btrace_insn_iterator *replay, start;
2356 struct btrace_thread_info *btinfo;
2358 btinfo = &tp->btrace;
2359 replay = btinfo->replay;
2361 /* Start replaying if we're not already doing so.  */
2363 replay = record_btrace_start_replaying (tp);
2365 /* If we can't step any further, we reached the end of the history.
2366 Skip gaps during replay. If we end up at a gap (at the beginning of
2367 the trace), jump back to the instruction at which we started.  */
2373 steps = btrace_insn_prev (replay, 1);
2377 return btrace_step_no_history ();
2380 while (btrace_insn_get (replay) == NULL);
2382 /* Check if we're stepping a breakpoint.
2384 For reverse-stepping, this check is after the step. There is logic in
2385 infrun.c that handles reverse-stepping separately. See, for example,
2386 proceed and adjust_pc_after_break.
2388 This code assumes that for reverse-stepping, PC points to the last
2389 de-executed instruction, whereas for forward-stepping PC points to the
2390 next to-be-executed instruction.  */
2391 if (record_btrace_replay_at_breakpoint (tp))
2392 return btrace_step_stopped ();
2394 return btrace_step_spurious ();
2397 /* Step a single thread.  Consumes and acts on TP's pending
   move/stop flags; returns the resulting wait status.  */
2399 static struct target_waitstatus
2400 record_btrace_step_thread (struct thread_info *tp)
2402 struct btrace_thread_info *btinfo;
2403 struct target_waitstatus status;
2404 enum btrace_thread_flag flags;
2406 btinfo = &tp->btrace;
/* Consume the request; it may be re-armed below for continues.  */
2408 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2409 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2411 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2412 target_pid_to_str (tp->ptid), flags,
2413 btrace_thread_flag_to_str (flags));
2415 /* We can't step without an execution history.  */
2416 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2417 return btrace_step_no_history ();
2422 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
/* BTHR_STOP: report the requested stop.  */
2425 return btrace_step_stopped_on_request ();
/* BTHR_STEP: a single forward step ends in a stop.  */
2428 status = record_btrace_single_step_forward (tp);
2429 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2432 return btrace_step_stopped ();
/* BTHR_RSTEP: a single backward step ends in a stop.  */
2435 status = record_btrace_single_step_backward (tp);
2436 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2439 return btrace_step_stopped ();
/* BTHR_CONT: keep stepping forward until something happens.  */
2442 status = record_btrace_single_step_forward (tp);
2443 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2446 btinfo->flags |= flags;
2447 return btrace_step_again ();
/* BTHR_RCONT: keep stepping backward until something happens.  */
2450 status = record_btrace_single_step_backward (tp);
2451 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2454 btinfo->flags |= flags;
2455 return btrace_step_again ();
2458 /* We keep threads moving at the end of their execution history. The to_wait
2459 method will stop the thread for whom the event is reported.  */
2460 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2461 btinfo->flags |= flags;
2466 /* A vector of threads.  Element type for VEC (tp_t) work lists used by
   record_btrace_wait below.  */
2468 typedef struct thread_info * tp_t;
2471 /* Announce further events if necessary.  Marks the async event handler
   when threads are still moving or still have no-history stops pending.  */
2474 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2475 const VEC (tp_t) *no_history)
2477 int more_moving, more_no_history;
2479 more_moving = !VEC_empty (tp_t, moving);
2480 more_no_history = !VEC_empty (tp_t, no_history);
2482 if (!more_moving && !more_no_history)
2486 DEBUG ("movers pending");
2488 if (more_no_history)
2489 DEBUG ("no-history pending");
2491 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2494 /* The to_wait method of target record-btrace.  */
2497 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2498 struct target_waitstatus *status, int options)
2500 VEC (tp_t) *moving, *no_history;
2501 struct thread_info *tp, *eventing;
2502 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2504 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2506 /* As long as we're not replaying, just forward the request.  */
2507 if ((execution_direction != EXEC_REVERSE)
2508 && !record_btrace_is_replaying (ops, minus_one_ptid))
2511 return ops->to_wait (ops, ptid, status, options);
2517 make_cleanup (VEC_cleanup (tp_t), &moving);
2518 make_cleanup (VEC_cleanup (tp_t), &no_history);
2520 /* Keep a work list of moving threads.  */
2521 ALL_NON_EXITED_THREADS (tp)
2522 if (ptid_match (tp->ptid, ptid)
2523 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2524 VEC_safe_push (tp_t, moving, tp);
/* Nothing to move: report TARGET_WAITKIND_NO_RESUMED.  */
2526 if (VEC_empty (tp_t, moving))
2528 *status = btrace_step_no_resumed ();
2530 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2531 target_waitstatus_to_string (status));
2533 do_cleanups (cleanups);
2537 /* Step moving threads one by one, one step each, until either one thread
2538 reports an event or we run out of threads to step.
2540 When stepping more than one thread, chances are that some threads reach
2541 the end of their execution history earlier than others. If we reported
2542 this immediately, all-stop on top of non-stop would stop all threads and
2543 resume the same threads next time. And we would report the same thread
2544 having reached the end of its execution history again.
2546 In the worst case, this would starve the other threads. But even if other
2547 threads would be allowed to make progress, this would result in far too
2548 many intermediate stops.
2550 We therefore delay the reporting of "no execution history" until we have
2551 nothing else to report. By this time, all threads should have moved to
2552 either the beginning or the end of their execution history. There will
2553 be a single user-visible stop.  */
2555 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2560 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2562 *status = record_btrace_step_thread (tp);
2564 switch (status->kind)
2566 case TARGET_WAITKIND_IGNORE:
/* Move no-history threads off the work list for later.  */
2570 case TARGET_WAITKIND_NO_HISTORY:
2571 VEC_safe_push (tp_t, no_history,
2572 VEC_ordered_remove (tp_t, moving, ix));
/* Any other kind: this thread gets the event.  */
2576 eventing = VEC_unordered_remove (tp_t, moving, ix);
2582 if (eventing == NULL)
2584 /* We started with at least one moving thread. This thread must have
2585 either stopped or reached the end of its execution history.
2587 In the former case, EVENTING must not be NULL.
2588 In the latter case, NO_HISTORY must not be empty.  */
2589 gdb_assert (!VEC_empty (tp_t, no_history));
2591 /* We kept threads moving at the end of their execution history. Stop
2592 EVENTING now that we are going to report its stop.  */
2593 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2594 eventing->btrace.flags &= ~BTHR_MOVE;
2596 *status = btrace_step_no_history ();
2599 gdb_assert (eventing != NULL);
2601 /* We kept threads replaying at the end of their execution history. Stop
2602 replaying EVENTING now that we are going to report its stop.  */
2603 record_btrace_stop_replaying_at_end (eventing);
2605 /* Stop all other threads.  */
2606 if (!target_is_non_stop_p ())
2607 ALL_NON_EXITED_THREADS (tp)
2608 record_btrace_cancel_resume (tp);
2610 /* In async mode, we need to announce further events.  */
2611 if (target_is_async_p ())
2612 record_btrace_maybe_mark_async_event (moving, no_history)
2614 /* Start record histories anew from the current position.  */
2615 record_btrace_clear_histories (&eventing->btrace);
2617 /* We moved the replay position but did not update registers.  */
2618 registers_changed_ptid (eventing->ptid);
2620 DEBUG ("wait ended by thread %s (%s): %s",
2621 print_thread_id (eventing),
2622 target_pid_to_str (eventing->ptid),
2623 target_waitstatus_to_string (status));
2625 do_cleanups (cleanups);
2626 return eventing->ptid;
2629 /* The to_stop method of target record-btrace.  */
2632 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2634 DEBUG ("stop %s", target_pid_to_str (ptid));
2636 /* As long as we're not replaying, just forward the request.  */
2637 if ((execution_direction != EXEC_REVERSE)
2638 && !record_btrace_is_replaying (ops, minus_one_ptid))
2641 ops->to_stop (ops, ptid);
/* While replaying, request a stop: record_btrace_step_thread will
   report it via btrace_step_stopped_on_request.  */
2645 struct thread_info *tp;
2647 ALL_NON_EXITED_THREADS (tp)
2648 if (ptid_match (tp->ptid, ptid))
2650 tp->btrace.flags &= ~BTHR_MOVE;
2651 tp->btrace.flags |= BTHR_STOP;
2656 /* The to_can_execute_reverse method of target record-btrace.
   NOTE(review): body not visible in this chunk - presumably returns
   nonzero, since btrace supports replaying backwards.  Confirm against
   the full source.  */
2659 record_btrace_can_execute_reverse (struct target_ops *self)
2664 /* The to_stopped_by_sw_breakpoint method of target record-btrace.  */
2667 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2669 if (record_btrace_is_replaying (ops, minus_one_ptid))
2671 struct thread_info *tp = inferior_thread ();
/* The stop reason was recorded by record_btrace_replay_at_breakpoint.  */
2673 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2676 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2679 /* The to_supports_stopped_by_sw_breakpoint method of target
2683 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
/* Always supported while replaying; otherwise ask the target beneath.  */
2685 if (record_btrace_is_replaying (ops, minus_one_ptid))
2688 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2691 /* The to_stopped_by_hw_breakpoint method of target record-btrace.  */
2694 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2696 if (record_btrace_is_replaying (ops, minus_one_ptid))
2698 struct thread_info *tp = inferior_thread ();
2700 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2703 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2706 /* The to_supports_stopped_by_hw_breakpoint method of target
2710 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
/* Always supported while replaying; otherwise ask the target beneath.  */
2712 if (record_btrace_is_replaying (ops, minus_one_ptid))
2715 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2718 /* The to_update_thread_list method of target record-btrace.  */
2721 record_btrace_update_thread_list (struct target_ops *ops)
2723 /* We don't add or remove threads during replay.  */
2724 if (record_btrace_is_replaying (ops, minus_one_ptid))
2727 /* Forward the request.  */
2729 ops->to_update_thread_list (ops);
2732 /* The to_thread_alive method of target record-btrace.  */
2735 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2737 /* We don't add or remove threads during replay.  */
2738 if (record_btrace_is_replaying (ops, minus_one_ptid))
2739 return find_thread_ptid (ptid) != NULL;
2741 /* Forward the request.  */
2743 return ops->to_thread_alive (ops, ptid);
2746 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2750 record_btrace_set_replay (struct thread_info *tp,
2751 const struct btrace_insn_iterator *it)
2753 struct btrace_thread_info *btinfo;
2755 btinfo = &tp->btrace;
/* A NULL or empty iterator means stop replaying.  */
2757 if (it == NULL || it->function == NULL)
2758 record_btrace_stop_replaying (tp);
/* Start replaying if needed, then move the replay position to IT
   (no-op if already there).  */
2761 if (btinfo->replay == NULL)
2762 record_btrace_start_replaying (tp);
2763 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2766 *btinfo->replay = *it;
2767 registers_changed_ptid (tp->ptid);
2770 /* Start anew from the new replay position.  */
2771 record_btrace_clear_histories (btinfo);
2773 stop_pc = regcache_read_pc (get_current_regcache ());
2774 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2777 /* The to_goto_record_begin method of target record-btrace.  */
2780 record_btrace_goto_begin (struct target_ops *self)
2782 struct thread_info *tp;
2783 struct btrace_insn_iterator begin;
2785 tp = require_btrace_thread ();
2787 btrace_insn_begin (&begin, &tp->btrace);
2789 /* Skip gaps at the beginning of the trace.  */
2790 while (btrace_insn_get (&begin) == NULL)
2794 steps = btrace_insn_next (&begin, 1);
/* A trace that consists only of gaps is no trace at all.  */
2796 error (_("No trace."));
2799 record_btrace_set_replay (tp, &begin);
2802 /* The to_goto_record_end method of target record-btrace.  */
2805 record_btrace_goto_end (struct target_ops *ops)
2807 struct thread_info *tp;
2809 tp = require_btrace_thread ();
/* A NULL iterator stops replaying, i.e. goes to the end of the trace.  */
2811 record_btrace_set_replay (tp, NULL);
2814 /* The to_goto_record method of target record-btrace.  */
2817 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2819 struct thread_info *tp;
2820 struct btrace_insn_iterator it;
2821 unsigned int number;
2826 /* Check for wrap-arounds.  */
2828 error (_("Instruction number out of range."));
2830 tp = require_btrace_thread ();
2832 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2834 error (_("No such instruction."));
2836 record_btrace_set_replay (tp, &it);
2839 /* The to_record_stop_replaying method of target record-btrace.  */
2842 record_btrace_stop_replaying_all (struct target_ops *self)
2844 struct thread_info *tp;
/* Stop replaying every live thread.  */
2846 ALL_NON_EXITED_THREADS (tp)
2847 record_btrace_stop_replaying (tp);
2850 /* The to_execution_direction target method.  */
2852 static enum exec_direction_kind
2853 record_btrace_execution_direction (struct target_ops *self)
/* Report the direction of the last resume (see record_btrace_resume).  */
2855 return record_btrace_resume_exec_dir;
2858 /* The to_prepare_to_generate_core target method.  Relaxes the replay
   restrictions (see record_btrace_fetch_registers et al.).  */
2861 record_btrace_prepare_to_generate_core (struct target_ops *self)
2863 record_btrace_generating_corefile = 1;
2866 /* The to_done_generating_core target method.  */
2869 record_btrace_done_generating_core (struct target_ops *self)
2871 record_btrace_generating_corefile = 0;
2874 /* Initialize the record-btrace target ops. */
2877 init_record_btrace_ops (void)
2879 struct target_ops *ops;
2881 ops = &record_btrace_ops;
2882 ops->to_shortname = "record-btrace";
2883 ops->to_longname = "Branch tracing target";
2884 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2885 ops->to_open = record_btrace_open;
2886 ops->to_close = record_btrace_close;
2887 ops->to_async = record_btrace_async;
2888 ops->to_detach = record_detach;
2889 ops->to_disconnect = record_btrace_disconnect;
2890 ops->to_mourn_inferior = record_mourn_inferior;
2891 ops->to_kill = record_kill;
2892 ops->to_stop_recording = record_btrace_stop_recording;
2893 ops->to_info_record = record_btrace_info;
2894 ops->to_insn_history = record_btrace_insn_history;
2895 ops->to_insn_history_from = record_btrace_insn_history_from;
2896 ops->to_insn_history_range = record_btrace_insn_history_range;
2897 ops->to_call_history = record_btrace_call_history;
2898 ops->to_call_history_from = record_btrace_call_history_from;
2899 ops->to_call_history_range = record_btrace_call_history_range;
2900 ops->to_record_is_replaying = record_btrace_is_replaying;
2901 ops->to_record_will_replay = record_btrace_will_replay;
2902 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2903 ops->to_xfer_partial = record_btrace_xfer_partial;
2904 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2905 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2906 ops->to_fetch_registers = record_btrace_fetch_registers;
2907 ops->to_store_registers = record_btrace_store_registers;
2908 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2909 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2910 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2911 ops->to_resume = record_btrace_resume;
2912 ops->to_commit_resume = record_btrace_commit_resume;
2913 ops->to_wait = record_btrace_wait;
2914 ops->to_stop = record_btrace_stop;
2915 ops->to_update_thread_list = record_btrace_update_thread_list;
2916 ops->to_thread_alive = record_btrace_thread_alive;
2917 ops->to_goto_record_begin = record_btrace_goto_begin;
2918 ops->to_goto_record_end = record_btrace_goto_end;
2919 ops->to_goto_record = record_btrace_goto;
2920 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2921 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2922 ops->to_supports_stopped_by_sw_breakpoint
2923 = record_btrace_supports_stopped_by_sw_breakpoint;
2924 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2925 ops->to_supports_stopped_by_hw_breakpoint
2926 = record_btrace_supports_stopped_by_hw_breakpoint;
2927 ops->to_execution_direction = record_btrace_execution_direction;
2928 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2929 ops->to_done_generating_core = record_btrace_done_generating_core;
2930 ops->to_stratum = record_stratum;
2931 ops->to_magic = OPS_MAGIC;
2934 /* Start recording in BTS format.  */
2937 cmd_record_btrace_bts_start (char *args, int from_tty)
2939 if (args != NULL && *args != 0)
2940 error (_("Invalid argument."));
/* Select BTS before pushing the target; presumably the target open
   routine reads record_btrace_conf.format -- TODO confirm.  */
2942 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2946 execute_command ("target record-btrace", from_tty);
2948 CATCH (exception, RETURN_MASK_ALL)
/* On failure, undo the format selection and re-throw.  */
2950 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2951 throw_exception (exception);
2956 /* Start recording in Intel Processor Trace format.  */
2959 cmd_record_btrace_pt_start (char *args, int from_tty)
2961 if (args != NULL && *args != 0)
2962 error (_("Invalid argument."));
/* Select Intel PT before pushing the target; presumably the target
   open routine reads record_btrace_conf.format -- TODO confirm.  */
2964 record_btrace_conf.format = BTRACE_FORMAT_PT;
2968 execute_command ("target record-btrace", from_tty);
2970 CATCH (exception, RETURN_MASK_ALL)
/* On failure, undo the format selection and re-throw.  */
2972 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2973 throw_exception (exception);
2978 /* Alias for "target record".  */
2981 cmd_record_btrace_start (char *args, int from_tty)
2983 if (args != NULL && *args != 0)
2984 error (_("Invalid argument."));
/* Try Intel Processor Trace first...  */
2986 record_btrace_conf.format = BTRACE_FORMAT_PT;
2990 execute_command ("target record-btrace", from_tty);
2992 CATCH (exception, RETURN_MASK_ALL)
/* ...and fall back to BTS if starting in PT format failed.  */
2994 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2998 execute_command ("target record-btrace", from_tty);
3000 CATCH (exception, RETURN_MASK_ALL)
/* Neither format worked; reset the format and re-throw the
   last error.  */
3002 record_btrace_conf.format = BTRACE_FORMAT_NONE;
3003 throw_exception (exception);
3010 /* The "set record btrace" command. */
3013 cmd_set_record_btrace (char *args, int from_tty)
3015 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
3018 /* The "show record btrace" command. */
3021 cmd_show_record_btrace (char *args, int from_tty)
3023 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3026 /* The "show record btrace replay-memory-access" command. */
3029 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3030 struct cmd_list_element *c, const char *value)
3032 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3033 replay_memory_access);
3036 /* The "set record btrace bts" command.  */
3039 cmd_set_record_btrace_bts (char *args, int from_tty)
/* "set record btrace bts" is a pure prefix command; tell the user so
   and list the available sub-commands.  */
3041 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3042 "by an appropriate subcommand.\n"));
3043 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3044 all_commands, gdb_stdout);
3047 /* The "show record btrace bts" command. */
3050 cmd_show_record_btrace_bts (char *args, int from_tty)
3052 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3055 /* The "set record btrace pt" command.  */
3058 cmd_set_record_btrace_pt (char *args, int from_tty)
/* "set record btrace pt" is a pure prefix command; tell the user so
   and list the available sub-commands.  */
3060 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3061 "by an appropriate subcommand.\n"));
3062 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3063 all_commands, gdb_stdout);
3066 /* The "show record btrace pt" command. */
3069 cmd_show_record_btrace_pt (char *args, int from_tty)
3071 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3074 /* The "record bts buffer-size" show value function.  */
3077 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3078 struct cmd_list_element *c,
/* Print to FILE, the stream the caller asked for, as a show-value
   callback should.  */
3081 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3085 /* The "record pt buffer-size" show value function.  */
3088 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3089 struct cmd_list_element *c,
/* Print to FILE, the stream the caller asked for, as a show-value
   callback should.  */
3092 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3096 void _initialize_record_btrace (void);
3098 /* Initialize btrace commands. */
3101 _initialize_record_btrace (void)
/* "record btrace" starts recording (alias "record b").  */
3103 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3104 _("Start branch trace recording."), &record_btrace_cmdlist,
3105 "record btrace ", 0, &record_cmdlist);
3106 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
/* "record btrace bts" forces the Branch Trace Store format
   (alias "record bts").  */
3108 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3110 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3111 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3112 This format may not be available on all processors."),
3113 &record_btrace_cmdlist);
3114 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
/* "record btrace pt" forces the Intel Processor Trace format
   (alias "record pt").  */
3116 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3118 Start branch trace recording in Intel Processor Trace format.\n\n\
3119 This format may not be available on all processors."),
3120 &record_btrace_cmdlist);
3121 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
/* "set/show record btrace" prefix commands.  */
3123 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3124 _("Set record options"), &set_record_btrace_cmdlist,
3125 "set record btrace ", 0, &set_record_cmdlist);
3127 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3128 _("Show record options"), &show_record_btrace_cmdlist,
3129 "show record btrace ", 0, &show_record_cmdlist);
/* "set/show record btrace replay-memory-access" enum option; writes
   through the replay_memory_access variable declared above.  */
3131 add_setshow_enum_cmd ("replay-memory-access", no_class,
3132 replay_memory_access_types, &replay_memory_access, _("\
3133 Set what memory accesses are allowed during replay."), _("\
3134 Show what memory accesses are allowed during replay."),
3135 _("Default is READ-ONLY.\n\n\
3136 The btrace record target does not trace data.\n\
3137 The memory therefore corresponds to the live target and not \
3138 to the current replay position.\n\n\
3139 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3140 When READ-WRITE, allow accesses to read-only and read-write memory during \
3142 NULL, cmd_show_replay_memory_access,
3143 &set_record_btrace_cmdlist,
3144 &show_record_btrace_cmdlist);
/* "set/show record btrace bts" prefix commands.  */
3146 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3147 _("Set record btrace bts options"),
3148 &set_record_btrace_bts_cmdlist,
3149 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3151 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3152 _("Show record btrace bts options"),
3153 &show_record_btrace_bts_cmdlist,
3154 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
/* "set/show record btrace bts buffer-size"; writes through
   record_btrace_conf.bts.size.  */
3156 add_setshow_uinteger_cmd ("buffer-size", no_class,
3157 &record_btrace_conf.bts.size,
3158 _("Set the record/replay bts buffer size."),
3159 _("Show the record/replay bts buffer size."), _("\
3160 When starting recording request a trace buffer of this size. \
3161 The actual buffer size may differ from the requested size. \
3162 Use \"info record\" to see the actual buffer size.\n\n\
3163 Bigger buffers allow longer recording but also take more time to process \
3164 the recorded execution trace.\n\n\
3165 The trace buffer size may not be changed while recording."), NULL,
3166 show_record_bts_buffer_size_value,
3167 &set_record_btrace_bts_cmdlist,
3168 &show_record_btrace_bts_cmdlist);
/* "set/show record btrace pt" prefix commands.  */
3170 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3171 _("Set record btrace pt options"),
3172 &set_record_btrace_pt_cmdlist,
3173 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3175 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3176 _("Show record btrace pt options"),
3177 &show_record_btrace_pt_cmdlist,
3178 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
/* "set/show record btrace pt buffer-size"; writes through
   record_btrace_conf.pt.size.  */
3180 add_setshow_uinteger_cmd ("buffer-size", no_class,
3181 &record_btrace_conf.pt.size,
3182 _("Set the record/replay pt buffer size."),
3183 _("Show the record/replay pt buffer size."), _("\
3184 Bigger buffers allow longer recording but also take more time to process \
3185 the recorded execution.\n\
3186 The actual buffer size may differ from the requested size. Use \"info record\" \
3187 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3188 &set_record_btrace_pt_cmdlist,
3189 &show_record_btrace_pt_cmdlist);
/* Register the record-btrace target with GDB.  */
3191 init_record_btrace_ops ();
3192 add_target (&record_btrace_ops);
/* Cache for block frames, keyed by bfcache_hash/bfcache_eq.  */
3194 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
/* Default buffer sizes: 64 KiB for BTS, 16 KiB for Intel PT.  */
3197 record_btrace_conf.bts.size = 64 * 1024;
3198 record_btrace_conf.pt.size = 16 * 1024;