1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace.  */
45 static struct target_ops record_btrace_ops;
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer *record_btrace_thread_observer;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
/* NOTE(review): array braces (and likely a terminating NULL entry) appear
   elided in this extraction.  */
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
60 /* The currently allowed replay memory access type. */
/* Defaults to read-only while replaying.  */
61 static const char *replay_memory_access = replay_memory_access_read_only;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
/* `args...' / `##args' are the GNU named-variadic-macro extension; the
   do/while wrapper lines appear elided in this extraction.  */
93 #define DEBUG(msg, args...) \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace. This function never
   returns NULL.  */
109 static struct thread_info *
110 require_btrace_thread (void)
112 struct thread_info *tp;
116 tp = find_thread_ptid (inferior_ptid);
/* NOTE(review): the tp == NULL guard for this error appears elided here.  */
118 error (_("No thread."));
120 validate_registers_access ();
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
/* NOTE(review): the btrace_fetch call and "return tp;" appear elided.  */
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
133 Throws an error if there is no thread or no trace. This function never
   returns NULL.  */
136 static struct btrace_thread_info *
137 require_btrace (void)
139 struct thread_info *tp;
141 tp = require_btrace_thread ();
/* NOTE(review): trailing "return &tp->btrace;" appears elided here.  */
146 /* Enable branch tracing for one thread. Warn on errors. */
/* Used as a new-thread observer (see record_btrace_auto_enable); any error
   thrown by btrace_enable is caught and demoted to a warning so it does not
   propagate out of the observer callback.  */
149 record_btrace_enable_warn (struct thread_info *tp)
153 btrace_enable (tp, &record_btrace_conf);
155 CATCH (error, RETURN_MASK_ERROR)
157 warning ("%s", error.message);
162 /* Callback function to disable branch tracing for one thread. */
/* Cleanup callback: ARG is really a struct thread_info *.  */
165 record_btrace_disable_callback (void *arg)
167 struct thread_info *tp = (struct thread_info *) arg;
/* NOTE(review): the btrace_disable (tp) call appears elided here.  */
172 /* Enable automatic tracing of new threads. */
175 record_btrace_auto_enable (void)
177 DEBUG ("attach thread observer");
/* Each newly created thread will have tracing enabled via
   record_btrace_enable_warn.  */
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
183 /* Disable automatic tracing of new threads. */
186 record_btrace_auto_disable (void)
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
/* NOTE(review): an early "return;" appears elided here.  */
192 DEBUG ("detach thread observer");
194 observer_detach_new_thread (record_btrace_thread_observer);
/* Clear the handle so a second call is a no-op.  */
195 record_btrace_thread_observer = NULL;
198 /* The record-btrace async event handler function. */
/* Dispatches a queued replay/reverse-execution event to the core.  */
201 record_btrace_handle_async_inferior_event (gdb_client_data data)
203 inferior_event_handler (INF_REG_EVENT, NULL);
206 /* See record-btrace.h. */
209 record_btrace_push_target (void)
/* Trace all threads created from here on.  */
213 record_btrace_auto_enable ();
215 push_target (&record_btrace_ops);
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
220 record_btrace_generating_corefile = 0;
/* Announce the new record target to interested parties (e.g. MI).  */
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
226 /* The to_open method of target record-btrace. */
/* ARGS optionally selects threads by number; empty/NULL means all
   non-exited threads.  */
229 record_btrace_open (const char *args, int from_tty)
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
238 if (!target_has_execution)
239 error (_("The program is not being run."));
241 gdb_assert (record_btrace_thread_observer == NULL);
/* If enabling fails for a later thread, the cleanups disable tracing
   again for the threads already enabled.  */
243 disable_chain = make_cleanup (null_cleanup, NULL);
244 ALL_NON_EXITED_THREADS (tp)
245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
247 btrace_enable (tp, &record_btrace_conf);
249 make_cleanup (record_btrace_disable_callback, tp);
252 record_btrace_push_target ();
/* Success: keep tracing enabled.  */
254 discard_cleanups (disable_chain);
257 /* The to_stop_recording method of target record-btrace. */
260 record_btrace_stop_recording (struct target_ops *self)
262 struct thread_info *tp;
264 DEBUG ("stop recording");
265 
266 record_btrace_auto_disable ();
/* Disable tracing for every thread that is still being traced.  */
268 ALL_NON_EXITED_THREADS (tp)
269 if (tp->btrace.target != NULL)
/* NOTE(review): the btrace_disable (tp) call appears elided here.  */
273 /* The to_disconnect method of target record-btrace. */
276 record_btrace_disconnect (struct target_ops *self, const char *args,
279 struct target_ops *beneath = self->beneath;
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
284 /* Forward disconnect. */
/* BENEATH was captured before unpush_target invalidated SELF's slot.  */
285 beneath->to_disconnect (beneath, args, from_tty);
288 /* The to_close method of target record-btrace. */
291 record_btrace_close (struct target_ops *self)
293 struct thread_info *tp;
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
/* btrace_teardown differs from btrace_disable in that it does not assume
   the target below still accepts requests -- NOTE(review): confirm.  */
304 ALL_NON_EXITED_THREADS (tp)
305 btrace_teardown (tp);
308 /* The to_async method of target record-btrace. */
311 record_btrace_async (struct target_ops *ops, int enable)
/* NOTE(review): the "if (enable)" / "else" lines selecting between mark and
   clear appear elided in this extraction.  */
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
/* Propagate the async setting to the target beneath.  */
318 ops->beneath->to_async (ops->beneath, enable);
321 /* Adjusts the size and returns a human readable size suffix. */
/* Tests whether *SIZE is an exact multiple of 1 GiB, 1 MiB, or 1 KiB, in
   that order; presumably scales *SIZE down and returns "GB"/"MB"/"kB"
   accordingly (bodies elided) -- NOTE(review): confirm against upstream.  */
324 record_btrace_adjust_size (unsigned int *size)
330 if ((sz & ((1u << 30) - 1)) == 0)
335 else if ((sz & ((1u << 20) - 1)) == 0)
340 else if ((sz & ((1u << 10) - 1)) == 0)
349 /* Print a BTS configuration. */
352 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
/* Scale the buffer size to the largest exact unit before printing.  */
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
365 /* Print an Intel Processor Trace configuration. */
368 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
/* Scale the buffer size to the largest exact unit before printing.  */
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
381 /* Print a branch tracing configuration. */
384 record_btrace_print_conf (const struct btrace_config *conf)
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
389 switch (conf->format)
391 case BTRACE_FORMAT_NONE:
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
406 /* The to_info_record method of target record-btrace. */
/* Implements "info record": prints the configuration, instruction/function
   counts, gaps, and the replay position if replaying.  */
409 record_btrace_info (struct target_ops *self)
411 struct btrace_thread_info *btinfo;
412 const struct btrace_config *conf;
413 struct thread_info *tp;
414 unsigned int insns, calls, gaps;
418 tp = find_thread_ptid (inferior_ptid);
/* NOTE(review): the tp == NULL guard for this error appears elided.  */
420 error (_("No thread."));
422 validate_registers_access ();
424 btinfo = &tp->btrace;
426 conf = btrace_conf (btinfo);
428 record_btrace_print_conf (conf);
436 if (!btrace_is_empty (tp))
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
/* The last function segment gives the total call count.  */
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
443 calls = btrace_call_number (&call);
445 btrace_insn_end (&insn, btinfo);
446 insns = btrace_insn_number (&insn);
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn) != NULL)
/* NOTE(review): the "insns -= 1;" adjustment appears elided here.  */
453 gaps = btinfo->ngaps;
456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
465 /* Print a decode error. */
/* Translate ERRCODE for the given trace FORMAT into a human-readable
   string and emit it as "[decode error (N): <string>]" on UIOUT.  */
468 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
/* Fallback for codes the switch below does not recognize.  */
474 errstr = _("unknown");
482 case BTRACE_FORMAT_BTS:
488 case BDE_BTS_OVERFLOW:
489 errstr = _("instruction overflow");
492 case BDE_BTS_INSN_SIZE:
493 errstr = _("unknown instruction");
498 #if defined (HAVE_LIBIPT)
499 case BTRACE_FORMAT_PT:
502 case BDE_PT_USER_QUIT:
504 errstr = _("trace decode cancelled");
507 case BDE_PT_DISABLED:
509 errstr = _("disabled");
512 case BDE_PT_OVERFLOW:
514 errstr = _("overflow");
/* For other PT codes, let libipt render the message.  */
519 errstr = pt_errstr (pt_errcode (errcode));
523 #endif /* defined (HAVE_LIBIPT) */
526 uiout->text (_("["));
529 uiout->text (_("decode error ("));
530 uiout->field_int ("errcode", errcode);
531 uiout->text (_("): "));
533 uiout->text (errstr);
534 uiout->text (_("]\n"));
537 /* Print an unsigned int. */
/* Emit VAL into field FLD of UIOUT; ui_out has no native unsigned field.  */
540 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
542 uiout->field_fmt (fld, "%u", val);
545 /* A range of source lines. */
547 struct btrace_line_range
549 /* The symtab this line is from. */
550 struct symtab *symtab;
552 /* The first line (inclusive). */
/* NOTE(review): the "int begin;" field declaration appears elided here.  */
555 /* The last line (exclusive). */
/* NOTE(review): the "int end;" field declaration appears elided here.  */
559 /* Construct a line range. */
561 static struct btrace_line_range
562 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
564 struct btrace_line_range range;
566 range.symtab = symtab;
/* NOTE(review): begin/end assignments and "return range;" appear elided.  */
573 /* Add a line to a line range. */
/* Grow RANGE so it covers LINE and return the updated range.  An empty
   range (end <= begin) becomes [line, line + 1).  */
575 static struct btrace_line_range
576 btrace_line_range_add (struct btrace_line_range range, int line)
578 if (range.end <= range.begin)
580 /* This is the first entry. */
/* NOTE(review): the "range.begin = line;" assignment appears elided.  */
582 range.end = line + 1;
584 else if (line < range.begin)
/* NOTE(review): begin/end updates and "return range;" appear elided.  */
586 else if (range.end < line)
592 /* Return non-zero if RANGE is empty, zero otherwise. */
595 btrace_line_range_is_empty (struct btrace_line_range range)
597 return range.end <= range.begin;
600 /* Return non-zero if LHS contains RHS, zero otherwise. */
/* Containment requires the same symtab and [rhs.begin, rhs.end) nested
   inside [lhs.begin, lhs.end).  */
603 btrace_line_range_contains_range (struct btrace_line_range lhs,
604 struct btrace_line_range rhs)
606 return ((lhs.symtab == rhs.symtab)
607 && (lhs.begin <= rhs.begin)
608 && (rhs.end <= lhs.end));
611 /* Find the line range associated with PC. */
/* Returns an empty range (possibly with a NULL symtab) if PC has no
   usable line table.  */
613 static struct btrace_line_range
614 btrace_find_line_range (CORE_ADDR pc)
616 struct btrace_line_range range;
617 struct linetable_entry *lines;
618 struct linetable *ltable;
619 struct symtab *symtab;
622 symtab = find_pc_line_symtab (pc);
/* NOTE(review): the NULL checks guarding these early returns appear
   elided in this extraction.  */
624 return btrace_mk_line_range (NULL, 0, 0);
626 ltable = SYMTAB_LINETABLE (symtab);
628 return btrace_mk_line_range (symtab, 0, 0);
630 nlines = ltable->nitems;
631 lines = ltable->item;
633 return btrace_mk_line_range (symtab, 0, 0);
635 range = btrace_mk_line_range (symtab, 0, 0);
/* Collect every line recorded exactly at PC; presumably the last entry is
   excluded as an end-of-sequence marker -- NOTE(review): confirm.  */
636 for (i = 0; i < nlines - 1; i++)
638 if ((lines[i].pc == pc) && (lines[i].line != 0))
639 range = btrace_line_range_add (range, lines[i].line);
645 /* Print source lines in LINES to UIOUT.
647 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
648 instructions corresponding to that source line. When printing a new source
649 line, we do the cleanups for the open chain and open a new cleanup chain for
650 the new source line. If the source line range in LINES is not empty, this
651 function will leave the cleanup chain for the last printed source line open
652 so instructions can be added to it. */
655 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
656 struct cleanup **ui_item_chain, int flags)
658 print_source_lines_flags psl_flags;
662 if (flags & DISASSEMBLY_FILENAME)
663 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
665 for (line = lines.begin; line < lines.end; ++line)
/* Close the tuple/list opened for the previous source line, if any.  */
667 if (*ui_item_chain != NULL)
668 do_cleanups (*ui_item_chain);
671 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line")
673 print_source_lines (lines.symtab, line, line + 1, psl_flags);
/* Open the instruction list the caller will append to.  */
675 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
679 /* Disassemble a section of the recorded instruction trace. */
/* Print instructions in [BEGIN, END) to UIOUT, interleaving source lines
   when DISASSEMBLY_SOURCE is set in FLAGS and marking gaps as decode
   errors.  */
682 btrace_insn_history (struct ui_out *uiout,
683 const struct btrace_thread_info *btinfo,
684 const struct btrace_insn_iterator *begin,
685 const struct btrace_insn_iterator *end, int flags)
687 struct cleanup *cleanups, *ui_item_chain;
688 struct gdbarch *gdbarch;
689 struct btrace_insn_iterator it;
690 struct btrace_line_range last_lines;
692 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
693 btrace_insn_number (end));
695 flags |= DISASSEMBLY_SPECULATIVE;
697 gdbarch = target_gdbarch ();
698 last_lines = btrace_mk_line_range (NULL, 0, 0);
700 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
702 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
703 instructions corresponding to that line. */
704 ui_item_chain = NULL;
706 gdb_pretty_print_disassembler disasm (gdbarch);
708 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
710 const struct btrace_insn *insn;
712 insn = btrace_insn_get (&it);
714 /* A NULL instruction indicates a gap in the trace. */
717 const struct btrace_config *conf;
719 conf = btrace_conf (btinfo);
721 /* We have trace so we must have a configuration. */
722 gdb_assert (conf != NULL);
724 uiout->field_fmt ("insn-number", "%u",
725 btrace_insn_number (&it));
728 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
733 struct disasm_insn dinsn;
735 if ((flags & DISASSEMBLY_SOURCE) != 0)
737 struct btrace_line_range lines;
739 lines = btrace_find_line_range (insn->pc);
/* Only print source lines we have not already printed.  */
740 if (!btrace_line_range_is_empty (lines)
741 && !btrace_line_range_contains_range (last_lines, lines))
743 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
745 
746 else if (ui_item_chain == NULL)
749 = make_cleanup_ui_out_tuple_begin_end (uiout,
751 /* No source information. */
752 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
755 gdb_assert (ui_item_chain != NULL);
758 memset (&dinsn, 0, sizeof (dinsn));
759 dinsn.number = btrace_insn_number (&it);
760 dinsn.addr = insn->pc;
/* Mark instructions that were executed speculatively.  */
762 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
763 dinsn.is_speculative = 1;
765 disasm.pretty_print_insn (uiout, &dinsn, flags);
769 do_cleanups (cleanups);
772 /* The to_insn_history method of target record-btrace. */
/* Print SIZE instructions (negative SIZE means backwards), continuing from
   the previous "record instruction-history" invocation if there was one.  */
775 record_btrace_insn_history (struct target_ops *self, int size, int flags)
777 struct btrace_thread_info *btinfo;
778 struct btrace_insn_history *history;
779 struct btrace_insn_iterator begin, end;
780 struct cleanup *uiout_cleanup;
781 struct ui_out *uiout;
782 unsigned int context, covered;
784 uiout = current_uiout;
785 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
787 context = abs (size);
/* NOTE(review): the "if (context == 0)" guard appears elided here.  */
789 error (_("Bad record instruction-history-size."));
791 btinfo = require_btrace ();
792 history = btinfo->insn_history;
795 struct btrace_insn_iterator *replay;
797 DEBUG ("insn-history (0x%x): %d", flags, size);
799 /* If we're replaying, we start at the replay position. Otherwise, we
800 start at the tail of the trace. */
801 replay = btinfo->replay;
805 btrace_insn_end (&begin, btinfo);
807 /* We start from here and expand in the requested direction. Then we
808 expand in the other direction, as well, to fill up any remaining
   context.  */
/* NOTE(review): the "if (size < 0)" direction test appears elided.  */
813 /* We want the current position covered, as well. */
814 covered = btrace_insn_next (&end, 1);
815 covered += btrace_insn_prev (&begin, context - covered);
816 covered += btrace_insn_next (&end, context - covered);
820 covered = btrace_insn_next (&end, context);
821 covered += btrace_insn_prev (&begin, context - covered);
/* A previous history exists: continue from its boundary.  */
826 begin = history->begin;
829 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
830 btrace_insn_number (&begin), btrace_insn_number (&end));
835 covered = btrace_insn_prev (&begin, context);
840 covered = btrace_insn_next (&end, context);
845 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
/* NOTE(review): the "if (covered == 0)" / direction tests selecting these
   messages appear elided.  */
849 printf_unfiltered (_("At the start of the branch trace record.\n"));
851 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the printed range for the next invocation.  */
854 btrace_set_insn_history (btinfo, &begin, &end);
855 do_cleanups (uiout_cleanup);
858 /* The to_insn_history_range method of target record-btrace. */
/* Print instructions with numbers in [FROM, TO], both inclusive.  */
861 record_btrace_insn_history_range (struct target_ops *self,
862 ULONGEST from, ULONGEST to, int flags)
864 struct btrace_thread_info *btinfo;
865 struct btrace_insn_history *history;
866 struct btrace_insn_iterator begin, end;
867 struct cleanup *uiout_cleanup;
868 struct ui_out *uiout;
869 unsigned int low, high;
872 uiout = current_uiout;
873 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
878 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
880 /* Check for wrap-arounds. */
/* LOW/HIGH are the 32-bit truncations of FROM/TO.  */
881 if (low != from || high != to)
882 error (_("Bad range."));
/* NOTE(review): the "if (high < low)" check appears elided here.  */
885 error (_("Bad range."));
887 btinfo = require_btrace ();
889 found = btrace_find_insn_by_number (&begin, btinfo, low);
891 error (_("Range out of bounds."));
893 found = btrace_find_insn_by_number (&end, btinfo, high);
896 /* Silently truncate the range. */
897 btrace_insn_end (&end, btinfo);
901 /* We want both begin and end to be inclusive. */
902 btrace_insn_next (&end, 1);
905 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
906 btrace_set_insn_history (btinfo, &begin, &end);
908 do_cleanups (uiout_cleanup);
911 /* The to_insn_history_from method of target record-btrace. */
/* Print SIZE instructions around FROM: backwards if SIZE is negative,
   forwards otherwise; delegates to the range method.  */
914 record_btrace_insn_history_from (struct target_ops *self,
915 ULONGEST from, int size, int flags)
917 ULONGEST begin, end, context;
919 context = abs (size);
/* NOTE(review): the "if (context == 0)" guard appears elided here.  */
921 error (_("Bad record instruction-history-size."));
930 begin = from - context + 1;
935 end = from + context - 1;
937 /* Check for wrap-around. */
/* NOTE(review): the underflow/overflow clamping appears elided here.  */
942 record_btrace_insn_history_range (self, begin, end, flags);
945 /* Print the instruction number range for a function call history line. */
948 btrace_call_history_insn_range (struct ui_out *uiout,
949 const struct btrace_function *bfun)
951 unsigned int begin, end, size;
953 size = VEC_length (btrace_insn_s, bfun->insn);
/* Every function segment must contain at least one instruction.  */
954 gdb_assert (size > 0);
956 begin = bfun->insn_offset;
/* END is inclusive.  */
957 end = begin + size - 1;
959 ui_out_field_uint (uiout, "insn begin", begin);
961 ui_out_field_uint (uiout, "insn end", end);
964 /* Compute the lowest and highest source line for the instructions in BFUN
965 and return them in PBEGIN and PEND.
966 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
967 result from inlining or macro expansion. */
970 btrace_compute_src_line_range (const struct btrace_function *bfun,
971 int *pbegin, int *pend)
973 struct btrace_insn *insn;
974 struct symtab *symtab;
/* NOTE(review): initialization of begin/end and the bfun->sym NULL guard
   appear elided in this extraction.  */
986 symtab = symbol_symtab (sym);
988 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
990 struct symtab_and_line sal;
992 sal = find_pc_line (insn->pc, 0);
/* Skip instructions mapped to a different symtab (inlining, macros).  */
993 if (sal.symtab != symtab || sal.line == 0)
996 begin = std::min (begin, sal.line);
997 end = std::max (end, sal.line);
/* NOTE(review): the *pbegin/*pend stores appear elided here.  */
1005 /* Print the source line information for a function call history line. */
/* Prints "file:min[-max]"; the guards deciding whether min/max differ
   appear elided in this extraction.  */
1008 btrace_call_history_src_line (struct ui_out *uiout,
1009 const struct btrace_function *bfun)
1018 uiout->field_string ("file",
1019 symtab_to_filename_for_display (symbol_symtab (sym)));
1021 btrace_compute_src_line_range (bfun, &begin, &end);
1026 uiout->field_int ("min line", begin);
1032 uiout->field_int ("max line", end);
1035 /* Get the name of a branch trace function. */
/* Prefer the full symbol's print name, fall back to the minimal symbol;
   the final fallback for neither appears elided in this extraction.  */
1038 btrace_get_bfun_name (const struct btrace_function *bfun)
1040 struct minimal_symbol *msym;
1050 return SYMBOL_PRINT_NAME (sym);
1051 else if (msym != NULL)
1052 return MSYMBOL_PRINT_NAME (msym);
1057 /* Disassemble a section of the recorded function trace. */
/* Print function segments in [BEGIN, END) to UIOUT: index, optional call
   indentation, function name, and optional insn-range/source-line columns
   depending on FLAGS.  */
1060 btrace_call_history (struct ui_out *uiout,
1061 const struct btrace_thread_info *btinfo,
1062 const struct btrace_call_iterator *begin,
1063 const struct btrace_call_iterator *end,
1066 struct btrace_call_iterator it;
1067 record_print_flags flags = (enum record_print_flag) int_flags;
1069 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1070 btrace_call_number (end));
1072 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1074 const struct btrace_function *bfun;
1075 struct minimal_symbol *msym;
1078 bfun = btrace_call_get (&it);
1082 /* Print the function index. */
1083 ui_out_field_uint (uiout, "index", bfun->number);
1086 /* Indicate gaps in the trace. */
1087 if (bfun->errcode != 0)
1089 const struct btrace_config *conf;
1091 conf = btrace_conf (btinfo);
1093 /* We have trace so we must have a configuration. */
1094 gdb_assert (conf != NULL);
1096 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1101 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
/* Indent proportionally to the call depth; btinfo->level normalizes the
   most negative segment level to zero.  */
1103 int level = bfun->level + btinfo->level, i;
1105 for (i = 0; i < level; ++i)
1110 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1111 else if (msym != NULL)
1112 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1113 else if (!uiout->is_mi_like_p ())
1114 uiout->field_string ("function", "??");
1116 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1118 uiout->text (_("\tinst "));
1119 btrace_call_history_insn_range (uiout, bfun);
1122 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1124 uiout->text (_("\tat "));
1125 btrace_call_history_src_line (uiout, bfun);
1132 /* The to_call_history method of target record-btrace. */
/* Print SIZE function segments (negative SIZE means backwards), continuing
   from a previous "record function-call-history" if one exists.  */
1135 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1137 struct btrace_thread_info *btinfo;
1138 struct btrace_call_history *history;
1139 struct btrace_call_iterator begin, end;
1140 struct cleanup *uiout_cleanup;
1141 struct ui_out *uiout;
1142 unsigned int context, covered;
1143 record_print_flags flags = (enum record_print_flag) int_flags;
1145 uiout = current_uiout;
1146 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1148 context = abs (size);
/* NOTE(review): the "if (context == 0)" guard appears elided here.  */
1150 error (_("Bad record function-call-history-size."));
1152 btinfo = require_btrace ();
1153 history = btinfo->call_history;
1154 if (history == NULL)
1156 struct btrace_insn_iterator *replay;
1158 DEBUG ("call-history (0x%x): %d", int_flags, size);
1160 /* If we're replaying, we start at the replay position. Otherwise, we
1161 start at the tail of the trace. */
1162 replay = btinfo->replay;
1165 begin.function = replay->function;
1166 begin.btinfo = btinfo;
1169 btrace_call_end (&begin, btinfo);
1171 /* We start from here and expand in the requested direction. Then we
1172 expand in the other direction, as well, to fill up any remaining
   context.  */
/* NOTE(review): the "if (size < 0)" direction test appears elided.  */
1177 /* We want the current position covered, as well. */
1178 covered = btrace_call_next (&end, 1);
1179 covered += btrace_call_prev (&begin, context - covered);
1180 covered += btrace_call_next (&end, context - covered);
1184 covered = btrace_call_next (&end, context);
1185 covered += btrace_call_prev (&begin, context- covered);
/* A previous history exists: continue from its boundary.  */
1190 begin = history->begin;
1193 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1194 btrace_call_number (&begin), btrace_call_number (&end));
1199 covered = btrace_call_prev (&begin, context);
1204 covered = btrace_call_next (&end, context);
1209 btrace_call_history (uiout, btinfo, &begin, &end, flags);
/* NOTE(review): the "if (covered == 0)" / direction tests selecting these
   messages appear elided.  */
1213 printf_unfiltered (_("At the start of the branch trace record.\n"));
1215 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the printed range for the next invocation.  */
1218 btrace_set_call_history (btinfo, &begin, &end);
1219 do_cleanups (uiout_cleanup);
1222 /* The to_call_history_range method of target record-btrace. */
/* Print function segments with numbers in [FROM, TO], both inclusive.  */
1225 record_btrace_call_history_range (struct target_ops *self,
1226 ULONGEST from, ULONGEST to,
1229 struct btrace_thread_info *btinfo;
1230 struct btrace_call_history *history;
1231 struct btrace_call_iterator begin, end;
1232 struct cleanup *uiout_cleanup;
1233 struct ui_out *uiout;
1234 unsigned int low, high;
1236 record_print_flags flags = (enum record_print_flag) int_flags;
1238 uiout = current_uiout;
1239 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1244 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1246 /* Check for wrap-arounds. */
/* LOW/HIGH are the 32-bit truncations of FROM/TO.  */
1247 if (low != from || high != to)
1248 error (_("Bad range."));
/* NOTE(review): the "if (high < low)" check appears elided here.  */
1251 error (_("Bad range."));
1253 btinfo = require_btrace ();
1255 found = btrace_find_call_by_number (&begin, btinfo, low);
1257 error (_("Range out of bounds."));
1259 found = btrace_find_call_by_number (&end, btinfo, high);
1262 /* Silently truncate the range. */
1263 btrace_call_end (&end, btinfo);
1267 /* We want both begin and end to be inclusive. */
1268 btrace_call_next (&end, 1);
1271 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1272 btrace_set_call_history (btinfo, &begin, &end);
1274 do_cleanups (uiout_cleanup);
1277 /* The to_call_history_from method of target record-btrace. */
/* Print SIZE function segments around FROM: backwards if SIZE is negative,
   forwards otherwise; delegates to the range method.  */
1280 record_btrace_call_history_from (struct target_ops *self,
1281 ULONGEST from, int size,
1284 ULONGEST begin, end, context;
1285 record_print_flags flags = (enum record_print_flag) int_flags;
1287 context = abs (size);
/* NOTE(review): the "if (context == 0)" guard appears elided here.  */
1289 error (_("Bad record function-call-history-size."));
1298 begin = from - context + 1;
1303 end = from + context - 1;
1305 /* Check for wrap-around. */
/* NOTE(review): the underflow/overflow clamping appears elided here.  */
1310 record_btrace_call_history_range (self, begin, end, flags);
1313 /* The to_record_is_replaying method of target record-btrace. */
/* Return non-zero if any non-exited thread matching PTID is replaying.  */
1316 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1318 struct thread_info *tp;
1320 ALL_NON_EXITED_THREADS (tp)
1321 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
/* NOTE(review): the "return 1;" / trailing "return 0;" appear elided.  */
1327 /* The to_record_will_replay method of target record-btrace. */
/* Any reverse resume implies replay; otherwise replay only if already
   replaying.  */
1330 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1332 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1335 /* The to_xfer_partial method of target record-btrace. */
/* While replaying in read-only mode, refuse memory writes and restrict
   reads to read-only sections; everything else is forwarded beneath.  */
1337 static enum target_xfer_status
1338 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1339 const char *annex, gdb_byte *readbuf,
1340 const gdb_byte *writebuf, ULONGEST offset,
1341 ULONGEST len, ULONGEST *xfered_len)
1343 struct target_ops *t;
1345 /* Filter out requests that don't make sense during replay. */
1346 if (replay_memory_access == replay_memory_access_read_only
1347 && !record_btrace_generating_corefile
1348 && record_btrace_is_replaying (ops, inferior_ptid))
1352 case TARGET_OBJECT_MEMORY:
1354 struct target_section *section;
1356 /* We do not allow writing memory in general. */
1357 if (writebuf != NULL)
/* NOTE(review): the "*xfered_len = len;" store appears elided here.  */
1360 return TARGET_XFER_UNAVAILABLE;
1363 /* We allow reading readonly memory. */
1364 section = target_section_by_addr (ops, offset);
1365 if (section != NULL)
1367 /* Check if the section we found is readonly. */
1368 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1369 section->the_bfd_section)
1370 & SEC_READONLY) != 0)
1372 /* Truncate the request to fit into this section. */
1373 len = std::min (len, section->endaddr - offset);
1379 return TARGET_XFER_UNAVAILABLE;
1384 /* Forward the request. */
/* NOTE(review): an "ops = ops->beneath;" reassignment appears elided just
   before this forwarding call.  */
1386 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1387 offset, len, xfered_len);
1390 /* The to_insert_breakpoint method of target record-btrace. */
1393 record_btrace_insert_breakpoint (struct target_ops *ops,
1394 struct gdbarch *gdbarch,
1395 struct bp_target_info *bp_tgt)
1400 /* Inserting breakpoints requires accessing memory. Allow it for the
1401 duration of this function. */
/* Temporarily lift the read-only replay restriction; restored on both the
   normal and the exception path below.  */
1402 old = replay_memory_access;
1403 replay_memory_access = replay_memory_access_read_write;
1408 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1410 CATCH (except, RETURN_MASK_ALL)
1412 replay_memory_access = old;
/* Re-raise after restoring the access mode.  */
1413 throw_exception (except);
1416 replay_memory_access = old;
/* NOTE(review): the trailing "return ret;" appears elided here.  */
1421 /* The to_remove_breakpoint method of target record-btrace. */
1424 record_btrace_remove_breakpoint (struct target_ops *ops,
1425 struct gdbarch *gdbarch,
1426 struct bp_target_info *bp_tgt,
1427 enum remove_bp_reason reason)
1432 /* Removing breakpoints requires accessing memory. Allow it for the
1433 duration of this function. */
/* Temporarily lift the read-only replay restriction; restored on both the
   normal and the exception path below.  */
1434 old = replay_memory_access;
1435 replay_memory_access = replay_memory_access_read_write;
1440 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1443 CATCH (except, RETURN_MASK_ALL)
1445 replay_memory_access = old;
/* Re-raise after restoring the access mode.  */
1446 throw_exception (except);
1449 replay_memory_access = old;
/* NOTE(review): the trailing "return ret;" appears elided here.  */
1454 /* The to_fetch_registers method of target record-btrace. */
/* While replaying, only the PC (taken from the replay iterator) can be
   supplied; otherwise delegate to the target beneath.  */
1457 record_btrace_fetch_registers (struct target_ops *ops,
1458 struct regcache *regcache, int regno)
1460 struct btrace_insn_iterator *replay;
1461 struct thread_info *tp;
1463 tp = find_thread_ptid (inferior_ptid);
1464 gdb_assert (tp != NULL);
1466 replay = tp->btrace.replay;
1467 if (replay != NULL && !record_btrace_generating_corefile)
1469 const struct btrace_insn *insn;
1470 struct gdbarch *gdbarch;
1473 gdbarch = get_regcache_arch (regcache);
1474 pcreg = gdbarch_pc_regnum (gdbarch);
1478 /* We can only provide the PC register. */
1479 if (regno >= 0 && regno != pcreg)
/* NOTE(review): an early return and a "regno = pcreg;" for the regno < 0
   case appear elided around here.  */
1482 insn = btrace_insn_get (replay);
1483 gdb_assert (insn != NULL);
1485 regcache_raw_supply (regcache, regno, &insn->pc);
1489 struct target_ops *t = ops->beneath;
1491 t->to_fetch_registers (t, regcache, regno);
1495 /* The to_store_registers method of target record-btrace. */
/* Refuse register writes while replaying; otherwise delegate beneath.  */
1498 record_btrace_store_registers (struct target_ops *ops,
1499 struct regcache *regcache, int regno)
1501 struct target_ops *t;
1503 if (!record_btrace_generating_corefile
1504 && record_btrace_is_replaying (ops, inferior_ptid))
1505 error (_("Cannot write registers while replaying."));
1507 gdb_assert (may_write_registers != 0);
/* NOTE(review): the "t = ops->beneath;" assignment appears elided here.  */
1510 t->to_store_registers (t, regcache, regno);
1513 /* The to_prepare_to_store method of target record-btrace. */
/* Nothing to prepare while replaying; otherwise delegate beneath.  */
1516 record_btrace_prepare_to_store (struct target_ops *ops,
1517 struct regcache *regcache)
1519 struct target_ops *t;
1521 if (!record_btrace_generating_corefile
1522 && record_btrace_is_replaying (ops, inferior_ptid))
/* NOTE(review): an early return and "t = ops->beneath;" appear elided.  */
1526 t->to_prepare_to_store (t, regcache);
1529 /* The branch trace frame cache. */
1531 struct btrace_frame_cache
/* The thread this frame belongs to.  */
1534 struct thread_info *tp;
1536 /* The frame info. */
1537 struct frame_info *frame;
1539 /* The branch trace function segment. */
1540 const struct btrace_function *bfun;
1543 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1545 static htab_t bfcache;
1547 /* hash_f for htab_create_alloc of bfcache. */
1550 bfcache_hash (const void *arg)
1552 const struct btrace_frame_cache *cache
1553 = (const struct btrace_frame_cache *) arg;
1555 return htab_hash_pointer (cache->frame);
1558 /* eq_f for htab_create_alloc of bfcache. */
1561 bfcache_eq (const void *arg1, const void *arg2)
1563 const struct btrace_frame_cache *cache1
1564 = (const struct btrace_frame_cache *) arg1;
1565 const struct btrace_frame_cache *cache2
1566 = (const struct btrace_frame_cache *) arg2;
1568 return cache1->frame == cache2->frame;
1571 /* Create a new btrace frame cache. */
1573 static struct btrace_frame_cache *
1574 bfcache_new (struct frame_info *frame)
1576 struct btrace_frame_cache *cache;
1579 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1580 cache->frame = frame;
1582 slot = htab_find_slot (bfcache, cache, INSERT);
1583 gdb_assert (*slot == NULL);
1589 /* Extract the branch trace function from a branch trace frame. */
1591 static const struct btrace_function *
1592 btrace_get_frame_function (struct frame_info *frame)
1594 const struct btrace_frame_cache *cache;
1595 const struct btrace_function *bfun;
1596 struct btrace_frame_cache pattern;
1599 pattern.frame = frame;
1601 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1605 cache = (const struct btrace_frame_cache *) *slot;
1609 /* Implement stop_reason method for record_btrace_frame_unwind. */
1611 static enum unwind_stop_reason
1612 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1615 const struct btrace_frame_cache *cache;
1616 const struct btrace_function *bfun;
1618 cache = (const struct btrace_frame_cache *) *this_cache;
1620 gdb_assert (bfun != NULL);
1622 if (bfun->up == NULL)
1623 return UNWIND_UNAVAILABLE;
1625 return UNWIND_NO_REASON;
1628 /* Implement this_id method for record_btrace_frame_unwind. */
1631 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1632 struct frame_id *this_id)
1634 const struct btrace_frame_cache *cache;
1635 const struct btrace_function *bfun;
1636 CORE_ADDR code, special;
1638 cache = (const struct btrace_frame_cache *) *this_cache;
1641 gdb_assert (bfun != NULL);
1643 while (bfun->segment.prev != NULL)
1644 bfun = bfun->segment.prev;
1646 code = get_frame_func (this_frame);
1647 special = bfun->number;
1649 *this_id = frame_id_build_unavailable_stack_special (code, special);
1651 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1652 btrace_get_bfun_name (cache->bfun),
1653 core_addr_to_string_nz (this_id->code_addr),
1654 core_addr_to_string_nz (this_id->special_addr));
1657 /* Implement prev_register method for record_btrace_frame_unwind. */
1659 static struct value *
1660 record_btrace_frame_prev_register (struct frame_info *this_frame,
1664 const struct btrace_frame_cache *cache;
1665 const struct btrace_function *bfun, *caller;
1666 const struct btrace_insn *insn;
1667 struct gdbarch *gdbarch;
1671 gdbarch = get_frame_arch (this_frame);
1672 pcreg = gdbarch_pc_regnum (gdbarch);
1673 if (pcreg < 0 || regnum != pcreg)
1674 throw_error (NOT_AVAILABLE_ERROR,
1675 _("Registers are not available in btrace record history"));
1677 cache = (const struct btrace_frame_cache *) *this_cache;
1679 gdb_assert (bfun != NULL);
1683 throw_error (NOT_AVAILABLE_ERROR,
1684 _("No caller in btrace record history"));
1686 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1688 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1693 insn = VEC_last (btrace_insn_s, caller->insn);
1696 pc += gdb_insn_length (gdbarch, pc);
1699 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1700 btrace_get_bfun_name (bfun), bfun->level,
1701 core_addr_to_string_nz (pc));
1703 return frame_unwind_got_address (this_frame, regnum, pc);
1706 /* Implement sniffer method for record_btrace_frame_unwind. */
1709 record_btrace_frame_sniffer (const struct frame_unwind *self,
1710 struct frame_info *this_frame,
1713 const struct btrace_function *bfun;
1714 struct btrace_frame_cache *cache;
1715 struct thread_info *tp;
1716 struct frame_info *next;
1718 /* THIS_FRAME does not contain a reference to its thread. */
1719 tp = find_thread_ptid (inferior_ptid);
1720 gdb_assert (tp != NULL);
1723 next = get_next_frame (this_frame);
1726 const struct btrace_insn_iterator *replay;
1728 replay = tp->btrace.replay;
1730 bfun = replay->function;
1734 const struct btrace_function *callee;
1736 callee = btrace_get_frame_function (next);
1737 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1744 DEBUG ("[frame] sniffed frame for %s on level %d",
1745 btrace_get_bfun_name (bfun), bfun->level);
1747 /* This is our frame. Initialize the frame cache. */
1748 cache = bfcache_new (this_frame);
1752 *this_cache = cache;
1756 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1759 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1760 struct frame_info *this_frame,
1763 const struct btrace_function *bfun, *callee;
1764 struct btrace_frame_cache *cache;
1765 struct frame_info *next;
1767 next = get_next_frame (this_frame);
1771 callee = btrace_get_frame_function (next);
1775 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1782 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1783 btrace_get_bfun_name (bfun), bfun->level);
1785 /* This is our frame. Initialize the frame cache. */
1786 cache = bfcache_new (this_frame);
1787 cache->tp = find_thread_ptid (inferior_ptid);
1790 *this_cache = cache;
1795 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1797 struct btrace_frame_cache *cache;
1800 cache = (struct btrace_frame_cache *) this_cache;
1802 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1803 gdb_assert (slot != NULL);
1805 htab_remove_elt (bfcache, cache);
1808 /* btrace recording does not store previous memory content, neither the stack
1809 frames content. Any unwinding would return errorneous results as the stack
1810 contents no longer matches the changed PC value restored from history.
1811 Therefore this unwinder reports any possibly unwound registers as
1814 const struct frame_unwind record_btrace_frame_unwind =
1817 record_btrace_frame_unwind_stop_reason,
1818 record_btrace_frame_this_id,
1819 record_btrace_frame_prev_register,
1821 record_btrace_frame_sniffer,
1822 record_btrace_frame_dealloc_cache
1825 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1828 record_btrace_frame_unwind_stop_reason,
1829 record_btrace_frame_this_id,
1830 record_btrace_frame_prev_register,
1832 record_btrace_tailcall_frame_sniffer,
1833 record_btrace_frame_dealloc_cache
1836 /* Implement the to_get_unwinder method. */
1838 static const struct frame_unwind *
1839 record_btrace_to_get_unwinder (struct target_ops *self)
1841 return &record_btrace_frame_unwind;
1844 /* Implement the to_get_tailcall_unwinder method. */
1846 static const struct frame_unwind *
1847 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1849 return &record_btrace_tailcall_frame_unwind;
1852 /* Return a human-readable string for FLAG. */
1855 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1863 return "reverse-step";
1869 return "reverse-cont";
1878 /* Indicate that TP should be resumed according to FLAG. */
1881 record_btrace_resume_thread (struct thread_info *tp,
1882 enum btrace_thread_flag flag)
1884 struct btrace_thread_info *btinfo;
1886 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1887 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1889 btinfo = &tp->btrace;
1891 /* Fetch the latest branch trace. */
1894 /* A resume request overwrites a preceding resume or stop request. */
1895 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1896 btinfo->flags |= flag;
1899 /* Get the current frame for TP. */
1901 static struct frame_info *
1902 get_thread_current_frame (struct thread_info *tp)
1904 struct frame_info *frame;
1905 ptid_t old_inferior_ptid;
1908 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1909 old_inferior_ptid = inferior_ptid;
1910 inferior_ptid = tp->ptid;
1912 /* Clear the executing flag to allow changes to the current frame.
1913 We are not actually running, yet. We just started a reverse execution
1914 command or a record goto command.
1915 For the latter, EXECUTING is false and this has no effect.
1916 For the former, EXECUTING is true and we're in to_wait, about to
1917 move the thread. Since we need to recompute the stack, we temporarily
1918 set EXECUTING to flase. */
1919 executing = is_executing (inferior_ptid);
1920 set_executing (inferior_ptid, 0);
1925 frame = get_current_frame ();
1927 CATCH (except, RETURN_MASK_ALL)
1929 /* Restore the previous execution state. */
1930 set_executing (inferior_ptid, executing);
1932 /* Restore the previous inferior_ptid. */
1933 inferior_ptid = old_inferior_ptid;
1935 throw_exception (except);
1939 /* Restore the previous execution state. */
1940 set_executing (inferior_ptid, executing);
1942 /* Restore the previous inferior_ptid. */
1943 inferior_ptid = old_inferior_ptid;
1948 /* Start replaying a thread. */
1950 static struct btrace_insn_iterator *
1951 record_btrace_start_replaying (struct thread_info *tp)
1953 struct btrace_insn_iterator *replay;
1954 struct btrace_thread_info *btinfo;
1956 btinfo = &tp->btrace;
1959 /* We can't start replaying without trace. */
1960 if (btinfo->begin == NULL)
1963 /* GDB stores the current frame_id when stepping in order to detects steps
1965 Since frames are computed differently when we're replaying, we need to
1966 recompute those stored frames and fix them up so we can still detect
1967 subroutines after we started replaying. */
1970 struct frame_info *frame;
1971 struct frame_id frame_id;
1972 int upd_step_frame_id, upd_step_stack_frame_id;
1974 /* The current frame without replaying - computed via normal unwind. */
1975 frame = get_thread_current_frame (tp);
1976 frame_id = get_frame_id (frame);
1978 /* Check if we need to update any stepping-related frame id's. */
1979 upd_step_frame_id = frame_id_eq (frame_id,
1980 tp->control.step_frame_id);
1981 upd_step_stack_frame_id = frame_id_eq (frame_id,
1982 tp->control.step_stack_frame_id);
1984 /* We start replaying at the end of the branch trace. This corresponds
1985 to the current instruction. */
1986 replay = XNEW (struct btrace_insn_iterator);
1987 btrace_insn_end (replay, btinfo);
1989 /* Skip gaps at the end of the trace. */
1990 while (btrace_insn_get (replay) == NULL)
1994 steps = btrace_insn_prev (replay, 1);
1996 error (_("No trace."));
1999 /* We're not replaying, yet. */
2000 gdb_assert (btinfo->replay == NULL);
2001 btinfo->replay = replay;
2003 /* Make sure we're not using any stale registers. */
2004 registers_changed_ptid (tp->ptid);
2006 /* The current frame with replaying - computed via btrace unwind. */
2007 frame = get_thread_current_frame (tp);
2008 frame_id = get_frame_id (frame);
2010 /* Replace stepping related frames where necessary. */
2011 if (upd_step_frame_id)
2012 tp->control.step_frame_id = frame_id;
2013 if (upd_step_stack_frame_id)
2014 tp->control.step_stack_frame_id = frame_id;
2016 CATCH (except, RETURN_MASK_ALL)
2018 xfree (btinfo->replay);
2019 btinfo->replay = NULL;
2021 registers_changed_ptid (tp->ptid);
2023 throw_exception (except);
2030 /* Stop replaying a thread. */
2033 record_btrace_stop_replaying (struct thread_info *tp)
2035 struct btrace_thread_info *btinfo;
2037 btinfo = &tp->btrace;
2039 xfree (btinfo->replay);
2040 btinfo->replay = NULL;
2042 /* Make sure we're not leaving any stale registers. */
2043 registers_changed_ptid (tp->ptid);
2046 /* Stop replaying TP if it is at the end of its execution history. */
2049 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2051 struct btrace_insn_iterator *replay, end;
2052 struct btrace_thread_info *btinfo;
2054 btinfo = &tp->btrace;
2055 replay = btinfo->replay;
2060 btrace_insn_end (&end, btinfo);
2062 if (btrace_insn_cmp (replay, &end) == 0)
2063 record_btrace_stop_replaying (tp);
2066 /* The to_resume method of target record-btrace. */
2069 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2070 enum gdb_signal signal)
2072 struct thread_info *tp;
2073 enum btrace_thread_flag flag, cflag;
2075 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2076 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2077 step ? "step" : "cont");
2079 /* Store the execution direction of the last resume.
2081 If there is more than one to_resume call, we have to rely on infrun
2082 to not change the execution direction in-between. */
2083 record_btrace_resume_exec_dir = execution_direction;
2085 /* As long as we're not replaying, just forward the request.
2087 For non-stop targets this means that no thread is replaying. In order to
2088 make progress, we may need to explicitly move replaying threads to the end
2089 of their execution history. */
2090 if ((execution_direction != EXEC_REVERSE)
2091 && !record_btrace_is_replaying (ops, minus_one_ptid))
2094 ops->to_resume (ops, ptid, step, signal);
2098 /* Compute the btrace thread flag for the requested move. */
2099 if (execution_direction == EXEC_REVERSE)
2101 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2106 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2110 /* We just indicate the resume intent here. The actual stepping happens in
2111 record_btrace_wait below.
2113 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2114 if (!target_is_non_stop_p ())
2116 gdb_assert (ptid_match (inferior_ptid, ptid));
2118 ALL_NON_EXITED_THREADS (tp)
2119 if (ptid_match (tp->ptid, ptid))
2121 if (ptid_match (tp->ptid, inferior_ptid))
2122 record_btrace_resume_thread (tp, flag);
2124 record_btrace_resume_thread (tp, cflag);
2129 ALL_NON_EXITED_THREADS (tp)
2130 if (ptid_match (tp->ptid, ptid))
2131 record_btrace_resume_thread (tp, flag);
2134 /* Async support. */
2135 if (target_can_async_p ())
2138 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2142 /* The to_commit_resume method of target record-btrace. */
2145 record_btrace_commit_resume (struct target_ops *ops)
2147 if ((execution_direction != EXEC_REVERSE)
2148 && !record_btrace_is_replaying (ops, minus_one_ptid))
2149 ops->beneath->to_commit_resume (ops->beneath);
2152 /* Cancel resuming TP. */
2155 record_btrace_cancel_resume (struct thread_info *tp)
2157 enum btrace_thread_flag flags;
2159 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2163 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2164 print_thread_id (tp),
2165 target_pid_to_str (tp->ptid), flags,
2166 btrace_thread_flag_to_str (flags));
2168 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2169 record_btrace_stop_replaying_at_end (tp);
2172 /* Return a target_waitstatus indicating that we ran out of history. */
2174 static struct target_waitstatus
2175 btrace_step_no_history (void)
2177 struct target_waitstatus status;
2179 status.kind = TARGET_WAITKIND_NO_HISTORY;
2184 /* Return a target_waitstatus indicating that a step finished. */
2186 static struct target_waitstatus
2187 btrace_step_stopped (void)
2189 struct target_waitstatus status;
2191 status.kind = TARGET_WAITKIND_STOPPED;
2192 status.value.sig = GDB_SIGNAL_TRAP;
2197 /* Return a target_waitstatus indicating that a thread was stopped as
2200 static struct target_waitstatus
2201 btrace_step_stopped_on_request (void)
2203 struct target_waitstatus status;
2205 status.kind = TARGET_WAITKIND_STOPPED;
2206 status.value.sig = GDB_SIGNAL_0;
2211 /* Return a target_waitstatus indicating a spurious stop. */
2213 static struct target_waitstatus
2214 btrace_step_spurious (void)
2216 struct target_waitstatus status;
2218 status.kind = TARGET_WAITKIND_SPURIOUS;
2223 /* Return a target_waitstatus indicating that the thread was not resumed. */
2225 static struct target_waitstatus
2226 btrace_step_no_resumed (void)
2228 struct target_waitstatus status;
2230 status.kind = TARGET_WAITKIND_NO_RESUMED;
2235 /* Return a target_waitstatus indicating that we should wait again. */
2237 static struct target_waitstatus
2238 btrace_step_again (void)
2240 struct target_waitstatus status;
2242 status.kind = TARGET_WAITKIND_IGNORE;
2247 /* Clear the record histories. */
2250 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2252 xfree (btinfo->insn_history);
2253 xfree (btinfo->call_history);
2255 btinfo->insn_history = NULL;
2256 btinfo->call_history = NULL;
2259 /* Check whether TP's current replay position is at a breakpoint. */
2262 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2264 struct btrace_insn_iterator *replay;
2265 struct btrace_thread_info *btinfo;
2266 const struct btrace_insn *insn;
2267 struct inferior *inf;
2269 btinfo = &tp->btrace;
2270 replay = btinfo->replay;
2275 insn = btrace_insn_get (replay);
2279 inf = find_inferior_ptid (tp->ptid);
2283 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2284 &btinfo->stop_reason);
2287 /* Step one instruction in forward direction. */
2289 static struct target_waitstatus
2290 record_btrace_single_step_forward (struct thread_info *tp)
2292 struct btrace_insn_iterator *replay, end, start;
2293 struct btrace_thread_info *btinfo;
2295 btinfo = &tp->btrace;
2296 replay = btinfo->replay;
2298 /* We're done if we're not replaying. */
2300 return btrace_step_no_history ();
2302 /* Check if we're stepping a breakpoint. */
2303 if (record_btrace_replay_at_breakpoint (tp))
2304 return btrace_step_stopped ();
2306 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2307 jump back to the instruction at which we started. */
2313 /* We will bail out here if we continue stepping after reaching the end
2314 of the execution history. */
2315 steps = btrace_insn_next (replay, 1);
2319 return btrace_step_no_history ();
2322 while (btrace_insn_get (replay) == NULL);
2324 /* Determine the end of the instruction trace. */
2325 btrace_insn_end (&end, btinfo);
2327 /* The execution trace contains (and ends with) the current instruction.
2328 This instruction has not been executed, yet, so the trace really ends
2329 one instruction earlier. */
2330 if (btrace_insn_cmp (replay, &end) == 0)
2331 return btrace_step_no_history ();
2333 return btrace_step_spurious ();
2336 /* Step one instruction in backward direction. */
2338 static struct target_waitstatus
2339 record_btrace_single_step_backward (struct thread_info *tp)
2341 struct btrace_insn_iterator *replay, start;
2342 struct btrace_thread_info *btinfo;
2344 btinfo = &tp->btrace;
2345 replay = btinfo->replay;
2347 /* Start replaying if we're not already doing so. */
2349 replay = record_btrace_start_replaying (tp);
2351 /* If we can't step any further, we reached the end of the history.
2352 Skip gaps during replay. If we end up at a gap (at the beginning of
2353 the trace), jump back to the instruction at which we started. */
2359 steps = btrace_insn_prev (replay, 1);
2363 return btrace_step_no_history ();
2366 while (btrace_insn_get (replay) == NULL);
2368 /* Check if we're stepping a breakpoint.
2370 For reverse-stepping, this check is after the step. There is logic in
2371 infrun.c that handles reverse-stepping separately. See, for example,
2372 proceed and adjust_pc_after_break.
2374 This code assumes that for reverse-stepping, PC points to the last
2375 de-executed instruction, whereas for forward-stepping PC points to the
2376 next to-be-executed instruction. */
2377 if (record_btrace_replay_at_breakpoint (tp))
2378 return btrace_step_stopped ();
2380 return btrace_step_spurious ();
2383 /* Step a single thread. */
2385 static struct target_waitstatus
2386 record_btrace_step_thread (struct thread_info *tp)
2388 struct btrace_thread_info *btinfo;
2389 struct target_waitstatus status;
2390 enum btrace_thread_flag flags;
2392 btinfo = &tp->btrace;
2394 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2395 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2397 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2398 target_pid_to_str (tp->ptid), flags,
2399 btrace_thread_flag_to_str (flags));
2401 /* We can't step without an execution history. */
2402 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2403 return btrace_step_no_history ();
2408 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2411 return btrace_step_stopped_on_request ();
2414 status = record_btrace_single_step_forward (tp);
2415 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2418 return btrace_step_stopped ();
2421 status = record_btrace_single_step_backward (tp);
2422 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2425 return btrace_step_stopped ();
2428 status = record_btrace_single_step_forward (tp);
2429 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2432 btinfo->flags |= flags;
2433 return btrace_step_again ();
2436 status = record_btrace_single_step_backward (tp);
2437 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2440 btinfo->flags |= flags;
2441 return btrace_step_again ();
2444 /* We keep threads moving at the end of their execution history. The to_wait
2445 method will stop the thread for whom the event is reported. */
2446 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2447 btinfo->flags |= flags;
/* A vector of threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2457 /* Announce further events if necessary. */
2460 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2461 const VEC (tp_t) *no_history)
2463 int more_moving, more_no_history;
2465 more_moving = !VEC_empty (tp_t, moving);
2466 more_no_history = !VEC_empty (tp_t, no_history);
2468 if (!more_moving && !more_no_history)
2472 DEBUG ("movers pending");
2474 if (more_no_history)
2475 DEBUG ("no-history pending");
2477 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2480 /* The to_wait method of target record-btrace. */
2483 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2484 struct target_waitstatus *status, int options)
2486 VEC (tp_t) *moving, *no_history;
2487 struct thread_info *tp, *eventing;
2488 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2490 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2492 /* As long as we're not replaying, just forward the request. */
2493 if ((execution_direction != EXEC_REVERSE)
2494 && !record_btrace_is_replaying (ops, minus_one_ptid))
2497 return ops->to_wait (ops, ptid, status, options);
2503 make_cleanup (VEC_cleanup (tp_t), &moving);
2504 make_cleanup (VEC_cleanup (tp_t), &no_history);
2506 /* Keep a work list of moving threads. */
2507 ALL_NON_EXITED_THREADS (tp)
2508 if (ptid_match (tp->ptid, ptid)
2509 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2510 VEC_safe_push (tp_t, moving, tp);
2512 if (VEC_empty (tp_t, moving))
2514 *status = btrace_step_no_resumed ();
2516 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2517 target_waitstatus_to_string (status));
2519 do_cleanups (cleanups);
2523 /* Step moving threads one by one, one step each, until either one thread
2524 reports an event or we run out of threads to step.
2526 When stepping more than one thread, chances are that some threads reach
2527 the end of their execution history earlier than others. If we reported
2528 this immediately, all-stop on top of non-stop would stop all threads and
2529 resume the same threads next time. And we would report the same thread
2530 having reached the end of its execution history again.
2532 In the worst case, this would starve the other threads. But even if other
2533 threads would be allowed to make progress, this would result in far too
2534 many intermediate stops.
2536 We therefore delay the reporting of "no execution history" until we have
2537 nothing else to report. By this time, all threads should have moved to
2538 either the beginning or the end of their execution history. There will
2539 be a single user-visible stop. */
2541 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2546 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2548 *status = record_btrace_step_thread (tp);
2550 switch (status->kind)
2552 case TARGET_WAITKIND_IGNORE:
2556 case TARGET_WAITKIND_NO_HISTORY:
2557 VEC_safe_push (tp_t, no_history,
2558 VEC_ordered_remove (tp_t, moving, ix));
2562 eventing = VEC_unordered_remove (tp_t, moving, ix);
2568 if (eventing == NULL)
2570 /* We started with at least one moving thread. This thread must have
2571 either stopped or reached the end of its execution history.
2573 In the former case, EVENTING must not be NULL.
2574 In the latter case, NO_HISTORY must not be empty. */
2575 gdb_assert (!VEC_empty (tp_t, no_history));
2577 /* We kept threads moving at the end of their execution history. Stop
2578 EVENTING now that we are going to report its stop. */
2579 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2580 eventing->btrace.flags &= ~BTHR_MOVE;
2582 *status = btrace_step_no_history ();
2585 gdb_assert (eventing != NULL);
2587 /* We kept threads replaying at the end of their execution history. Stop
2588 replaying EVENTING now that we are going to report its stop. */
2589 record_btrace_stop_replaying_at_end (eventing);
2591 /* Stop all other threads. */
2592 if (!target_is_non_stop_p ())
2593 ALL_NON_EXITED_THREADS (tp)
2594 record_btrace_cancel_resume (tp);
2596 /* In async mode, we need to announce further events. */
2597 if (target_is_async_p ())
2598 record_btrace_maybe_mark_async_event (moving, no_history);
2600 /* Start record histories anew from the current position. */
2601 record_btrace_clear_histories (&eventing->btrace);
2603 /* We moved the replay position but did not update registers. */
2604 registers_changed_ptid (eventing->ptid);
2606 DEBUG ("wait ended by thread %s (%s): %s",
2607 print_thread_id (eventing),
2608 target_pid_to_str (eventing->ptid),
2609 target_waitstatus_to_string (status));
2611 do_cleanups (cleanups);
2612 return eventing->ptid;
2615 /* The to_stop method of target record-btrace. */
2618 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2620 DEBUG ("stop %s", target_pid_to_str (ptid));
2622 /* As long as we're not replaying, just forward the request. */
2623 if ((execution_direction != EXEC_REVERSE)
2624 && !record_btrace_is_replaying (ops, minus_one_ptid))
2627 ops->to_stop (ops, ptid);
2631 struct thread_info *tp;
2633 ALL_NON_EXITED_THREADS (tp)
2634 if (ptid_match (tp->ptid, ptid))
2636 tp->btrace.flags &= ~BTHR_MOVE;
2637 tp->btrace.flags |= BTHR_STOP;
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2650 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2653 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2655 if (record_btrace_is_replaying (ops, minus_one_ptid))
2657 struct thread_info *tp = inferior_thread ();
2659 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2662 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2665 /* The to_supports_stopped_by_sw_breakpoint method of target
2669 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2671 if (record_btrace_is_replaying (ops, minus_one_ptid))
2674 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2677 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2680 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2682 if (record_btrace_is_replaying (ops, minus_one_ptid))
2684 struct thread_info *tp = inferior_thread ();
2686 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2689 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2692 /* The to_supports_stopped_by_hw_breakpoint method of target
2696 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2698 if (record_btrace_is_replaying (ops, minus_one_ptid))
2701 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2704 /* The to_update_thread_list method of target record-btrace. */
2707 record_btrace_update_thread_list (struct target_ops *ops)
2709 /* We don't add or remove threads during replay. */
2710 if (record_btrace_is_replaying (ops, minus_one_ptid))
2713 /* Forward the request. */
2715 ops->to_update_thread_list (ops);
2718 /* The to_thread_alive method of target record-btrace. */
2721 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2723 /* We don't add or remove threads during replay. */
2724 if (record_btrace_is_replaying (ops, minus_one_ptid))
2725 return find_thread_ptid (ptid) != NULL;
2727 /* Forward the request. */
2729 return ops->to_thread_alive (ops, ptid);
2732 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2736 record_btrace_set_replay (struct thread_info *tp,
2737 const struct btrace_insn_iterator *it)
2739 struct btrace_thread_info *btinfo;
2741 btinfo = &tp->btrace;
2743 if (it == NULL || it->function == NULL)
2744 record_btrace_stop_replaying (tp);
2747 if (btinfo->replay == NULL)
2748 record_btrace_start_replaying (tp);
2749 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2752 *btinfo->replay = *it;
2753 registers_changed_ptid (tp->ptid);
2756 /* Start anew from the new replay position. */
2757 record_btrace_clear_histories (btinfo);
2759 stop_pc = regcache_read_pc (get_current_regcache ());
2760 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2763 /* The to_goto_record_begin method of target record-btrace. */
2766 record_btrace_goto_begin (struct target_ops *self)
2768 struct thread_info *tp;
2769 struct btrace_insn_iterator begin;
2771 tp = require_btrace_thread ();
2773 btrace_insn_begin (&begin, &tp->btrace);
2775 /* Skip gaps at the beginning of the trace. */
2776 while (btrace_insn_get (&begin) == NULL)
2780 steps = btrace_insn_next (&begin, 1);
2782 error (_("No trace."));
2785 record_btrace_set_replay (tp, &begin);
2788 /* The to_goto_record_end method of target record-btrace. */
2791 record_btrace_goto_end (struct target_ops *ops)
2793 struct thread_info *tp;
2795 tp = require_btrace_thread ();
2797 record_btrace_set_replay (tp, NULL);
2800 /* The to_goto_record method of target record-btrace. */
2803 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2805 struct thread_info *tp;
2806 struct btrace_insn_iterator it;
2807 unsigned int number;
2812 /* Check for wrap-arounds. */
2814 error (_("Instruction number out of range."));
2816 tp = require_btrace_thread ();
2818 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2820 /* Check if the instruction could not be found or is a gap. */
2821 if (found == 0 || btrace_insn_get (&it) == NULL)
2822 error (_("No such instruction."));
2824 record_btrace_set_replay (tp, &it);
2827 /* The to_record_stop_replaying method of target record-btrace. */
2830 record_btrace_stop_replaying_all (struct target_ops *self)
2832 struct thread_info *tp;
2834 ALL_NON_EXITED_THREADS (tp)
2835 record_btrace_stop_replaying (tp);
2838 /* The to_execution_direction target method. */
2840 static enum exec_direction_kind
2841 record_btrace_execution_direction (struct target_ops *self)
2843 return record_btrace_resume_exec_dir;
2846 /* The to_prepare_to_generate_core target method. */
2849 record_btrace_prepare_to_generate_core (struct target_ops *self)
2851 record_btrace_generating_corefile = 1;
2854 /* The to_done_generating_core target method. */
2857 record_btrace_done_generating_core (struct target_ops *self)
2859 record_btrace_generating_corefile = 0;
2862 /* Initialize the record-btrace target ops. */
2865 init_record_btrace_ops (void)
2867 struct target_ops *ops;
2869 ops = &record_btrace_ops;
2870 ops->to_shortname = "record-btrace";
2871 ops->to_longname = "Branch tracing target";
2872 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2873 ops->to_open = record_btrace_open;
2874 ops->to_close = record_btrace_close;
2875 ops->to_async = record_btrace_async;
2876 ops->to_detach = record_detach;
2877 ops->to_disconnect = record_btrace_disconnect;
2878 ops->to_mourn_inferior = record_mourn_inferior;
2879 ops->to_kill = record_kill;
2880 ops->to_stop_recording = record_btrace_stop_recording;
2881 ops->to_info_record = record_btrace_info;
2882 ops->to_insn_history = record_btrace_insn_history;
2883 ops->to_insn_history_from = record_btrace_insn_history_from;
2884 ops->to_insn_history_range = record_btrace_insn_history_range;
2885 ops->to_call_history = record_btrace_call_history;
2886 ops->to_call_history_from = record_btrace_call_history_from;
2887 ops->to_call_history_range = record_btrace_call_history_range;
2888 ops->to_record_is_replaying = record_btrace_is_replaying;
2889 ops->to_record_will_replay = record_btrace_will_replay;
2890 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2891 ops->to_xfer_partial = record_btrace_xfer_partial;
2892 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2893 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2894 ops->to_fetch_registers = record_btrace_fetch_registers;
2895 ops->to_store_registers = record_btrace_store_registers;
2896 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2897 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2898 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2899 ops->to_resume = record_btrace_resume;
2900 ops->to_commit_resume = record_btrace_commit_resume;
2901 ops->to_wait = record_btrace_wait;
2902 ops->to_stop = record_btrace_stop;
2903 ops->to_update_thread_list = record_btrace_update_thread_list;
2904 ops->to_thread_alive = record_btrace_thread_alive;
2905 ops->to_goto_record_begin = record_btrace_goto_begin;
2906 ops->to_goto_record_end = record_btrace_goto_end;
2907 ops->to_goto_record = record_btrace_goto;
2908 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2909 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2910 ops->to_supports_stopped_by_sw_breakpoint
2911 = record_btrace_supports_stopped_by_sw_breakpoint;
2912 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2913 ops->to_supports_stopped_by_hw_breakpoint
2914 = record_btrace_supports_stopped_by_hw_breakpoint;
2915 ops->to_execution_direction = record_btrace_execution_direction;
2916 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2917 ops->to_done_generating_core = record_btrace_done_generating_core;
2918 ops->to_stratum = record_stratum;
2919 ops->to_magic = OPS_MAGIC;
2922 /* Start recording in BTS format. */
2925 cmd_record_btrace_bts_start (char *args, int from_tty)
2927 if (args != NULL && *args != 0)
2928 error (_("Invalid argument."));
2930 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2934 execute_command ("target record-btrace", from_tty);
2936 CATCH (exception, RETURN_MASK_ALL)
2938 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2939 throw_exception (exception);
2944 /* Start recording in Intel Processor Trace format. */
2947 cmd_record_btrace_pt_start (char *args, int from_tty)
2949 if (args != NULL && *args != 0)
2950 error (_("Invalid argument."));
2952 record_btrace_conf.format = BTRACE_FORMAT_PT;
2956 execute_command ("target record-btrace", from_tty);
2958 CATCH (exception, RETURN_MASK_ALL)
2960 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2961 throw_exception (exception);
2966 /* Alias for "target record". */
2969 cmd_record_btrace_start (char *args, int from_tty)
2971 if (args != NULL && *args != 0)
2972 error (_("Invalid argument."));
2974 record_btrace_conf.format = BTRACE_FORMAT_PT;
2978 execute_command ("target record-btrace", from_tty);
2980 CATCH (exception, RETURN_MASK_ALL)
2982 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2986 execute_command ("target record-btrace", from_tty);
2988 CATCH (exception, RETURN_MASK_ALL)
2990 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2991 throw_exception (exception);
2998 /* The "set record btrace" command. */
3001 cmd_set_record_btrace (char *args, int from_tty)
3003 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
3006 /* The "show record btrace" command. */
3009 cmd_show_record_btrace (char *args, int from_tty)
3011 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3014 /* The "show record btrace replay-memory-access" command. */
3017 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3018 struct cmd_list_element *c, const char *value)
3020 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3021 replay_memory_access);
3024 /* The "set record btrace bts" command. */
3027 cmd_set_record_btrace_bts (char *args, int from_tty)
3029 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3030 "by an appropriate subcommand.\n"));
3031 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3032 all_commands, gdb_stdout);
3035 /* The "show record btrace bts" command. */
3038 cmd_show_record_btrace_bts (char *args, int from_tty)
3040 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3043 /* The "set record btrace pt" command. */
3046 cmd_set_record_btrace_pt (char *args, int from_tty)
3048 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3049 "by an appropriate subcommand.\n"));
3050 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3051 all_commands, gdb_stdout);
3054 /* The "show record btrace pt" command. */
3057 cmd_show_record_btrace_pt (char *args, int from_tty)
3059 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3084 void _initialize_record_btrace (void);
3086 /* Initialize btrace commands. */
3089 _initialize_record_btrace (void)
/* Register "record btrace" (and alias "record b") under "record"; with no
   subcommand it runs cmd_record_btrace_start.  */
3091 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3092 _("Start branch trace recording."), &record_btrace_cmdlist,
3093 "record btrace ", 0, &record_cmdlist);
3094 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
/* "record btrace bts" (alias "record bts") selects the BTS format.
   NOTE(review): the opening _("\ line of this help string is missing from
   this excerpt.  */
3096 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3098 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3099 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3100 This format may not be available on all processors."),
3101 &record_btrace_cmdlist);
3102 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
/* "record btrace pt" (alias "record pt") selects the Intel PT format.  */
3104 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3106 Start branch trace recording in Intel Processor Trace format.\n\n\
3107 This format may not be available on all processors."),
3108 &record_btrace_cmdlist);
3109 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
/* "set/show record btrace" prefix commands.  */
3111 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3112 _("Set record options"), &set_record_btrace_cmdlist,
3113 "set record btrace ", 0, &set_record_cmdlist);
3115 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3116 _("Show record options"), &show_record_btrace_cmdlist,
3117 "show record btrace ", 0, &show_record_cmdlist);
/* "set/show record btrace replay-memory-access" enum setting, backed by
   the replay_memory_access global and replay_memory_access_types table.  */
3119 add_setshow_enum_cmd ("replay-memory-access", no_class,
3120 replay_memory_access_types, &replay_memory_access, _("\
3121 Set what memory accesses are allowed during replay."), _("\
3122 Show what memory accesses are allowed during replay."),
3123 _("Default is READ-ONLY.\n\n\
3124 The btrace record target does not trace data.\n\
3125 The memory therefore corresponds to the live target and not \
3126 to the current replay position.\n\n\
3127 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3128 When READ-WRITE, allow accesses to read-only and read-write memory during \
3130 NULL, cmd_show_replay_memory_access,
3131 &set_record_btrace_cmdlist,
3132 &show_record_btrace_cmdlist);
/* "set/show record btrace bts" prefix commands and the BTS buffer-size
   setting, backed by record_btrace_conf.bts.size.  */
3134 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3135 _("Set record btrace bts options"),
3136 &set_record_btrace_bts_cmdlist,
3137 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3139 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3140 _("Show record btrace bts options"),
3141 &show_record_btrace_bts_cmdlist,
3142 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3144 add_setshow_uinteger_cmd ("buffer-size", no_class,
3145 &record_btrace_conf.bts.size,
3146 _("Set the record/replay bts buffer size."),
3147 _("Show the record/replay bts buffer size."), _("\
3148 When starting recording request a trace buffer of this size. \
3149 The actual buffer size may differ from the requested size. \
3150 Use \"info record\" to see the actual buffer size.\n\n\
3151 Bigger buffers allow longer recording but also take more time to process \
3152 the recorded execution trace.\n\n\
3153 The trace buffer size may not be changed while recording."), NULL,
3154 show_record_bts_buffer_size_value,
3155 &set_record_btrace_bts_cmdlist,
3156 &show_record_btrace_bts_cmdlist);
/* "set/show record btrace pt" prefix commands and the PT buffer-size
   setting, backed by record_btrace_conf.pt.size.  */
3158 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3159 _("Set record btrace pt options"),
3160 &set_record_btrace_pt_cmdlist,
3161 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3163 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3164 _("Show record btrace pt options"),
3165 &show_record_btrace_pt_cmdlist,
3166 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3168 add_setshow_uinteger_cmd ("buffer-size", no_class,
3169 &record_btrace_conf.pt.size,
3170 _("Set the record/replay pt buffer size."),
3171 _("Show the record/replay pt buffer size."), _("\
3172 Bigger buffers allow longer recording but also take more time to process \
3173 the recorded execution.\n\
3174 The actual buffer size may differ from the requested size. Use \"info record\" \
3175 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3176 &set_record_btrace_pt_cmdlist,
3177 &show_record_btrace_pt_cmdlist);
/* Fill in the target vector and register the target.  */
3179 init_record_btrace_ops ();
3180 add_target (&record_btrace_ops);
/* NOTE(review): this call appears truncated in this excerpt — the
   allocation/free function arguments and closing paren are missing.  */
3182 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
/* Default requested trace-buffer sizes: 64 KiB for BTS, 16 KiB for PT.  */
3185 record_btrace_conf.bts.size = 64 * 1024;
3186 record_btrace_conf.pt.size = 16 * 1024;