1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
43 /* The target_ops of record-btrace. */
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
/* NOTE(review): presumably NULL-terminated for use as an enum-command list;
   the terminator is not visible in this excerpt — confirm.  */
static const char *const replay_memory_access_types[] =
  replay_memory_access_read_only,
  replay_memory_access_read_write,

/* The currently allowed replay memory access type.  Defaults to
   read-only, i.e. no memory writes while replaying.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.
   While set, replay-related access restrictions are lifted.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  if (record_debug != 0) \
    fprintf_unfiltered (gdb_stdlog, \
			"[record-btrace] " msg "\n", ##args); \
/* Update the branch trace for the current thread and return a pointer to its
   thread_info struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
  struct thread_info *tp;

  /* Look up the thread for the current inferior ptid.  */
  tp = find_thread_ptid (inferior_ptid);
    error (_("No thread."));

  if (btrace_is_empty (tp))
    error (_("No trace."));

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
  struct thread_info *tp;

  /* This throws if there is no thread or no trace.  */
  tp = require_btrace_thread ();
/* Enable branch tracing for one thread.  Warn on errors.  */

record_btrace_enable_warn (struct thread_info *tp)
    btrace_enable (tp, &record_btrace_conf);
  /* Demote enable failures to a warning so one bad thread does not
     abort enabling the remaining threads.  */
  CATCH (error, RETURN_MASK_ERROR)
      warning ("%s", error.message);

/* Callback function to disable branch tracing for one thread.
   ARG is the thread_info, passed via the cleanup mechanism.  */

record_btrace_disable_callback (void *arg)
  struct thread_info *tp = (struct thread_info *) arg;

/* Enable automatic tracing of new threads.  */

record_btrace_auto_enable (void)
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);

/* Disable automatic tracing of new threads.  */

record_btrace_auto_disable (void)
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;

/* The record-btrace async event handler function.  Forwards the event
   to the regular inferior event handling.  */

record_btrace_handle_async_inferior_event (gdb_client_data data)
  inferior_event_handler (INF_REG_EVENT, NULL);
/* See record-btrace.h.  Install the record-btrace target on top of the
   target stack and announce the record format change.  */

record_btrace_push_target (void)
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);

/* The to_open method of target record-btrace.
   ARGS optionally selects the threads to trace (a thread-number list);
   with no ARGS, all non-exited threads are traced.  */

record_btrace_open (const char *args, int from_tty)
  struct cleanup *disable_chain;
  struct thread_info *tp;

  /* Recording requires a live inferior.  */
  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* On error, undo tracing for all threads enabled so far.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);

  record_btrace_push_target ();

  /* Success - keep tracing enabled.  */
  discard_cleanups (disable_chain);

/* The to_stop_recording method of target record-btrace.  */

record_btrace_stop_recording (struct target_ops *self)
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)

/* The to_disconnect method of target record-btrace.  */

record_btrace_disconnect (struct target_ops *self, const char *args,
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);

/* The to_close method of target record-btrace.  */

record_btrace_close (struct target_ops *self)
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
/* The to_async method of target record-btrace.  Toggles our async event
   handler and forwards to the target beneath.  */

record_btrace_async (struct target_ops *ops, int enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);

/* Adjusts the size and returns a human readable size suffix.
   SIZE is scaled down in-place when it is an exact multiple of a
   power-of-two unit (GB, MB, KB checked largest first).  */

record_btrace_adjust_size (unsigned int *size)
  if ((sz & ((1u << 30) - 1)) == 0)
  else if ((sz & ((1u << 20) - 1)) == 0)
  else if ((sz & ((1u << 10) - 1)) == 0)

/* Print a BTS configuration.  */

record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);

/* Print an Intel Processor Trace configuration.  */

record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 /* Print a branch tracing configuration. */
381 record_btrace_print_conf (const struct btrace_config *conf)
383 printf_unfiltered (_("Recording format: %s.\n"),
384 btrace_format_string (conf->format));
386 switch (conf->format)
388 case BTRACE_FORMAT_NONE:
391 case BTRACE_FORMAT_BTS:
392 record_btrace_print_bts_conf (&conf->bts);
395 case BTRACE_FORMAT_PT:
396 record_btrace_print_pt_conf (&conf->pt);
400 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* The to_info_record method of target record-btrace.  Prints the
   recording configuration and per-thread trace statistics.  */

record_btrace_info (struct target_ops *self)
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  tp = find_thread_ptid (inferior_ptid);
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
    record_btrace_print_conf (conf);

  if (!btrace_is_empty (tp))
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count function segments from the end of the call trace.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* The last instruction does not really belong to the trace.  */
	  /* Skip gaps at the end.  */
	      steps = btrace_insn_prev (&insn, 1);
	  insns = btrace_insn_number (&insn);

      gaps = btinfo->ngaps;

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));

/* Print a decode error.  Translates ERRCODE for the given trace FORMAT
   into a human-readable string and emits "[decode error (N): ...]".  */

btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
  /* Fallback for error codes we do not know.  */
  errstr = _("unknown");

    case BTRACE_FORMAT_BTS:
	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
	case BDE_PT_USER_QUIT:
	  errstr = _("trace decode cancelled");
	case BDE_PT_DISABLED:
	  errstr = _("disabled");
	case BDE_PT_OVERFLOW:
	  errstr = _("overflow");
	    /* Negative codes come straight from libipt.  */
	    errstr = pt_errstr (pt_errcode (errcode));
#endif /* defined (HAVE_LIBIPT) */

  ui_out_text (uiout, _("["));
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));

/* Print an unsigned int as a ui-out field named FLD.  */

ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
  ui_out_field_fmt (uiout, fld, "%u", val);
/* A range of source lines within one symtab.  The range is half-open:
   [begin, end).  An empty range has end <= begin.  */

struct btrace_line_range
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */

  /* The last line (exclusive).  */

/* Construct a line range for SYMTAB covering [BEGIN, END).  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
  struct btrace_line_range range;

  range.symtab = symtab;

/* Add a line to a line range, widening the range as needed.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
  if (range.end <= range.begin)
      /* This is the first entry.  */
      range.end = line + 1;
  else if (line < range.begin)
  else if (range.end < line)

/* Return non-zero if RANGE is empty, zero otherwise.  */

btrace_line_range_is_empty (struct btrace_line_range range)
  return range.end <= range.begin;

/* Return non-zero if LHS contains RHS, zero otherwise.  Containment
   requires the same symtab and [rhs.begin, rhs.end) within
   [lhs.begin, lhs.end).  */

btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
622 /* Find the line range associated with PC. */
624 static struct btrace_line_range
625 btrace_find_line_range (CORE_ADDR pc)
627 struct btrace_line_range range;
628 struct linetable_entry *lines;
629 struct linetable *ltable;
630 struct symtab *symtab;
633 symtab = find_pc_line_symtab (pc);
635 return btrace_mk_line_range (NULL, 0, 0);
637 ltable = SYMTAB_LINETABLE (symtab);
639 return btrace_mk_line_range (symtab, 0, 0);
641 nlines = ltable->nitems;
642 lines = ltable->item;
644 return btrace_mk_line_range (symtab, 0, 0);
646 range = btrace_mk_line_range (symtab, 0, 0);
647 for (i = 0; i < nlines - 1; i++)
649 if ((lines[i].pc == pc) && (lines[i].line != 0))
650 range = btrace_line_range_add (range, lines[i].line);
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
      /* Close the tuple/list for the previous source line, if any.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");

/* Disassemble a section of the recorded instruction trace.
   Prints instructions from BEGIN (inclusive) to END (exclusive),
   interleaving source lines when DISASSEMBLY_SOURCE is in FLAGS.  */

btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      /* Only print lines we have not printed already.  */
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
	  else if (ui_item_chain == NULL)
		= make_cleanup_ui_out_tuple_begin_end (uiout,
	      /* No source information.  */
	      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");

	  gdb_assert (ui_item_chain != NULL);

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);

  do_cleanups (cleanups);
/* The to_insn_history method of target record-btrace.
   SIZE instructions are shown; a negative SIZE means going backward from
   the previous history position.  */

record_btrace_insn_history (struct target_ops *self, int size, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
      begin = history->begin;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

	  covered = btrace_insn_prev (&begin, context);
	  covered = btrace_insn_next (&end, context);

    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);

/* The to_insn_history_range method of target record-btrace.
   Shows instructions numbered FROM to TO, both inclusive.  */

record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);

/* The to_insn_history_from method of target record-btrace.
   Shows SIZE instructions centered on or anchored at FROM.  */

record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
  ULONGEST begin, end, context;

  context = abs (size);
    error (_("Bad record instruction-history-size."));

      begin = from - context + 1;
      end = from + context - 1;

      /* Check for wrap-around.  */

  record_btrace_insn_history_range (self, begin, end, flags);
/* Print the instruction number range for a function call history line.
   The printed range is inclusive on both ends.  */

btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
  struct btrace_insn *insn;
  struct symtab *symtab;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining/macros) and line 0.  */
      if (sal.symtab != symtab || sal.line == 0)

      begin = min (begin, sal.line);
      end = max (end, sal.line);

/* Print the source line information for a function call history line.
   Emits "file:min,max" style fields.  */

btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);

/* Get the name of a branch trace function.  Prefers the full symbol,
   falls back to the minimal symbol.  */

btrace_get_bfun_name (const struct btrace_function *bfun)
  struct minimal_symbol *msym;

    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
/* Disassemble a section of the recorded function trace.
   Prints one line per function segment from BEGIN to END, optionally
   with call indentation, instruction ranges, and source lines.  */

btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;

      bfun = btrace_call_get (&it);

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	  /* Indent by call depth to visualize the call structure.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");

	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);

      ui_out_text (uiout, "\n");

/* The to_call_history method of target record-btrace.
   SIZE calls are shown; a negative SIZE means going backward from the
   previous history position.  */

record_btrace_call_history (struct target_ops *self, int size, int int_flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
      begin = history->begin;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

	  covered = btrace_call_prev (&begin, context);
	  covered = btrace_call_next (&end, context);

    btrace_call_history (uiout, btinfo, &begin, &end, flags);
      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_call_history_range method of target record-btrace.
   Shows calls numbered FROM to TO, both inclusive.  */

record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);

/* The to_call_history_from method of target record-btrace.
   Shows SIZE calls centered on or anchored at FROM.  */

record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
    error (_("Bad record function-call-history-size."));

      begin = from - context + 1;
      end = from + context - 1;

      /* Check for wrap-around.  */

  record_btrace_call_history_range (self, begin, end, flags);

/* The to_record_is_replaying method of target record-btrace.
   Returns non-zero if any thread matching PTID is replaying.  */

record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))

/* The to_record_will_replay method of target record-btrace.
   Reverse execution always replays; forward execution replays only if
   already replaying.  */

record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
/* The to_xfer_partial method of target record-btrace.
   While replaying (and not generating a core file), restricts memory
   access according to the replay-memory-access setting: writes are
   refused and reads are limited to read-only sections.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
	case TARGET_OBJECT_MEMORY:
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
		return TARGET_XFER_UNAVAILABLE;

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		    /* Truncate the request to fit into this section.  */
		    len = min (len, section->endaddr - offset);

	    return TARGET_XFER_UNAVAILABLE;

  /* Forward the request.  */
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);

/* The to_insert_breakpoint method of target record-btrace.  */

record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
  CATCH (except, RETURN_MASK_ALL)
      /* Restore the access setting before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;

/* The to_remove_breakpoint method of target record-btrace.  */

record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
  CATCH (except, RETURN_MASK_ALL)
      /* Restore the access setting before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The to_fetch_registers method of target record-btrace.
   While replaying, only the PC register is available; it is supplied
   from the current replay instruction.  Otherwise, forwards to the
   target beneath.  */

record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);

/* The to_store_registers method of target record-btrace.
   Writing registers is refused while replaying.  */

record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t->to_store_registers (t, regcache, regno);

/* The to_prepare_to_store method of target record-btrace.
   Skipped while replaying; otherwise forwarded beneath.  */

record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))

  t->to_prepare_to_store (t, regcache);
1537 /* The branch trace frame cache.
   Per-frame data for the btrace unwinders, allocated on the frame
   obstack (see bfcache_new) and looked up via the BFCACHE hash table.  */
1539 struct btrace_frame_cache
/* The thread the frame belongs to.  */
1542 struct thread_info *tp;
1544 /* The frame info. */
1545 struct frame_info *frame;
1547 /* The branch trace function segment. */
1548 const struct btrace_function *bfun;
1551 /* A struct btrace_frame_cache hash table indexed by NEXT. */
/* Keyed on the frame_info pointer (see bfcache_hash / bfcache_eq).  */
1553 static htab_t bfcache;
1555 /* hash_f for htab_create_alloc of bfcache. */
/* Hash on the frame pointer identity.  */
1558 bfcache_hash (const void *arg)
1560 const struct btrace_frame_cache *cache
1561 = (const struct btrace_frame_cache *) arg;
1563 return htab_hash_pointer (cache->frame);
1566 /* eq_f for htab_create_alloc of bfcache. */
/* Two cache entries are equal iff they describe the same frame.  */
1569 bfcache_eq (const void *arg1, const void *arg2)
1571 const struct btrace_frame_cache *cache1
1572 = (const struct btrace_frame_cache *) arg1;
1573 const struct btrace_frame_cache *cache2
1574 = (const struct btrace_frame_cache *) arg2;
1576 return cache1->frame == cache2->frame;
1579 /* Create a new btrace frame cache.
   The cache is allocated on FRAME's obstack, so its lifetime is tied to
   the frame, and it is registered in BFCACHE for later lookup.  The slot
   must be empty: each frame gets exactly one cache entry.  */
1581 static struct btrace_frame_cache *
1582 bfcache_new (struct frame_info *frame)
1584 struct btrace_frame_cache *cache;
1587 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1588 cache->frame = frame;
1590 slot = htab_find_slot (bfcache, cache, INSERT);
1591 gdb_assert (*slot == NULL);
1597 /* Extract the branch trace function from a branch trace frame.
   Looks FRAME up in BFCACHE without inserting; the visible code returns
   the cached entry's function segment (NULL handling for a missing slot
   is in lines omitted from this excerpt).  */
1599 static const struct btrace_function *
1600 btrace_get_frame_function (struct frame_info *frame)
1602 const struct btrace_frame_cache *cache;
1603 const struct btrace_function *bfun;
1604 struct btrace_frame_cache pattern;
/* Use a stack-allocated pattern keyed only on FRAME for the lookup.  */
1607 pattern.frame = frame;
1609 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1613 cache = (const struct btrace_frame_cache *) *slot;
1617 /* Implement stop_reason method for record_btrace_frame_unwind. */
/* Unwinding stops when the traced function segment has no caller
   (bfun->up == NULL): beyond that the history is unavailable.  */
1619 static enum unwind_stop_reason
1620 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1623 const struct btrace_frame_cache *cache;
1624 const struct btrace_function *bfun;
1626 cache = (const struct btrace_frame_cache *) *this_cache;
1628 gdb_assert (bfun != NULL);
1630 if (bfun->up == NULL)
1631 return UNWIND_UNAVAILABLE;
1633 return UNWIND_NO_REASON;
1636 /* Implement this_id method for record_btrace_frame_unwind. */
/* Build an "unavailable stack" frame id: CODE is the function start
   address, SPECIAL is the number of the first segment of this function
   (we walk segment.prev to the front so all segments of one function
   instance share one id).  */
1639 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1640 struct frame_id *this_id)
1642 const struct btrace_frame_cache *cache;
1643 const struct btrace_function *bfun;
1644 CORE_ADDR code, special;
1646 cache = (const struct btrace_frame_cache *) *this_cache;
1649 gdb_assert (bfun != NULL);
1651 while (bfun->segment.prev != NULL)
1652 bfun = bfun->segment.prev;
1654 code = get_frame_func (this_frame);
1655 special = bfun->number;
1657 *this_id = frame_id_build_unavailable_stack_special (code, special);
1659 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1660 btrace_get_bfun_name (cache->bfun),
1661 core_addr_to_string_nz (this_id->code_addr),
1662 core_addr_to_string_nz (this_id->special_addr));
1665 /* Implement prev_register method for record_btrace_frame_unwind. */
/* Only the PC can be unwound from branch trace.  The caller's resume
   address comes from the caller segment: for a return link, the first
   instruction of the caller; otherwise the instruction after the call
   (last caller insn plus its length).  */
1667 static struct value *
1668 record_btrace_frame_prev_register (struct frame_info *this_frame,
1672 const struct btrace_frame_cache *cache;
1673 const struct btrace_function *bfun, *caller;
1674 const struct btrace_insn *insn;
1675 struct gdbarch *gdbarch;
1679 gdbarch = get_frame_arch (this_frame);
1680 pcreg = gdbarch_pc_regnum (gdbarch);
1681 if (pcreg < 0 || regnum != pcreg)
1682 throw_error (NOT_AVAILABLE_ERROR,
1683 _("Registers are not available in btrace record history"));
1685 cache = (const struct btrace_frame_cache *) *this_cache;
1687 gdb_assert (bfun != NULL);
1691 throw_error (NOT_AVAILABLE_ERROR,
1692 _("No caller in btrace record history"));
1694 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1696 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1701 insn = VEC_last (btrace_insn_s, caller->insn);
/* Resume after the call instruction.  */
1704 pc += gdb_insn_length (gdbarch, pc);
1707 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1708 btrace_get_bfun_name (bfun), bfun->level,
1709 core_addr_to_string_nz (pc));
1711 return frame_unwind_got_address (this_frame, regnum, pc);
1714 /* Implement sniffer method for record_btrace_frame_unwind. */
/* Claim the frame while the thread is replaying: for the innermost frame
   take the current replay position's function segment; for outer frames
   use the callee's up-link (unless the callee links via tailcall, which
   the tailcall unwinder handles).  On success the frame cache is created
   and stored in *THIS_CACHE.  */
1717 record_btrace_frame_sniffer (const struct frame_unwind *self,
1718 struct frame_info *this_frame,
1721 const struct btrace_function *bfun;
1722 struct btrace_frame_cache *cache;
1723 struct thread_info *tp;
1724 struct frame_info *next;
1726 /* THIS_FRAME does not contain a reference to its thread. */
1727 tp = find_thread_ptid (inferior_ptid);
1728 gdb_assert (tp != NULL);
1731 next = get_next_frame (this_frame);
1734 const struct btrace_insn_iterator *replay;
1736 replay = tp->btrace.replay;
1738 bfun = replay->function;
1742 const struct btrace_function *callee;
1744 callee = btrace_get_frame_function (next);
1745 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1752 DEBUG ("[frame] sniffed frame for %s on level %d",
1753 btrace_get_bfun_name (bfun), bfun->level);
1755 /* This is our frame. Initialize the frame cache. */
1756 cache = bfcache_new (this_frame);
1760 *this_cache = cache;
1764 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
/* Claim a frame only when the next (inner) frame's function segment
   reached its caller via a tail call (BFUN_UP_LINKS_TO_TAILCALL).  */
1767 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1768 struct frame_info *this_frame,
1771 const struct btrace_function *bfun, *callee;
1772 struct btrace_frame_cache *cache;
1773 struct frame_info *next;
1775 next = get_next_frame (this_frame);
1779 callee = btrace_get_frame_function (next);
1783 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1790 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1791 btrace_get_bfun_name (bfun), bfun->level);
1793 /* This is our frame. Initialize the frame cache. */
1794 cache = bfcache_new (this_frame);
1795 cache->tp = find_thread_ptid (inferior_ptid);
1798 *this_cache = cache;
/* dealloc_cache method for the btrace frame unwinders: drop the frame's
   entry from BFCACHE.  The cache memory itself lives on the frame
   obstack and is released with the frame.  */
1803 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1805 struct btrace_frame_cache *cache;
1808 cache = (struct btrace_frame_cache *) this_cache;
1810 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1811 gdb_assert (slot != NULL);
1813 htab_remove_elt (bfcache, cache);
1816 /* btrace recording does not store previous memory content, nor the stack
1817 frames' content. Any unwinding would return erroneous results as the stack
1818 contents no longer match the changed PC value restored from history.
1819 Therefore this unwinder reports any possibly unwound registers as
/* Normal-frame unwinder used while replaying branch trace.  Only the PC
   can be unwound (see record_btrace_frame_prev_register); all other
   registers are reported unavailable.  */
1822 const struct frame_unwind record_btrace_frame_unwind =
1825 record_btrace_frame_unwind_stop_reason,
1826 record_btrace_frame_this_id,
1827 record_btrace_frame_prev_register,
1829 record_btrace_frame_sniffer,
1830 record_btrace_frame_dealloc_cache
/* Tailcall-frame unwinder used while replaying branch trace.  Shares the
   stop-reason/id/prev-register methods with the normal unwinder; only the
   sniffer differs (it checks BFUN_UP_LINKS_TO_TAILCALL).  */
1833 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1836 record_btrace_frame_unwind_stop_reason,
1837 record_btrace_frame_this_id,
1838 record_btrace_frame_prev_register,
1840 record_btrace_tailcall_frame_sniffer,
1841 record_btrace_frame_dealloc_cache
1844 /* Implement the to_get_unwinder method. */
/* Hand the btrace unwinder to the frame machinery.  */
1846 static const struct frame_unwind *
1847 record_btrace_to_get_unwinder (struct target_ops *self)
1849 return &record_btrace_frame_unwind;
1852 /* Implement the to_get_tailcall_unwinder method. */
/* Hand the btrace tailcall unwinder to the frame machinery.  */
1854 static const struct frame_unwind *
1855 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1857 return &record_btrace_tailcall_frame_unwind;
1860 /* Return a human-readable string for FLAG.
   NOTE(review): the switch over the btrace_thread_flag values is mostly
   missing from this excerpt; only two case results are visible.  */
1863 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1871 return "reverse-step";
1877 return "reverse-cont";
1886 /* Indicate that TP should be resumed according to FLAG.
   This only records the intent in TP's btrace flags; the actual stepping
   happens later in record_btrace_wait.  */
1889 record_btrace_resume_thread (struct thread_info *tp,
1890 enum btrace_thread_flag flag)
1892 struct btrace_thread_info *btinfo;
1894 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1895 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1897 btinfo = &tp->btrace;
1899 /* Fetch the latest branch trace. */
1902 /* A resume request overwrites a preceding resume or stop request. */
1903 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1904 btinfo->flags |= flag;
1907 /* Get the current frame for TP.
   Temporarily switches INFERIOR_PTID to TP and clears the thread's
   EXECUTING flag so get_current_frame can recompute the stack; both are
   restored on the normal and the exception path.  */
1909 static struct frame_info *
1910 get_thread_current_frame (struct thread_info *tp)
1912 struct frame_info *frame;
1913 ptid_t old_inferior_ptid;
1916 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1917 old_inferior_ptid = inferior_ptid;
1918 inferior_ptid = tp->ptid;
1920 /* Clear the executing flag to allow changes to the current frame.
1921 We are not actually running, yet. We just started a reverse execution
1922 command or a record goto command.
1923 For the latter, EXECUTING is false and this has no effect.
1924 For the former, EXECUTING is true and we're in to_wait, about to
1925 move the thread. Since we need to recompute the stack, we temporarily
1926 set EXECUTING to false. */
1927 executing = is_executing (inferior_ptid);
1928 set_executing (inferior_ptid, 0);
1933 frame = get_current_frame ();
1935 CATCH (except, RETURN_MASK_ALL)
1937 /* Restore the previous execution state. */
1938 set_executing (inferior_ptid, executing);
1940 /* Restore the previous inferior_ptid. */
1941 inferior_ptid = old_inferior_ptid;
1943 throw_exception (except);
1947 /* Restore the previous execution state. */
1948 set_executing (inferior_ptid, executing);
1950 /* Restore the previous inferior_ptid. */
1951 inferior_ptid = old_inferior_ptid;
1956 /* Start replaying a thread.
   Positions the new replay iterator at the end of the trace (the current
   instruction), skipping trailing gaps, and fixes up any stepping-related
   frame ids that were computed with the normal unwinder.  On error the
   iterator is freed and the thread is left not replaying.  */
1958 static struct btrace_insn_iterator *
1959 record_btrace_start_replaying (struct thread_info *tp)
1961 struct btrace_insn_iterator *replay;
1962 struct btrace_thread_info *btinfo;
1964 btinfo = &tp->btrace;
1967 /* We can't start replaying without trace. */
1968 if (btinfo->begin == NULL)
1971 /* GDB stores the current frame_id when stepping in order to detect steps
1973 Since frames are computed differently when we're replaying, we need to
1974 recompute those stored frames and fix them up so we can still detect
1975 subroutines after we started replaying. */
1978 struct frame_info *frame;
1979 struct frame_id frame_id;
1980 int upd_step_frame_id, upd_step_stack_frame_id;
1982 /* The current frame without replaying - computed via normal unwind. */
1983 frame = get_thread_current_frame (tp);
1984 frame_id = get_frame_id (frame);
1986 /* Check if we need to update any stepping-related frame id's. */
1987 upd_step_frame_id = frame_id_eq (frame_id,
1988 tp->control.step_frame_id);
1989 upd_step_stack_frame_id = frame_id_eq (frame_id,
1990 tp->control.step_stack_frame_id);
1992 /* We start replaying at the end of the branch trace. This corresponds
1993 to the current instruction. */
1994 replay = XNEW (struct btrace_insn_iterator);
1995 btrace_insn_end (replay, btinfo);
1997 /* Skip gaps at the end of the trace. */
1998 while (btrace_insn_get (replay) == NULL)
2002 steps = btrace_insn_prev (replay, 1);
2004 error (_("No trace."));
2007 /* We're not replaying, yet. */
2008 gdb_assert (btinfo->replay == NULL);
2009 btinfo->replay = replay;
2011 /* Make sure we're not using any stale registers. */
2012 registers_changed_ptid (tp->ptid);
2014 /* The current frame with replaying - computed via btrace unwind. */
2015 frame = get_thread_current_frame (tp);
2016 frame_id = get_frame_id (frame);
2018 /* Replace stepping related frames where necessary. */
2019 if (upd_step_frame_id)
2020 tp->control.step_frame_id = frame_id;
2021 if (upd_step_stack_frame_id)
2022 tp->control.step_stack_frame_id = frame_id;
2024 CATCH (except, RETURN_MASK_ALL)
2026 xfree (btinfo->replay);
2027 btinfo->replay = NULL;
2029 registers_changed_ptid (tp->ptid);
2031 throw_exception (except);
2038 /* Stop replaying a thread.
   Frees the replay iterator (free (NULL) is fine if not replaying) and
   invalidates cached registers so the live state is refetched.  */
2041 record_btrace_stop_replaying (struct thread_info *tp)
2043 struct btrace_thread_info *btinfo;
2045 btinfo = &tp->btrace;
2047 xfree (btinfo->replay);
2048 btinfo->replay = NULL;
2050 /* Make sure we're not leaving any stale registers. */
2051 registers_changed_ptid (tp->ptid);
2054 /* Stop replaying TP if it is at the end of its execution history.
   A replay position equal to the trace end means the thread has caught
   up with "now", so replaying can be dropped transparently.  */
2057 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2059 struct btrace_insn_iterator *replay, end;
2060 struct btrace_thread_info *btinfo;
2062 btinfo = &tp->btrace;
2063 replay = btinfo->replay;
2068 btrace_insn_end (&end, btinfo);
2070 if (btrace_insn_cmp (replay, &end) == 0)
2071 record_btrace_stop_replaying (tp);
2074 /* The to_resume method of target record-btrace.
   When not replaying and moving forward, the request is forwarded to the
   target beneath.  Otherwise this only records per-thread move flags
   (BTHR_STEP/BTHR_CONT or their reverse variants); record_btrace_wait
   performs the actual stepping.  */
2077 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2078 enum gdb_signal signal)
2080 struct thread_info *tp;
2081 enum btrace_thread_flag flag, cflag;
2083 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2084 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2085 step ? "step" : "cont");
2087 /* Store the execution direction of the last resume.
2089 If there is more than one to_resume call, we have to rely on infrun
2090 to not change the execution direction in-between. */
2091 record_btrace_resume_exec_dir = execution_direction;
2093 /* As long as we're not replaying, just forward the request.
2095 For non-stop targets this means that no thread is replaying. In order to
2096 make progress, we may need to explicitly move replaying threads to the end
2097 of their execution history. */
2098 if ((execution_direction != EXEC_REVERSE)
2099 && !record_btrace_is_replaying (ops, minus_one_ptid))
2102 ops->to_resume (ops, ptid, step, signal);
2106 /* Compute the btrace thread flag for the requested move. */
2107 if (execution_direction == EXEC_REVERSE)
2109 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2114 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2118 /* We just indicate the resume intent here. The actual stepping happens in
2119 record_btrace_wait below.
2121 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2122 if (!target_is_non_stop_p ())
2124 gdb_assert (ptid_match (inferior_ptid, ptid));
2126 ALL_NON_EXITED_THREADS (tp)
2127 if (ptid_match (tp->ptid, ptid))
2129 if (ptid_match (tp->ptid, inferior_ptid))
2130 record_btrace_resume_thread (tp, flag);
2132 record_btrace_resume_thread (tp, cflag);
2137 ALL_NON_EXITED_THREADS (tp)
2138 if (ptid_match (tp->ptid, ptid))
2139 record_btrace_resume_thread (tp, flag);
2142 /* Async support. */
2143 if (target_can_async_p ())
2146 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2150 /* Cancel resuming TP.
   Clears any pending move/stop flags and drops replay if TP already sits
   at the end of its history.  */
2153 record_btrace_cancel_resume (struct thread_info *tp)
2155 enum btrace_thread_flag flags;
2157 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2161 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2162 print_thread_id (tp),
2163 target_pid_to_str (tp->ptid), flags,
2164 btrace_thread_flag_to_str (flags));
2166 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2167 record_btrace_stop_replaying_at_end (tp);
2170 /* Return a target_waitstatus indicating that we ran out of history. */
2172 static struct target_waitstatus
2173 btrace_step_no_history (void)
2175 struct target_waitstatus status;
2177 status.kind = TARGET_WAITKIND_NO_HISTORY;
2182 /* Return a target_waitstatus indicating that a step finished.
   SIGTRAP mirrors what a real single-step stop would report.  */
2184 static struct target_waitstatus
2185 btrace_step_stopped (void)
2187 struct target_waitstatus status;
2189 status.kind = TARGET_WAITKIND_STOPPED;
2190 status.value.sig = GDB_SIGNAL_TRAP;
2195 /* Return a target_waitstatus indicating that a thread was stopped as
   requested (GDB_SIGNAL_0: no signal involved, unlike btrace_step_stopped).  */
2198 static struct target_waitstatus
2199 btrace_step_stopped_on_request (void)
2201 struct target_waitstatus status;
2203 status.kind = TARGET_WAITKIND_STOPPED;
2204 status.value.sig = GDB_SIGNAL_0;
2209 /* Return a target_waitstatus indicating a spurious stop.
   Used by the single-step helpers to mean "moved one insn, keep going".  */
2211 static struct target_waitstatus
2212 btrace_step_spurious (void)
2214 struct target_waitstatus status;
2216 status.kind = TARGET_WAITKIND_SPURIOUS;
2221 /* Return a target_waitstatus indicating that the thread was not resumed. */
2223 static struct target_waitstatus
2224 btrace_step_no_resumed (void)
2226 struct target_waitstatus status;
2228 status.kind = TARGET_WAITKIND_NO_RESUMED;
2233 /* Return a target_waitstatus indicating that we should wait again.
   TARGET_WAITKIND_IGNORE tells the wait loop to keep iterating.  */
2235 static struct target_waitstatus
2236 btrace_step_again (void)
2238 struct target_waitstatus status;
2240 status.kind = TARGET_WAITKIND_IGNORE;
2245 /* Clear the record histories.
   Invalidates the cached insn/call history views so the next "record
   instruction-history" / "record function-call-history" starts anew from
   the current position.  */
2248 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2250 xfree (btinfo->insn_history);
2251 xfree (btinfo->call_history);
2253 btinfo->insn_history = NULL;
2254 btinfo->call_history = NULL;
2257 /* Check whether TP's current replay position is at a breakpoint.
   Returns the result of record_check_stopped_by_breakpoint for the replay
   PC, also recording the stop reason in BTINFO.  */
2260 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2262 struct btrace_insn_iterator *replay;
2263 struct btrace_thread_info *btinfo;
2264 const struct btrace_insn *insn;
2265 struct inferior *inf;
2267 btinfo = &tp->btrace;
2268 replay = btinfo->replay;
2273 insn = btrace_insn_get (replay);
2277 inf = find_inferior_ptid (tp->ptid);
2281 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2282 &btinfo->stop_reason);
2285 /* Step one instruction in forward direction.
   Returns no-history when not replaying or when the replay position
   reaches the trace end; stopped when stepping over a breakpoint;
   spurious when one instruction was consumed and stepping may continue.  */
2287 static struct target_waitstatus
2288 record_btrace_single_step_forward (struct thread_info *tp)
2290 struct btrace_insn_iterator *replay, end;
2291 struct btrace_thread_info *btinfo;
2293 btinfo = &tp->btrace;
2294 replay = btinfo->replay;
2296 /* We're done if we're not replaying. */
2298 return btrace_step_no_history ();
2300 /* Check if we're stepping a breakpoint. */
2301 if (record_btrace_replay_at_breakpoint (tp))
2302 return btrace_step_stopped ();
2304 /* Skip gaps during replay. */
2309 /* We will bail out here if we continue stepping after reaching the end
2310 of the execution history. */
2311 steps = btrace_insn_next (replay, 1);
2313 return btrace_step_no_history ();
2315 while (btrace_insn_get (replay) == NULL);
2317 /* Determine the end of the instruction trace. */
2318 btrace_insn_end (&end, btinfo);
2320 /* The execution trace contains (and ends with) the current instruction.
2321 This instruction has not been executed, yet, so the trace really ends
2322 one instruction earlier. */
2323 if (btrace_insn_cmp (replay, &end) == 0)
2324 return btrace_step_no_history ();
2326 return btrace_step_spurious ();
2329 /* Step one instruction in backward direction.
   Starts replaying if needed.  Note the breakpoint check happens AFTER
   the step, matching infrun's reverse-stepping PC convention.  */
2331 static struct target_waitstatus
2332 record_btrace_single_step_backward (struct thread_info *tp)
2334 struct btrace_insn_iterator *replay;
2335 struct btrace_thread_info *btinfo;
2337 btinfo = &tp->btrace;
2338 replay = btinfo->replay;
2340 /* Start replaying if we're not already doing so. */
2342 replay = record_btrace_start_replaying (tp);
2344 /* If we can't step any further, we reached the end of the history.
2345 Skip gaps during replay. */
2350 steps = btrace_insn_prev (replay, 1);
2352 return btrace_step_no_history ();
2354 while (btrace_insn_get (replay) == NULL);
2356 /* Check if we're stepping a breakpoint.
2358 For reverse-stepping, this check is after the step. There is logic in
2359 infrun.c that handles reverse-stepping separately. See, for example,
2360 proceed and adjust_pc_after_break.
2362 This code assumes that for reverse-stepping, PC points to the last
2363 de-executed instruction, whereas for forward-stepping PC points to the
2364 next to-be-executed instruction. */
2365 if (record_btrace_replay_at_breakpoint (tp))
2366 return btrace_step_stopped ();
2368 return btrace_step_spurious ();
2371 /* Step a single thread.
   Consumes TP's pending move/stop flags and dispatches to the forward or
   backward single-step helper.  For CONT/RCONT the flags are re-armed on
   a spurious result so stepping continues on the next iteration.
   NOTE(review): the switch statement dispatching on FLAGS is partially
   missing from this excerpt; the case labels are not visible.  */
2373 static struct target_waitstatus
2374 record_btrace_step_thread (struct thread_info *tp)
2376 struct btrace_thread_info *btinfo;
2377 struct target_waitstatus status;
2378 enum btrace_thread_flag flags;
2380 btinfo = &tp->btrace;
2382 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2383 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2385 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2386 target_pid_to_str (tp->ptid), flags,
2387 btrace_thread_flag_to_str (flags));
2389 /* We can't step without an execution history. */
2390 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2391 return btrace_step_no_history ();
2396 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2399 return btrace_step_stopped_on_request ();
2402 status = record_btrace_single_step_forward (tp);
2403 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2406 return btrace_step_stopped ();
2409 status = record_btrace_single_step_backward (tp);
2410 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2413 return btrace_step_stopped ();
2416 status = record_btrace_single_step_forward (tp);
2417 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2420 btinfo->flags |= flags;
2421 return btrace_step_again ();
2424 status = record_btrace_single_step_backward (tp);
2425 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2428 btinfo->flags |= flags;
2429 return btrace_step_again ();
2432 /* We keep threads moving at the end of their execution history. The to_wait
2433 method will stop the thread for whom the event is reported. */
2434 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2435 btinfo->flags |= flags;
2440 /* A vector of threads.
   Pointer typedef so thread_info pointers can be used with the VEC
   macros below (the DEF_VEC declaration is not visible in this excerpt).  */
2442 typedef struct thread_info * tp_t;
2445 /* Announce further events if necessary.
   If threads are still moving or still have pending no-history reports,
   re-arm the async event handler so to_wait gets called again.  */
2448 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2449 const VEC (tp_t) *no_history)
2451 int more_moving, more_no_history;
2453 more_moving = !VEC_empty (tp_t, moving);
2454 more_no_history = !VEC_empty (tp_t, no_history);
2456 if (!more_moving && !more_no_history)
2460 DEBUG ("movers pending");
2462 if (more_no_history)
2463 DEBUG ("no-history pending");
2465 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2468 /* The to_wait method of target record-btrace.
   Forwards to the target beneath when not replaying.  Otherwise steps the
   matching resumed threads round-robin, one insn each, until a thread
   reports an event; "no history" reports are deferred (collected in
   NO_HISTORY) until nothing else can be reported, to avoid starving
   other threads with repeated end-of-history stops.  */
2471 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2472 struct target_waitstatus *status, int options)
2474 VEC (tp_t) *moving, *no_history;
2475 struct thread_info *tp, *eventing;
2476 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2478 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2480 /* As long as we're not replaying, just forward the request. */
2481 if ((execution_direction != EXEC_REVERSE)
2482 && !record_btrace_is_replaying (ops, minus_one_ptid))
2485 return ops->to_wait (ops, ptid, status, options);
2491 make_cleanup (VEC_cleanup (tp_t), &moving);
2492 make_cleanup (VEC_cleanup (tp_t), &no_history);
2494 /* Keep a work list of moving threads. */
2495 ALL_NON_EXITED_THREADS (tp)
2496 if (ptid_match (tp->ptid, ptid)
2497 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2498 VEC_safe_push (tp_t, moving, tp);
2500 if (VEC_empty (tp_t, moving))
2502 *status = btrace_step_no_resumed ();
2504 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2505 target_waitstatus_to_string (status));
2507 do_cleanups (cleanups);
2511 /* Step moving threads one by one, one step each, until either one thread
2512 reports an event or we run out of threads to step.
2514 When stepping more than one thread, chances are that some threads reach
2515 the end of their execution history earlier than others. If we reported
2516 this immediately, all-stop on top of non-stop would stop all threads and
2517 resume the same threads next time. And we would report the same thread
2518 having reached the end of its execution history again.
2520 In the worst case, this would starve the other threads. But even if other
2521 threads would be allowed to make progress, this would result in far too
2522 many intermediate stops.
2524 We therefore delay the reporting of "no execution history" until we have
2525 nothing else to report. By this time, all threads should have moved to
2526 either the beginning or the end of their execution history. There will
2527 be a single user-visible stop. */
2529 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2534 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2536 *status = record_btrace_step_thread (tp);
2538 switch (status->kind)
2540 case TARGET_WAITKIND_IGNORE:
2544 case TARGET_WAITKIND_NO_HISTORY:
2545 VEC_safe_push (tp_t, no_history,
2546 VEC_ordered_remove (tp_t, moving, ix));
2550 eventing = VEC_unordered_remove (tp_t, moving, ix);
2556 if (eventing == NULL)
2558 /* We started with at least one moving thread. This thread must have
2559 either stopped or reached the end of its execution history.
2561 In the former case, EVENTING must not be NULL.
2562 In the latter case, NO_HISTORY must not be empty. */
2563 gdb_assert (!VEC_empty (tp_t, no_history));
2565 /* We kept threads moving at the end of their execution history. Stop
2566 EVENTING now that we are going to report its stop. */
2567 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2568 eventing->btrace.flags &= ~BTHR_MOVE;
2570 *status = btrace_step_no_history ();
2573 gdb_assert (eventing != NULL);
2575 /* We kept threads replaying at the end of their execution history. Stop
2576 replaying EVENTING now that we are going to report its stop. */
2577 record_btrace_stop_replaying_at_end (eventing);
2579 /* Stop all other threads. */
2580 if (!target_is_non_stop_p ())
2581 ALL_NON_EXITED_THREADS (tp)
2582 record_btrace_cancel_resume (tp);
2584 /* In async mode, we need to announce further events. */
2585 if (target_is_async_p ())
2586 record_btrace_maybe_mark_async_event (moving, no_history);
2588 /* Start record histories anew from the current position. */
2589 record_btrace_clear_histories (&eventing->btrace);
2591 /* We moved the replay position but did not update registers. */
2592 registers_changed_ptid (eventing->ptid);
2594 DEBUG ("wait ended by thread %s (%s): %s",
2595 print_thread_id (eventing),
2596 target_pid_to_str (eventing->ptid),
2597 target_waitstatus_to_string (status));
2599 do_cleanups (cleanups);
2600 return eventing->ptid;
2603 /* The to_stop method of target record-btrace.
   Forwards to the target beneath when not replaying; otherwise converts
   pending move requests of the matching threads into stop requests,
   honored by record_btrace_step_thread.  */
2606 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2608 DEBUG ("stop %s", target_pid_to_str (ptid));
2610 /* As long as we're not replaying, just forward the request. */
2611 if ((execution_direction != EXEC_REVERSE)
2612 && !record_btrace_is_replaying (ops, minus_one_ptid))
2615 ops->to_stop (ops, ptid);
2619 struct thread_info *tp;
2621 ALL_NON_EXITED_THREADS (tp)
2622 if (ptid_match (tp->ptid, ptid))
2624 tp->btrace.flags &= ~BTHR_MOVE;
2625 tp->btrace.flags |= BTHR_STOP;
2630 /* The to_can_execute_reverse method of target record-btrace.
   NOTE(review): the body (presumably "return 1;") is not visible in this
   excerpt.  */
2633 record_btrace_can_execute_reverse (struct target_ops *self)
2638 /* The to_stopped_by_sw_breakpoint method of target record-btrace.
   During replay, answer from the recorded stop reason; otherwise ask the
   target beneath.  */
2641 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2643 if (record_btrace_is_replaying (ops, minus_one_ptid))
2645 struct thread_info *tp = inferior_thread ();
2647 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2650 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2653 /* The to_supports_stopped_by_sw_breakpoint method of target
   record-btrace.  Always supported during replay (visible return for the
   replay case is omitted from this excerpt); otherwise forwarded.  */
2657 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2659 if (record_btrace_is_replaying (ops, minus_one_ptid))
2662 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2665 /* The to_stopped_by_hw_breakpoint method of target record-btrace.
   (Comment previously said "sw" — copy/paste error.)  During replay,
   answer from the recorded stop reason; otherwise ask the target beneath.  */
2668 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2670 if (record_btrace_is_replaying (ops, minus_one_ptid))
2672 struct thread_info *tp = inferior_thread ();
2674 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2677 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2680 /* The to_supports_stopped_by_hw_breakpoint method of target
   record-btrace.  Always supported during replay (visible return for the
   replay case is omitted from this excerpt); otherwise forwarded.  */
2684 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2686 if (record_btrace_is_replaying (ops, minus_one_ptid))
2689 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2692 /* The to_update_thread_list method of target record-btrace.
   No-op while replaying; otherwise forwarded beneath.  */
2695 record_btrace_update_thread_list (struct target_ops *ops)
2697 /* We don't add or remove threads during replay. */
2698 if (record_btrace_is_replaying (ops, minus_one_ptid))
2701 /* Forward the request. */
2703 ops->to_update_thread_list (ops);
2706 /* The to_thread_alive method of target record-btrace.
   During replay the thread set is frozen, so existence in GDB's thread
   list is the answer; otherwise forwarded beneath.  */
2709 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2711 /* We don't add or remove threads during replay. */
2712 if (record_btrace_is_replaying (ops, minus_one_ptid))
2713 return find_thread_ptid (ptid) != NULL;
2715 /* Forward the request. */
2717 return ops->to_thread_alive (ops, ptid);
2720 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
   is stopped.  Otherwise replay starts (if needed) and the replay
   position is moved to *IT; registers and cached histories are
   invalidated, STOP_PC is updated, and the new frame is printed.  */
2724 record_btrace_set_replay (struct thread_info *tp,
2725 const struct btrace_insn_iterator *it)
2727 struct btrace_thread_info *btinfo;
2729 btinfo = &tp->btrace;
2731 if (it == NULL || it->function == NULL)
2732 record_btrace_stop_replaying (tp);
2735 if (btinfo->replay == NULL)
2736 record_btrace_start_replaying (tp);
2737 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2740 *btinfo->replay = *it;
2741 registers_changed_ptid (tp->ptid);
2744 /* Start anew from the new replay position. */
2745 record_btrace_clear_histories (btinfo);
2747 stop_pc = regcache_read_pc (get_current_regcache ());
2748 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2751 /* The to_goto_record_begin method of target record-btrace.
   Moves the replay position to the first recorded instruction.  */
2754 record_btrace_goto_begin (struct target_ops *self)
2756 struct thread_info *tp;
2757 struct btrace_insn_iterator begin;
2759 tp = require_btrace_thread ();
2761 btrace_insn_begin (&begin, &tp->btrace);
2762 record_btrace_set_replay (tp, &begin);
2765 /* The to_goto_record_end method of target record-btrace.
   A NULL iterator means "stop replaying", i.e. go back to "now".  */
2768 record_btrace_goto_end (struct target_ops *ops)
2770 struct thread_info *tp;
2772 tp = require_btrace_thread ();
2774 record_btrace_set_replay (tp, NULL);
2777 /* The to_goto_record method of target record-btrace.
   INSN is the user-supplied instruction number; it is range-checked
   against the narrower unsigned int used by the lookup.  */
2780 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2782 struct thread_info *tp;
2783 struct btrace_insn_iterator it;
2784 unsigned int number;
2789 /* Check for wrap-arounds. */
2791 error (_("Instruction number out of range."));
2793 tp = require_btrace_thread ();
2795 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2797 error (_("No such instruction."));
2799 record_btrace_set_replay (tp, &it);
2802 /* The to_record_stop_replaying method of target record-btrace.
   Stops replay for every live thread.  */
2805 record_btrace_stop_replaying_all (struct target_ops *self)
2807 struct thread_info *tp;
2809 ALL_NON_EXITED_THREADS (tp)
2810 record_btrace_stop_replaying (tp);
2813 /* The to_execution_direction target method.
   Reports the direction recorded by the last to_resume call.  */
2815 static enum exec_direction_kind
2816 record_btrace_execution_direction (struct target_ops *self)
2818 return record_btrace_resume_exec_dir;
2821 /* The to_prepare_to_generate_core target method.
   While the flag is set, memory/register access checks that normally
   apply during replay are relaxed so a core file can be written.  */
2824 record_btrace_prepare_to_generate_core (struct target_ops *self)
2826 record_btrace_generating_corefile = 1;
2829 /* The to_done_generating_core target method.
   Re-enables the replay-time access restrictions.  */
2832 record_btrace_done_generating_core (struct target_ops *self)
2834 record_btrace_generating_corefile = 0;
2837 /* Initialize the record-btrace target ops. */
2840 init_record_btrace_ops (void)
2842 struct target_ops *ops;
2844 ops = &record_btrace_ops;
2845 ops->to_shortname = "record-btrace";
2846 ops->to_longname = "Branch tracing target";
2847 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2848 ops->to_open = record_btrace_open;
2849 ops->to_close = record_btrace_close;
2850 ops->to_async = record_btrace_async;
2851 ops->to_detach = record_detach;
2852 ops->to_disconnect = record_btrace_disconnect;
2853 ops->to_mourn_inferior = record_mourn_inferior;
2854 ops->to_kill = record_kill;
2855 ops->to_stop_recording = record_btrace_stop_recording;
2856 ops->to_info_record = record_btrace_info;
2857 ops->to_insn_history = record_btrace_insn_history;
2858 ops->to_insn_history_from = record_btrace_insn_history_from;
2859 ops->to_insn_history_range = record_btrace_insn_history_range;
2860 ops->to_call_history = record_btrace_call_history;
2861 ops->to_call_history_from = record_btrace_call_history_from;
2862 ops->to_call_history_range = record_btrace_call_history_range;
2863 ops->to_record_is_replaying = record_btrace_is_replaying;
2864 ops->to_record_will_replay = record_btrace_will_replay;
2865 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2866 ops->to_xfer_partial = record_btrace_xfer_partial;
2867 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2868 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2869 ops->to_fetch_registers = record_btrace_fetch_registers;
2870 ops->to_store_registers = record_btrace_store_registers;
2871 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2872 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2873 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2874 ops->to_resume = record_btrace_resume;
2875 ops->to_wait = record_btrace_wait;
2876 ops->to_stop = record_btrace_stop;
2877 ops->to_update_thread_list = record_btrace_update_thread_list;
2878 ops->to_thread_alive = record_btrace_thread_alive;
2879 ops->to_goto_record_begin = record_btrace_goto_begin;
2880 ops->to_goto_record_end = record_btrace_goto_end;
2881 ops->to_goto_record = record_btrace_goto;
2882 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2883 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2884 ops->to_supports_stopped_by_sw_breakpoint
2885 = record_btrace_supports_stopped_by_sw_breakpoint;
2886 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2887 ops->to_supports_stopped_by_hw_breakpoint
2888 = record_btrace_supports_stopped_by_hw_breakpoint;
2889 ops->to_execution_direction = record_btrace_execution_direction;
2890 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2891 ops->to_done_generating_core = record_btrace_done_generating_core;
2892 ops->to_stratum = record_stratum;
2893 ops->to_magic = OPS_MAGIC;
2896 /* Start recording in BTS format. */
2899 cmd_record_btrace_bts_start (char *args, int from_tty)
2901 if (args != NULL && *args != 0)
2902 error (_("Invalid argument."));
2904 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2908 execute_command ("target record-btrace", from_tty);
2910 CATCH (exception, RETURN_MASK_ALL)
2912 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2913 throw_exception (exception);
2918 /* Start recording in Intel Processor Trace format. */
2921 cmd_record_btrace_pt_start (char *args, int from_tty)
2923 if (args != NULL && *args != 0)
2924 error (_("Invalid argument."));
2926 record_btrace_conf.format = BTRACE_FORMAT_PT;
2930 execute_command ("target record-btrace", from_tty);
2932 CATCH (exception, RETURN_MASK_ALL)
2934 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2935 throw_exception (exception);
2940 /* Alias for "target record". */
2943 cmd_record_btrace_start (char *args, int from_tty)
2945 if (args != NULL && *args != 0)
2946 error (_("Invalid argument."));
2948 record_btrace_conf.format = BTRACE_FORMAT_PT;
2952 execute_command ("target record-btrace", from_tty);
2954 CATCH (exception, RETURN_MASK_ALL)
2956 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2960 execute_command ("target record-btrace", from_tty);
2962 CATCH (exception, RETURN_MASK_ALL)
2964 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2965 throw_exception (exception);
2972 /* The "set record btrace" command. */
2975 cmd_set_record_btrace (char *args, int from_tty)
2977 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2980 /* The "show record btrace" command. */
2983 cmd_show_record_btrace (char *args, int from_tty)
2985 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2988 /* The "show record btrace replay-memory-access" command. */
2991 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2992 struct cmd_list_element *c, const char *value)
2994 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2995 replay_memory_access);
2998 /* The "set record btrace bts" command. */
3001 cmd_set_record_btrace_bts (char *args, int from_tty)
3003 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3004 "by an appropriate subcommand.\n"));
3005 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3006 all_commands, gdb_stdout);
3009 /* The "show record btrace bts" command. */
3012 cmd_show_record_btrace_bts (char *args, int from_tty)
3014 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3017 /* The "set record btrace pt" command. */
3020 cmd_set_record_btrace_pt (char *args, int from_tty)
3022 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3023 "by an appropriate subcommand.\n"));
3024 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3025 all_commands, gdb_stdout);
3028 /* The "show record btrace pt" command. */
3031 cmd_show_record_btrace_pt (char *args, int from_tty)
3033 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  /* VALUE is the rendered setting supplied by the show machinery.  */
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  /* VALUE is the rendered setting supplied by the show machinery.  */
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3058 void _initialize_record_btrace (void);
3060 /* Initialize btrace commands. */
3063 _initialize_record_btrace (void)
3065 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3066 _("Start branch trace recording."), &record_btrace_cmdlist,
3067 "record btrace ", 0, &record_cmdlist);
3068 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3070 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3072 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3073 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3074 This format may not be available on all processors."),
3075 &record_btrace_cmdlist);
3076 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3078 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3080 Start branch trace recording in Intel Processor Trace format.\n\n\
3081 This format may not be available on all processors."),
3082 &record_btrace_cmdlist);
3083 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3085 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3086 _("Set record options"), &set_record_btrace_cmdlist,
3087 "set record btrace ", 0, &set_record_cmdlist);
3089 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3090 _("Show record options"), &show_record_btrace_cmdlist,
3091 "show record btrace ", 0, &show_record_cmdlist);
3093 add_setshow_enum_cmd ("replay-memory-access", no_class,
3094 replay_memory_access_types, &replay_memory_access, _("\
3095 Set what memory accesses are allowed during replay."), _("\
3096 Show what memory accesses are allowed during replay."),
3097 _("Default is READ-ONLY.\n\n\
3098 The btrace record target does not trace data.\n\
3099 The memory therefore corresponds to the live target and not \
3100 to the current replay position.\n\n\
3101 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3102 When READ-WRITE, allow accesses to read-only and read-write memory during \
3104 NULL, cmd_show_replay_memory_access,
3105 &set_record_btrace_cmdlist,
3106 &show_record_btrace_cmdlist);
3108 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3109 _("Set record btrace bts options"),
3110 &set_record_btrace_bts_cmdlist,
3111 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3113 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3114 _("Show record btrace bts options"),
3115 &show_record_btrace_bts_cmdlist,
3116 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3118 add_setshow_uinteger_cmd ("buffer-size", no_class,
3119 &record_btrace_conf.bts.size,
3120 _("Set the record/replay bts buffer size."),
3121 _("Show the record/replay bts buffer size."), _("\
3122 When starting recording request a trace buffer of this size. \
3123 The actual buffer size may differ from the requested size. \
3124 Use \"info record\" to see the actual buffer size.\n\n\
3125 Bigger buffers allow longer recording but also take more time to process \
3126 the recorded execution trace.\n\n\
3127 The trace buffer size may not be changed while recording."), NULL,
3128 show_record_bts_buffer_size_value,
3129 &set_record_btrace_bts_cmdlist,
3130 &show_record_btrace_bts_cmdlist);
3132 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3133 _("Set record btrace pt options"),
3134 &set_record_btrace_pt_cmdlist,
3135 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3137 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3138 _("Show record btrace pt options"),
3139 &show_record_btrace_pt_cmdlist,
3140 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3142 add_setshow_uinteger_cmd ("buffer-size", no_class,
3143 &record_btrace_conf.pt.size,
3144 _("Set the record/replay pt buffer size."),
3145 _("Show the record/replay pt buffer size."), _("\
3146 Bigger buffers allow longer recording but also take more time to process \
3147 the recorded execution.\n\
3148 The actual buffer size may differ from the requested size. Use \"info record\" \
3149 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3150 &set_record_btrace_pt_cmdlist,
3151 &show_record_btrace_pt_cmdlist);
3153 init_record_btrace_ops ();
3154 add_target (&record_btrace_ops);
3156 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3159 record_btrace_conf.bts.size = 64 * 1024;
3160 record_btrace_conf.pt.size = 16 * 1024;