1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";

/* Table of valid "replay-memory-access" settings, in the order they are
   offered to the user.  */
static const char *const replay_memory_access_types[] =
  replay_memory_access_read_only,
  replay_memory_access_read_write,

/* The currently allowed replay memory access type.  Defaults to read-only
   so a replay session cannot silently modify inferior memory.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.
   While set, the replay memory/register restrictions are bypassed (see
   record_btrace_xfer_partial and record_btrace_fetch_registers).  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  Output is gated on the
   record_debug setting, so it is silent by default.  */

#define DEBUG(msg, args...) \
    if (record_debug != 0) \
      fprintf_unfiltered (gdb_stdlog, \
			  "[record-btrace] " msg "\n", ##args); \
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
  struct thread_info *tp;

  /* Look up the currently selected thread.  */
  tp = find_thread_ptid (inferior_ptid);
    error (_("No thread."));

  /* Refuse to operate on a thread without trace data.  */
  if (btrace_is_empty (tp))
    error (_("No trace."));
/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
  struct thread_info *tp;

  /* Delegate the thread and trace checks.  */
  tp = require_btrace_thread ();
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback; errors must not propagate out of
   the observer, so they are downgraded to warnings.  */

record_btrace_enable_warn (struct thread_info *tp)
  btrace_enable (tp, &record_btrace_conf);
  CATCH (error, RETURN_MASK_ERROR)
      warning ("%s", error.message);
/* Callback function to disable branch tracing for one thread.
   ARG is the thread_info to disable (void * for use as a cleanup).  */

record_btrace_disable_callback (void *arg)
  struct thread_info *tp = (struct thread_info *) arg;
/* Enable automatic tracing of new threads by attaching a new-thread
   observer that enables btrace on each thread as it appears.  */

record_btrace_auto_enable (void)
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
/* Disable automatic tracing of new threads.  Safe to call more than once;
   a second call is a no-op.  */

record_btrace_auto_disable (void)
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  /* Mark the observer as detached so repeated calls are harmless.  */
  record_btrace_thread_observer = NULL;
/* The record-btrace async event handler function.
   Forwards the event to the standard inferior event handler.  */

record_btrace_handle_async_inferior_event (gdb_client_data data)
  inferior_event_handler (INF_REG_EVENT, NULL);
/* The to_open method of target record-btrace.

   ARGS optionally selects the threads to trace (a thread number list);
   when empty, all non-exited threads are traced.  On error, a cleanup
   chain disables tracing again for every thread already enabled.  */

record_btrace_open (const char *args, int from_tty)
  struct cleanup *disable_chain;
  struct thread_info *tp;

  if (!target_has_execution)
    error (_("The program is not being run."));

  /* Recording must not already be active.  */
  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
	btrace_enable (tp, &record_btrace_conf);

	/* Undo the enable if a later thread's enable throws.  */
	make_cleanup (record_btrace_disable_callback, tp);

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* Success: keep tracing enabled on all selected threads.  */
  discard_cleanups (disable_chain);
/* The to_stop_recording method of target record-btrace.
   Stops tracing new threads and disables btrace on every thread that is
   currently being traced.  */

record_btrace_stop_recording (struct target_ops *self)
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    /* Only threads that actually have tracing enabled.  */
    if (tp->btrace.target != NULL)
/* The to_close method of target record-btrace.
   Releases the async event handler and tears down any remaining per-thread
   trace state.  */

record_btrace_close (struct target_ops *self)
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
/* The to_async method of target record-btrace.
   Marks or clears our async event handler per ENABLE, then forwards the
   request to the target beneath.  */

record_btrace_async (struct target_ops *ops, int enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
/* Adjusts the size and returns a human readable size suffix.
   Divides *SIZE down by 2^30/2^20/2^10 when it is an exact multiple,
   so callers can print e.g. "4kB" instead of "4096".  */

record_btrace_adjust_size (unsigned int *size)
  if ((sz & ((1u << 30) - 1)) == 0)
  else if ((sz & ((1u << 20) - 1)) == 0)
  else if ((sz & ((1u << 10) - 1)) == 0)
/* Print a BTS configuration: currently its (scaled) buffer size.  */

record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
/* Print an Intel(R) Processor Trace configuration: currently its (scaled)
   buffer size.  */

record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
351 /* Print a branch tracing configuration. */
354 record_btrace_print_conf (const struct btrace_config *conf)
356 printf_unfiltered (_("Recording format: %s.\n"),
357 btrace_format_string (conf->format));
359 switch (conf->format)
361 case BTRACE_FORMAT_NONE:
364 case BTRACE_FORMAT_BTS:
365 record_btrace_print_bts_conf (&conf->bts);
368 case BTRACE_FORMAT_PT:
369 record_btrace_print_pt_conf (&conf->pt);
373 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* The to_info_record method of target record-btrace.
   Prints the recording configuration, the number of recorded instructions,
   functions and gaps for the current thread, and the replay position if a
   replay is in progress.  */

record_btrace_info (struct target_ops *self)
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  tp = find_thread_ptid (inferior_ptid);
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
    record_btrace_print_conf (conf);

  if (!btrace_is_empty (tp))
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count calls from the last valid call segment.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* The last instruction does not really belong to the trace.  */

      /* Skip gaps at the end.  */
	  steps = btrace_insn_prev (&insn, 1);
	  insns = btrace_insn_number (&insn);

      gaps = btinfo->ngaps;

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
/* Print a decode error.
   ERRCODE is the format-specific decoder error; FORMAT selects how the
   code is translated to a message.  Output goes to UIOUT as
   "[decode error (<code>): <message>]".  */

btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
  /* Fallback message for codes we do not recognize.  */
  errstr = _("unknown");

    case BTRACE_FORMAT_BTS:
	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
	case BDE_PT_USER_QUIT:
	  errstr = _("trace decode cancelled");
	case BDE_PT_DISABLED:
	  errstr = _("disabled");
	case BDE_PT_OVERFLOW:
	  errstr = _("overflow");
	  /* Negative codes come straight from the libipt decoder.  */
	  errstr = pt_errstr (pt_errcode (errcode));
#endif /* defined (HAVE_LIBIPT) */

  ui_out_text (uiout, _("["));
    ui_out_text (uiout, _("decode error ("));
    ui_out_field_int (uiout, "errcode", errcode);
    ui_out_text (uiout, _("): "));
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
/* Print an unsigned int VAL to UIOUT under field name FLD.
   Thin wrapper since ui_out has no native unsigned field printer.  */

ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
  ui_out_field_fmt (uiout, fld, "%u", val);
/* A range of source lines within a single symtab.  */

struct btrace_line_range
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */

  /* The last line (exclusive).  */
/* Construct a line range over SYMTAB from BEGIN (inclusive) to END
   (exclusive).  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
  struct btrace_line_range range;

  range.symtab = symtab;
/* Add a line to a line range, widening the range as needed.
   Returns the (possibly updated) range by value.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
  /* An empty range (end <= begin) collapses to just LINE.  */
  if (range.end <= range.begin)
      /* This is the first entry.  */
      range.end = line + 1;
  else if (line < range.begin)
  else if (range.end < line)
/* Return non-zero if RANGE is empty, zero otherwise.
   A range is empty when its exclusive end does not exceed its begin.  */

btrace_line_range_is_empty (struct btrace_line_range range)
  return range.end <= range.begin;
/* Return non-zero if LHS contains RHS, zero otherwise.
   Both ranges must be over the same symtab for containment to hold.  */

btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
595 /* Find the line range associated with PC. */
597 static struct btrace_line_range
598 btrace_find_line_range (CORE_ADDR pc)
600 struct btrace_line_range range;
601 struct linetable_entry *lines;
602 struct linetable *ltable;
603 struct symtab *symtab;
606 symtab = find_pc_line_symtab (pc);
608 return btrace_mk_line_range (NULL, 0, 0);
610 ltable = SYMTAB_LINETABLE (symtab);
612 return btrace_mk_line_range (symtab, 0, 0);
614 nlines = ltable->nitems;
615 lines = ltable->item;
617 return btrace_mk_line_range (symtab, 0, 0);
619 range = btrace_mk_line_range (symtab, 0, 0);
620 for (i = 0; i < nlines - 1; i++)
622 if ((lines[i].pc == pc) && (lines[i].line != 0))
623 range = btrace_line_range_add (range, lines[i].line);
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
      /* Close the tuple of the previous source line, if any.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Open the instruction list for this source line.  */
      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
/* Disassemble a section of the recorded instruction trace.

   Iterates from BEGIN (inclusive) to END (exclusive), printing decode
   errors for gaps, interleaving source lines when DISASSEMBLY_SOURCE is
   set in FLAGS, and pretty-printing each instruction.  */

btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  /* Always flag speculatively executed instructions in the output.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      /* Only print source lines not already covered by the
		 previously printed range.  */
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
	  else if (ui_item_chain == NULL)
	      = make_cleanup_ui_out_tuple_begin_end (uiout,
	      /* No source information.  */
	      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");

	  gdb_assert (ui_item_chain != NULL);

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);

  do_cleanups (cleanups);
/* The to_insn_history method of target record-btrace.

   SIZE is the requested number of instructions; its sign selects the
   direction relative to the previous request (or the replay/trace-end
   position on the first request).  */

record_btrace_insn_history (struct target_ops *self, int size, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
      /* Subsequent requests continue from the previous window.  */
      begin = history->begin;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

	  covered = btrace_insn_prev (&begin, context);
	  covered = btrace_insn_next (&end, context);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the window for the next relative request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_insn_history_range method of target record-btrace.
   Prints the instruction history between instruction numbers FROM and TO
   (both inclusive); TO is silently truncated to the end of the trace.  */

record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
/* The to_insn_history_from method of target record-btrace.
   Prints SIZE instructions around instruction number FROM; the sign of
   SIZE selects whether the window extends before or after FROM.  */

record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
  ULONGEST begin, end, context;

  context = abs (size);
    error (_("Bad record instruction-history-size."));

      begin = from - context + 1;
      end = from + context - 1;

      /* Check for wrap-around.  */

  /* Delegate to the range printer with the computed window.  */
  record_btrace_insn_history_range (self, begin, end, flags);
/* Print the instruction number range for a function call history line.
   Prints "begin,end" (both inclusive) for the instructions of BFUN.  */

btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  /* A call segment always contains at least one instruction.  */
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
  struct btrace_insn *insn;
  struct symtab *symtab;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip instructions from other symtabs or without line info.  */
      if (sal.symtab != symtab || sal.line == 0)

      begin = min (begin, sal.line);
      end = max (end, sal.line);
/* Print the source line information for a function call history line.
   Prints "file:min" or "file:min,max" when the line range spans more
   than one line.  */

btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
/* Get the name of a branch trace function.
   Prefers the full symbol, falls back to the minimal symbol.  */

btrace_get_bfun_name (const struct btrace_function *bfun)
  struct minimal_symbol *msym;

    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
/* Disassemble a section of the recorded function trace.

   Iterates over call segments from BEGIN (inclusive) to END (exclusive),
   printing index, gap errors, optional call-depth indentation, function
   name, and optional instruction-range and source-line details selected
   by the RECORD_PRINT_* bits in the flags.  */

btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;

      bfun = btrace_call_get (&it);

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	  /* Indent according to call depth.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, " ");

	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);

      ui_out_text (uiout, "\n");
/* The to_call_history method of target record-btrace.

   SIZE is the requested number of call segments; its sign selects the
   direction relative to the previous request (or the replay/trace-end
   position on the first request).  */

record_btrace_call_history (struct target_ops *self, int size, int int_flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
      /* Subsequent requests continue from the previous window.  */
      begin = history->begin;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

	  covered = btrace_call_prev (&begin, context);
	  covered = btrace_call_next (&end, context);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the window for the next relative request.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_call_history_range method of target record-btrace.
   Prints the call history between call numbers FROM and TO (both
   inclusive); TO is silently truncated to the end of the trace.  */

record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;

  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
/* The to_call_history_from method of target record-btrace.
   Prints SIZE call segments around call number FROM; the sign of SIZE
   selects whether the window extends before or after FROM.  */

record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
    error (_("Bad record function-call-history-size."));

      begin = from - context + 1;
      end = from + context - 1;

      /* Check for wrap-around.  */

  /* Delegate to the range printer with the computed window.  */
  record_btrace_call_history_range (self, begin, end, flags);
/* The to_record_is_replaying method of target record-btrace.
   Returns non-zero if any non-exited thread matching PTID is currently
   replaying.  */

record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1310 /* The to_record_will_replay method of target record-btrace. */
1313 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1315 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
/* The to_xfer_partial method of target record-btrace.

   While replaying with read-only replay memory access (and not generating
   a core file), memory writes are rejected and reads are restricted to
   read-only sections; everything else is forwarded beneath.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
	case TARGET_OBJECT_MEMORY:
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
		return TARGET_XFER_UNAVAILABLE;

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		    /* Truncate the request to fit into this section.  */
		    len = min (len, section->endaddr - offset);

	    return TARGET_XFER_UNAVAILABLE;

  /* Forward the request.  */
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
/* The to_insert_breakpoint method of target record-btrace.
   Temporarily switches replay memory access to read-write so the
   breakpoint can be written, restoring the old setting on all paths.  */

record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
  CATCH (except, RETURN_MASK_ALL)
      /* Restore the setting before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The to_remove_breakpoint method of target record-btrace.
   Temporarily switches replay memory access to read-write so the
   breakpoint can be removed, restoring the old setting on all paths.  */

record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
  CATCH (except, RETURN_MASK_ALL)
      /* Restore the setting before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC register is available; it is supplied from
   the current replay instruction.  Otherwise the request is forwarded to
   the target beneath.  */

record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
/* The to_store_registers method of target record-btrace.
   Rejects register writes while replaying (unless generating a core
   file); otherwise forwards to the target beneath.  */

record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t->to_store_registers (t, regcache, regno);
/* The to_prepare_to_store method of target record-btrace.
   Skipped while replaying (unless generating a core file); otherwise
   forwarded to the target beneath.  */

record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))

  t->to_prepare_to_store (t, regcache);
/* The branch trace frame cache.  Associates a frame with the thread and
   call segment it was built from, so the unwinder methods can recover
   them from the frame alone.  */

struct btrace_frame_cache
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;

/* A struct btrace_frame_cache hash table indexed by NEXT.  */
static htab_t bfcache;
/* hash_f for htab_create_alloc of bfcache.
   Hashes a cache entry by its frame pointer.  */

bfcache_hash (const void *arg)
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
1539 /* eq_f for htab_create_alloc of bfcache. */
1542 bfcache_eq (const void *arg1, const void *arg2)
1544 const struct btrace_frame_cache *cache1
1545 = (const struct btrace_frame_cache *) arg1;
1546 const struct btrace_frame_cache *cache2
1547 = (const struct btrace_frame_cache *) arg2;
1549 return cache1->frame == cache2->frame;
1552 /* Create a new btrace frame cache. */
1554 static struct btrace_frame_cache *
1555 bfcache_new (struct frame_info *frame)
1557 struct btrace_frame_cache *cache;
1560 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1561 cache->frame = frame;
1563 slot = htab_find_slot (bfcache, cache, INSERT);
1564 gdb_assert (*slot == NULL);
1570 /* Extract the branch trace function from a branch trace frame. */
1572 static const struct btrace_function *
1573 btrace_get_frame_function (struct frame_info *frame)
1575 const struct btrace_frame_cache *cache;
1576 const struct btrace_function *bfun;
1577 struct btrace_frame_cache pattern;
1580 pattern.frame = frame;
1582 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1586 cache = (const struct btrace_frame_cache *) *slot;
1590 /* Implement stop_reason method for record_btrace_frame_unwind. */
1592 static enum unwind_stop_reason
1593 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1596 const struct btrace_frame_cache *cache;
1597 const struct btrace_function *bfun;
1599 cache = (const struct btrace_frame_cache *) *this_cache;
1601 gdb_assert (bfun != NULL);
1603 if (bfun->up == NULL)
1604 return UNWIND_UNAVAILABLE;
1606 return UNWIND_NO_REASON;
1609 /* Implement this_id method for record_btrace_frame_unwind. */
1612 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1613 struct frame_id *this_id)
1615 const struct btrace_frame_cache *cache;
1616 const struct btrace_function *bfun;
1617 CORE_ADDR code, special;
1619 cache = (const struct btrace_frame_cache *) *this_cache;
1622 gdb_assert (bfun != NULL);
1624 while (bfun->segment.prev != NULL)
1625 bfun = bfun->segment.prev;
1627 code = get_frame_func (this_frame);
1628 special = bfun->number;
1630 *this_id = frame_id_build_unavailable_stack_special (code, special);
1632 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1633 btrace_get_bfun_name (cache->bfun),
1634 core_addr_to_string_nz (this_id->code_addr),
1635 core_addr_to_string_nz (this_id->special_addr));
1638 /* Implement prev_register method for record_btrace_frame_unwind. */
1640 static struct value *
1641 record_btrace_frame_prev_register (struct frame_info *this_frame,
1645 const struct btrace_frame_cache *cache;
1646 const struct btrace_function *bfun, *caller;
1647 const struct btrace_insn *insn;
1648 struct gdbarch *gdbarch;
1652 gdbarch = get_frame_arch (this_frame);
1653 pcreg = gdbarch_pc_regnum (gdbarch);
1654 if (pcreg < 0 || regnum != pcreg)
1655 throw_error (NOT_AVAILABLE_ERROR,
1656 _("Registers are not available in btrace record history"));
1658 cache = (const struct btrace_frame_cache *) *this_cache;
1660 gdb_assert (bfun != NULL);
1664 throw_error (NOT_AVAILABLE_ERROR,
1665 _("No caller in btrace record history"));
1667 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1669 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1674 insn = VEC_last (btrace_insn_s, caller->insn);
1677 pc += gdb_insn_length (gdbarch, pc);
1680 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1681 btrace_get_bfun_name (bfun), bfun->level,
1682 core_addr_to_string_nz (pc));
1684 return frame_unwind_got_address (this_frame, regnum, pc);
1687 /* Implement sniffer method for record_btrace_frame_unwind. */
1690 record_btrace_frame_sniffer (const struct frame_unwind *self,
1691 struct frame_info *this_frame,
1694 const struct btrace_function *bfun;
1695 struct btrace_frame_cache *cache;
1696 struct thread_info *tp;
1697 struct frame_info *next;
1699 /* THIS_FRAME does not contain a reference to its thread. */
1700 tp = find_thread_ptid (inferior_ptid);
1701 gdb_assert (tp != NULL);
1704 next = get_next_frame (this_frame);
1707 const struct btrace_insn_iterator *replay;
1709 replay = tp->btrace.replay;
1711 bfun = replay->function;
1715 const struct btrace_function *callee;
1717 callee = btrace_get_frame_function (next);
1718 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1725 DEBUG ("[frame] sniffed frame for %s on level %d",
1726 btrace_get_bfun_name (bfun), bfun->level);
1728 /* This is our frame. Initialize the frame cache. */
1729 cache = bfcache_new (this_frame);
1733 *this_cache = cache;
1737 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1740 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1741 struct frame_info *this_frame,
1744 const struct btrace_function *bfun, *callee;
1745 struct btrace_frame_cache *cache;
1746 struct frame_info *next;
1748 next = get_next_frame (this_frame);
1752 callee = btrace_get_frame_function (next);
1756 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1763 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1764 btrace_get_bfun_name (bfun), bfun->level);
1766 /* This is our frame. Initialize the frame cache. */
1767 cache = bfcache_new (this_frame);
1768 cache->tp = find_thread_ptid (inferior_ptid);
1771 *this_cache = cache;
1776 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1778 struct btrace_frame_cache *cache;
1781 cache = (struct btrace_frame_cache *) this_cache;
1783 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1784 gdb_assert (slot != NULL);
1786 htab_remove_elt (bfcache, cache);
1789 /* btrace recording does not store previous memory content, neither the stack
1790 frames content. Any unwinding would return errorneous results as the stack
1791 contents no longer matches the changed PC value restored from history.
1792 Therefore this unwinder reports any possibly unwound registers as
1795 const struct frame_unwind record_btrace_frame_unwind =
1798 record_btrace_frame_unwind_stop_reason,
1799 record_btrace_frame_this_id,
1800 record_btrace_frame_prev_register,
1802 record_btrace_frame_sniffer,
1803 record_btrace_frame_dealloc_cache
1806 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1809 record_btrace_frame_unwind_stop_reason,
1810 record_btrace_frame_this_id,
1811 record_btrace_frame_prev_register,
1813 record_btrace_tailcall_frame_sniffer,
1814 record_btrace_frame_dealloc_cache
1817 /* Implement the to_get_unwinder method. */
1819 static const struct frame_unwind *
1820 record_btrace_to_get_unwinder (struct target_ops *self)
1822 return &record_btrace_frame_unwind;
1825 /* Implement the to_get_tailcall_unwinder method. */
1827 static const struct frame_unwind *
1828 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1830 return &record_btrace_tailcall_frame_unwind;
1833 /* Return a human-readable string for FLAG. */
1836 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1844 return "reverse-step";
1850 return "reverse-cont";
1859 /* Indicate that TP should be resumed according to FLAG. */
1862 record_btrace_resume_thread (struct thread_info *tp,
1863 enum btrace_thread_flag flag)
1865 struct btrace_thread_info *btinfo;
1867 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1868 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1870 btinfo = &tp->btrace;
1872 /* Fetch the latest branch trace. */
1875 /* A resume request overwrites a preceding resume or stop request. */
1876 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1877 btinfo->flags |= flag;
1880 /* Get the current frame for TP. */
1882 static struct frame_info *
1883 get_thread_current_frame (struct thread_info *tp)
1885 struct frame_info *frame;
1886 ptid_t old_inferior_ptid;
1889 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1890 old_inferior_ptid = inferior_ptid;
1891 inferior_ptid = tp->ptid;
1893 /* Clear the executing flag to allow changes to the current frame.
1894 We are not actually running, yet. We just started a reverse execution
1895 command or a record goto command.
1896 For the latter, EXECUTING is false and this has no effect.
1897 For the former, EXECUTING is true and we're in to_wait, about to
1898 move the thread. Since we need to recompute the stack, we temporarily
1899 set EXECUTING to flase. */
1900 executing = is_executing (inferior_ptid);
1901 set_executing (inferior_ptid, 0);
1906 frame = get_current_frame ();
1908 CATCH (except, RETURN_MASK_ALL)
1910 /* Restore the previous execution state. */
1911 set_executing (inferior_ptid, executing);
1913 /* Restore the previous inferior_ptid. */
1914 inferior_ptid = old_inferior_ptid;
1916 throw_exception (except);
1920 /* Restore the previous execution state. */
1921 set_executing (inferior_ptid, executing);
1923 /* Restore the previous inferior_ptid. */
1924 inferior_ptid = old_inferior_ptid;
1929 /* Start replaying a thread. */
1931 static struct btrace_insn_iterator *
1932 record_btrace_start_replaying (struct thread_info *tp)
1934 struct btrace_insn_iterator *replay;
1935 struct btrace_thread_info *btinfo;
1937 btinfo = &tp->btrace;
1940 /* We can't start replaying without trace. */
1941 if (btinfo->begin == NULL)
1944 /* GDB stores the current frame_id when stepping in order to detects steps
1946 Since frames are computed differently when we're replaying, we need to
1947 recompute those stored frames and fix them up so we can still detect
1948 subroutines after we started replaying. */
1951 struct frame_info *frame;
1952 struct frame_id frame_id;
1953 int upd_step_frame_id, upd_step_stack_frame_id;
1955 /* The current frame without replaying - computed via normal unwind. */
1956 frame = get_thread_current_frame (tp);
1957 frame_id = get_frame_id (frame);
1959 /* Check if we need to update any stepping-related frame id's. */
1960 upd_step_frame_id = frame_id_eq (frame_id,
1961 tp->control.step_frame_id);
1962 upd_step_stack_frame_id = frame_id_eq (frame_id,
1963 tp->control.step_stack_frame_id);
1965 /* We start replaying at the end of the branch trace. This corresponds
1966 to the current instruction. */
1967 replay = XNEW (struct btrace_insn_iterator);
1968 btrace_insn_end (replay, btinfo);
1970 /* Skip gaps at the end of the trace. */
1971 while (btrace_insn_get (replay) == NULL)
1975 steps = btrace_insn_prev (replay, 1);
1977 error (_("No trace."));
1980 /* We're not replaying, yet. */
1981 gdb_assert (btinfo->replay == NULL);
1982 btinfo->replay = replay;
1984 /* Make sure we're not using any stale registers. */
1985 registers_changed_ptid (tp->ptid);
1987 /* The current frame with replaying - computed via btrace unwind. */
1988 frame = get_thread_current_frame (tp);
1989 frame_id = get_frame_id (frame);
1991 /* Replace stepping related frames where necessary. */
1992 if (upd_step_frame_id)
1993 tp->control.step_frame_id = frame_id;
1994 if (upd_step_stack_frame_id)
1995 tp->control.step_stack_frame_id = frame_id;
1997 CATCH (except, RETURN_MASK_ALL)
1999 xfree (btinfo->replay);
2000 btinfo->replay = NULL;
2002 registers_changed_ptid (tp->ptid);
2004 throw_exception (except);
2011 /* Stop replaying a thread. */
2014 record_btrace_stop_replaying (struct thread_info *tp)
2016 struct btrace_thread_info *btinfo;
2018 btinfo = &tp->btrace;
2020 xfree (btinfo->replay);
2021 btinfo->replay = NULL;
2023 /* Make sure we're not leaving any stale registers. */
2024 registers_changed_ptid (tp->ptid);
2027 /* Stop replaying TP if it is at the end of its execution history. */
2030 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2032 struct btrace_insn_iterator *replay, end;
2033 struct btrace_thread_info *btinfo;
2035 btinfo = &tp->btrace;
2036 replay = btinfo->replay;
2041 btrace_insn_end (&end, btinfo);
2043 if (btrace_insn_cmp (replay, &end) == 0)
2044 record_btrace_stop_replaying (tp);
2047 /* The to_resume method of target record-btrace. */
2050 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2051 enum gdb_signal signal)
2053 struct thread_info *tp;
2054 enum btrace_thread_flag flag, cflag;
2056 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2057 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2058 step ? "step" : "cont");
2060 /* Store the execution direction of the last resume.
2062 If there is more than one to_resume call, we have to rely on infrun
2063 to not change the execution direction in-between. */
2064 record_btrace_resume_exec_dir = execution_direction;
2066 /* As long as we're not replaying, just forward the request.
2068 For non-stop targets this means that no thread is replaying. In order to
2069 make progress, we may need to explicitly move replaying threads to the end
2070 of their execution history. */
2071 if ((execution_direction != EXEC_REVERSE)
2072 && !record_btrace_is_replaying (ops, minus_one_ptid))
2075 ops->to_resume (ops, ptid, step, signal);
2079 /* Compute the btrace thread flag for the requested move. */
2080 if (execution_direction == EXEC_REVERSE)
2082 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2087 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2091 /* We just indicate the resume intent here. The actual stepping happens in
2092 record_btrace_wait below.
2094 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2095 if (!target_is_non_stop_p ())
2097 gdb_assert (ptid_match (inferior_ptid, ptid));
2099 ALL_NON_EXITED_THREADS (tp)
2100 if (ptid_match (tp->ptid, ptid))
2102 if (ptid_match (tp->ptid, inferior_ptid))
2103 record_btrace_resume_thread (tp, flag);
2105 record_btrace_resume_thread (tp, cflag);
2110 ALL_NON_EXITED_THREADS (tp)
2111 if (ptid_match (tp->ptid, ptid))
2112 record_btrace_resume_thread (tp, flag);
2115 /* Async support. */
2116 if (target_can_async_p ())
2119 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2123 /* Cancel resuming TP. */
2126 record_btrace_cancel_resume (struct thread_info *tp)
2128 enum btrace_thread_flag flags;
2130 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2134 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
2135 target_pid_to_str (tp->ptid), flags,
2136 btrace_thread_flag_to_str (flags));
2138 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2139 record_btrace_stop_replaying_at_end (tp);
2142 /* Return a target_waitstatus indicating that we ran out of history. */
2144 static struct target_waitstatus
2145 btrace_step_no_history (void)
2147 struct target_waitstatus status;
2149 status.kind = TARGET_WAITKIND_NO_HISTORY;
2154 /* Return a target_waitstatus indicating that a step finished. */
2156 static struct target_waitstatus
2157 btrace_step_stopped (void)
2159 struct target_waitstatus status;
2161 status.kind = TARGET_WAITKIND_STOPPED;
2162 status.value.sig = GDB_SIGNAL_TRAP;
2167 /* Return a target_waitstatus indicating that a thread was stopped as
2170 static struct target_waitstatus
2171 btrace_step_stopped_on_request (void)
2173 struct target_waitstatus status;
2175 status.kind = TARGET_WAITKIND_STOPPED;
2176 status.value.sig = GDB_SIGNAL_0;
2181 /* Return a target_waitstatus indicating a spurious stop. */
2183 static struct target_waitstatus
2184 btrace_step_spurious (void)
2186 struct target_waitstatus status;
2188 status.kind = TARGET_WAITKIND_SPURIOUS;
2193 /* Return a target_waitstatus indicating that the thread was not resumed. */
2195 static struct target_waitstatus
2196 btrace_step_no_resumed (void)
2198 struct target_waitstatus status;
2200 status.kind = TARGET_WAITKIND_NO_RESUMED;
2205 /* Return a target_waitstatus indicating that we should wait again. */
2207 static struct target_waitstatus
2208 btrace_step_again (void)
2210 struct target_waitstatus status;
2212 status.kind = TARGET_WAITKIND_IGNORE;
2217 /* Clear the record histories. */
2220 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2222 xfree (btinfo->insn_history);
2223 xfree (btinfo->call_history);
2225 btinfo->insn_history = NULL;
2226 btinfo->call_history = NULL;
2229 /* Check whether TP's current replay position is at a breakpoint. */
2232 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2234 struct btrace_insn_iterator *replay;
2235 struct btrace_thread_info *btinfo;
2236 const struct btrace_insn *insn;
2237 struct inferior *inf;
2239 btinfo = &tp->btrace;
2240 replay = btinfo->replay;
2245 insn = btrace_insn_get (replay);
2249 inf = find_inferior_ptid (tp->ptid);
2253 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2254 &btinfo->stop_reason);
2257 /* Step one instruction in forward direction. */
2259 static struct target_waitstatus
2260 record_btrace_single_step_forward (struct thread_info *tp)
2262 struct btrace_insn_iterator *replay, end;
2263 struct btrace_thread_info *btinfo;
2265 btinfo = &tp->btrace;
2266 replay = btinfo->replay;
2268 /* We're done if we're not replaying. */
2270 return btrace_step_no_history ();
2272 /* Check if we're stepping a breakpoint. */
2273 if (record_btrace_replay_at_breakpoint (tp))
2274 return btrace_step_stopped ();
2276 /* Skip gaps during replay. */
2281 /* We will bail out here if we continue stepping after reaching the end
2282 of the execution history. */
2283 steps = btrace_insn_next (replay, 1);
2285 return btrace_step_no_history ();
2287 while (btrace_insn_get (replay) == NULL);
2289 /* Determine the end of the instruction trace. */
2290 btrace_insn_end (&end, btinfo);
2292 /* The execution trace contains (and ends with) the current instruction.
2293 This instruction has not been executed, yet, so the trace really ends
2294 one instruction earlier. */
2295 if (btrace_insn_cmp (replay, &end) == 0)
2296 return btrace_step_no_history ();
2298 return btrace_step_spurious ();
2301 /* Step one instruction in backward direction. */
2303 static struct target_waitstatus
2304 record_btrace_single_step_backward (struct thread_info *tp)
2306 struct btrace_insn_iterator *replay;
2307 struct btrace_thread_info *btinfo;
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2312 /* Start replaying if we're not already doing so. */
2314 replay = record_btrace_start_replaying (tp);
2316 /* If we can't step any further, we reached the end of the history.
2317 Skip gaps during replay. */
2322 steps = btrace_insn_prev (replay, 1);
2324 return btrace_step_no_history ();
2326 while (btrace_insn_get (replay) == NULL);
2328 /* Check if we're stepping a breakpoint.
2330 For reverse-stepping, this check is after the step. There is logic in
2331 infrun.c that handles reverse-stepping separately. See, for example,
2332 proceed and adjust_pc_after_break.
2334 This code assumes that for reverse-stepping, PC points to the last
2335 de-executed instruction, whereas for forward-stepping PC points to the
2336 next to-be-executed instruction. */
2337 if (record_btrace_replay_at_breakpoint (tp))
2338 return btrace_step_stopped ();
2340 return btrace_step_spurious ();
2343 /* Step a single thread. */
2345 static struct target_waitstatus
2346 record_btrace_step_thread (struct thread_info *tp)
2348 struct btrace_thread_info *btinfo;
2349 struct target_waitstatus status;
2350 enum btrace_thread_flag flags;
2352 btinfo = &tp->btrace;
2354 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2355 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2357 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2358 target_pid_to_str (tp->ptid), flags,
2359 btrace_thread_flag_to_str (flags));
2361 /* We can't step without an execution history. */
2362 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2363 return btrace_step_no_history ();
2368 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2371 return btrace_step_stopped_on_request ();
2374 status = record_btrace_single_step_forward (tp);
2375 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2378 return btrace_step_stopped ();
2381 status = record_btrace_single_step_backward (tp);
2382 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2385 return btrace_step_stopped ();
2388 status = record_btrace_single_step_forward (tp);
2389 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2392 btinfo->flags |= flags;
2393 return btrace_step_again ();
2396 status = record_btrace_single_step_backward (tp);
2397 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2400 btinfo->flags |= flags;
2401 return btrace_step_again ();
2404 /* We keep threads moving at the end of their execution history. The to_wait
2405 method will stop the thread for whom the event is reported. */
2406 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2407 btinfo->flags |= flags;
2412 /* A vector of threads. */
2414 typedef struct thread_info * tp_t;
2417 /* Announce further events if necessary. */
2420 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2421 const VEC (tp_t) *no_history)
2423 int more_moving, more_no_history;
2425 more_moving = !VEC_empty (tp_t, moving);
2426 more_no_history = !VEC_empty (tp_t, no_history);
2428 if (!more_moving && !more_no_history)
2432 DEBUG ("movers pending");
2434 if (more_no_history)
2435 DEBUG ("no-history pending");
2437 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2440 /* The to_wait method of target record-btrace. */
2443 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2444 struct target_waitstatus *status, int options)
2446 VEC (tp_t) *moving, *no_history;
2447 struct thread_info *tp, *eventing;
2448 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2450 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2452 /* As long as we're not replaying, just forward the request. */
2453 if ((execution_direction != EXEC_REVERSE)
2454 && !record_btrace_is_replaying (ops, minus_one_ptid))
2457 return ops->to_wait (ops, ptid, status, options);
2463 make_cleanup (VEC_cleanup (tp_t), &moving);
2464 make_cleanup (VEC_cleanup (tp_t), &no_history);
2466 /* Keep a work list of moving threads. */
2467 ALL_NON_EXITED_THREADS (tp)
2468 if (ptid_match (tp->ptid, ptid)
2469 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2470 VEC_safe_push (tp_t, moving, tp);
2472 if (VEC_empty (tp_t, moving))
2474 *status = btrace_step_no_resumed ();
2476 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2477 target_waitstatus_to_string (status));
2479 do_cleanups (cleanups);
2483 /* Step moving threads one by one, one step each, until either one thread
2484 reports an event or we run out of threads to step.
2486 When stepping more than one thread, chances are that some threads reach
2487 the end of their execution history earlier than others. If we reported
2488 this immediately, all-stop on top of non-stop would stop all threads and
2489 resume the same threads next time. And we would report the same thread
2490 having reached the end of its execution history again.
2492 In the worst case, this would starve the other threads. But even if other
2493 threads would be allowed to make progress, this would result in far too
2494 many intermediate stops.
2496 We therefore delay the reporting of "no execution history" until we have
2497 nothing else to report. By this time, all threads should have moved to
2498 either the beginning or the end of their execution history. There will
2499 be a single user-visible stop. */
2501 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2506 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2508 *status = record_btrace_step_thread (tp);
2510 switch (status->kind)
2512 case TARGET_WAITKIND_IGNORE:
2516 case TARGET_WAITKIND_NO_HISTORY:
2517 VEC_safe_push (tp_t, no_history,
2518 VEC_ordered_remove (tp_t, moving, ix));
2522 eventing = VEC_unordered_remove (tp_t, moving, ix);
2528 if (eventing == NULL)
2530 /* We started with at least one moving thread. This thread must have
2531 either stopped or reached the end of its execution history.
2533 In the former case, EVENTING must not be NULL.
2534 In the latter case, NO_HISTORY must not be empty. */
2535 gdb_assert (!VEC_empty (tp_t, no_history));
2537 /* We kept threads moving at the end of their execution history. Stop
2538 EVENTING now that we are going to report its stop. */
2539 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2540 eventing->btrace.flags &= ~BTHR_MOVE;
2542 *status = btrace_step_no_history ();
2545 gdb_assert (eventing != NULL);
2547 /* We kept threads replaying at the end of their execution history. Stop
2548 replaying EVENTING now that we are going to report its stop. */
2549 record_btrace_stop_replaying_at_end (eventing);
2551 /* Stop all other threads. */
2552 if (!target_is_non_stop_p ())
2553 ALL_NON_EXITED_THREADS (tp)
2554 record_btrace_cancel_resume (tp);
2556 /* In async mode, we need to announce further events. */
2557 if (target_is_async_p ())
2558 record_btrace_maybe_mark_async_event (moving, no_history);
2560 /* Start record histories anew from the current position. */
2561 record_btrace_clear_histories (&eventing->btrace);
2563 /* We moved the replay position but did not update registers. */
2564 registers_changed_ptid (eventing->ptid);
2566 DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
2567 target_pid_to_str (eventing->ptid),
2568 target_waitstatus_to_string (status));
2570 do_cleanups (cleanups);
2571 return eventing->ptid;
2574 /* The to_stop method of target record-btrace. */
2577 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2579 DEBUG ("stop %s", target_pid_to_str (ptid));
2581 /* As long as we're not replaying, just forward the request. */
2582 if ((execution_direction != EXEC_REVERSE)
2583 && !record_btrace_is_replaying (ops, minus_one_ptid))
2586 ops->to_stop (ops, ptid);
2590 struct thread_info *tp;
2592 ALL_NON_EXITED_THREADS (tp)
2593 if (ptid_match (tp->ptid, ptid))
2595 tp->btrace.flags &= ~BTHR_MOVE;
2596 tp->btrace.flags |= BTHR_STOP;
2601 /* The to_can_execute_reverse method of target record-btrace. */
2604 record_btrace_can_execute_reverse (struct target_ops *self)
2609 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2612 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2614 if (record_btrace_is_replaying (ops, minus_one_ptid))
2616 struct thread_info *tp = inferior_thread ();
2618 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2621 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2624 /* The to_supports_stopped_by_sw_breakpoint method of target
2628 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2630 if (record_btrace_is_replaying (ops, minus_one_ptid))
2633 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2636 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2639 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2641 if (record_btrace_is_replaying (ops, minus_one_ptid))
2643 struct thread_info *tp = inferior_thread ();
2645 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2648 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2651 /* The to_supports_stopped_by_hw_breakpoint method of target
2655 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2657 if (record_btrace_is_replaying (ops, minus_one_ptid))
2660 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2663 /* The to_update_thread_list method of target record-btrace. */
2666 record_btrace_update_thread_list (struct target_ops *ops)
2668 /* We don't add or remove threads during replay. */
2669 if (record_btrace_is_replaying (ops, minus_one_ptid))
2672 /* Forward the request. */
2674 ops->to_update_thread_list (ops);
2677 /* The to_thread_alive method of target record-btrace. */
2680 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2682 /* We don't add or remove threads during replay. */
2683 if (record_btrace_is_replaying (ops, minus_one_ptid))
2684 return find_thread_ptid (ptid) != NULL;
2686 /* Forward the request. */
2688 return ops->to_thread_alive (ops, ptid);
2691 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2695 record_btrace_set_replay (struct thread_info *tp,
2696 const struct btrace_insn_iterator *it)
2698 struct btrace_thread_info *btinfo;
2700 btinfo = &tp->btrace;
2702 if (it == NULL || it->function == NULL)
2703 record_btrace_stop_replaying (tp);
2706 if (btinfo->replay == NULL)
2707 record_btrace_start_replaying (tp);
2708 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2711 *btinfo->replay = *it;
2712 registers_changed_ptid (tp->ptid);
2715 /* Start anew from the new replay position. */
2716 record_btrace_clear_histories (btinfo);
2718 stop_pc = regcache_read_pc (get_current_regcache ());
2719 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2722 /* The to_goto_record_begin method of target record-btrace. */
2725 record_btrace_goto_begin (struct target_ops *self)
2727 struct thread_info *tp;
2728 struct btrace_insn_iterator begin;
2730 tp = require_btrace_thread ();
2732 btrace_insn_begin (&begin, &tp->btrace);
2733 record_btrace_set_replay (tp, &begin);
2736 /* The to_goto_record_end method of target record-btrace. */
2739 record_btrace_goto_end (struct target_ops *ops)
2741 struct thread_info *tp;
2743 tp = require_btrace_thread ();
2745 record_btrace_set_replay (tp, NULL);
2748 /* The to_goto_record method of target record-btrace. */
2751 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2753 struct thread_info *tp;
2754 struct btrace_insn_iterator it;
2755 unsigned int number;
2760 /* Check for wrap-arounds. */
2762 error (_("Instruction number out of range."));
2764 tp = require_btrace_thread ();
2766 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2768 error (_("No such instruction."));
2770 record_btrace_set_replay (tp, &it);
2773 /* The to_record_stop_replaying method of target record-btrace. */
2776 record_btrace_stop_replaying_all (struct target_ops *self)
2778 struct thread_info *tp;
2780 ALL_NON_EXITED_THREADS (tp)
2781 record_btrace_stop_replaying (tp);
2784 /* The to_execution_direction target method. */
2786 static enum exec_direction_kind
2787 record_btrace_execution_direction (struct target_ops *self)
2789 return record_btrace_resume_exec_dir;
2792 /* The to_prepare_to_generate_core target method. */
2795 record_btrace_prepare_to_generate_core (struct target_ops *self)
2797 record_btrace_generating_corefile = 1;
2800 /* The to_done_generating_core target method. */
2803 record_btrace_done_generating_core (struct target_ops *self)
2805 record_btrace_generating_corefile = 0;
2808 /* Initialize the record-btrace target ops. */
2811 init_record_btrace_ops (void)
2813 struct target_ops *ops;
2815 ops = &record_btrace_ops;
2816 ops->to_shortname = "record-btrace";
2817 ops->to_longname = "Branch tracing target";
2818 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2819 ops->to_open = record_btrace_open;
2820 ops->to_close = record_btrace_close;
2821 ops->to_async = record_btrace_async;
2822 ops->to_detach = record_detach;
2823 ops->to_disconnect = record_disconnect;
2824 ops->to_mourn_inferior = record_mourn_inferior;
2825 ops->to_kill = record_kill;
2826 ops->to_stop_recording = record_btrace_stop_recording;
2827 ops->to_info_record = record_btrace_info;
2828 ops->to_insn_history = record_btrace_insn_history;
2829 ops->to_insn_history_from = record_btrace_insn_history_from;
2830 ops->to_insn_history_range = record_btrace_insn_history_range;
2831 ops->to_call_history = record_btrace_call_history;
2832 ops->to_call_history_from = record_btrace_call_history_from;
2833 ops->to_call_history_range = record_btrace_call_history_range;
2834 ops->to_record_is_replaying = record_btrace_is_replaying;
2835 ops->to_record_will_replay = record_btrace_will_replay;
2836 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2837 ops->to_xfer_partial = record_btrace_xfer_partial;
2838 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2839 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2840 ops->to_fetch_registers = record_btrace_fetch_registers;
2841 ops->to_store_registers = record_btrace_store_registers;
2842 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2843 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2844 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2845 ops->to_resume = record_btrace_resume;
2846 ops->to_wait = record_btrace_wait;
2847 ops->to_stop = record_btrace_stop;
2848 ops->to_update_thread_list = record_btrace_update_thread_list;
2849 ops->to_thread_alive = record_btrace_thread_alive;
2850 ops->to_goto_record_begin = record_btrace_goto_begin;
2851 ops->to_goto_record_end = record_btrace_goto_end;
2852 ops->to_goto_record = record_btrace_goto;
2853 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2854 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2855 ops->to_supports_stopped_by_sw_breakpoint
2856 = record_btrace_supports_stopped_by_sw_breakpoint;
2857 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2858 ops->to_supports_stopped_by_hw_breakpoint
2859 = record_btrace_supports_stopped_by_hw_breakpoint;
2860 ops->to_execution_direction = record_btrace_execution_direction;
2861 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2862 ops->to_done_generating_core = record_btrace_done_generating_core;
2863 ops->to_stratum = record_stratum;
2864 ops->to_magic = OPS_MAGIC;
2867 /* Start recording in BTS format. */
2870 cmd_record_btrace_bts_start (char *args, int from_tty)
2872 if (args != NULL && *args != 0)
2873 error (_("Invalid argument."));
2875 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2879 execute_command ("target record-btrace", from_tty);
2881 CATCH (exception, RETURN_MASK_ALL)
2883 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2884 throw_exception (exception);
2889 /* Start recording Intel(R) Processor Trace. */
2892 cmd_record_btrace_pt_start (char *args, int from_tty)
2894 if (args != NULL && *args != 0)
2895 error (_("Invalid argument."));
2897 record_btrace_conf.format = BTRACE_FORMAT_PT;
2901 execute_command ("target record-btrace", from_tty);
2903 CATCH (exception, RETURN_MASK_ALL)
2905 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2906 throw_exception (exception);
2911 /* Alias for "target record". */
2914 cmd_record_btrace_start (char *args, int from_tty)
2916 if (args != NULL && *args != 0)
2917 error (_("Invalid argument."));
2919 record_btrace_conf.format = BTRACE_FORMAT_PT;
2923 execute_command ("target record-btrace", from_tty);
2925 CATCH (exception, RETURN_MASK_ALL)
2927 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2931 execute_command ("target record-btrace", from_tty);
2933 CATCH (exception, RETURN_MASK_ALL)
2935 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2936 throw_exception (exception);
2943 /* The "set record btrace" command. */
2946 cmd_set_record_btrace (char *args, int from_tty)
2948 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2951 /* The "show record btrace" command. */
2954 cmd_show_record_btrace (char *args, int from_tty)
2956 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2959 /* The "show record btrace replay-memory-access" command. */
2962 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2963 struct cmd_list_element *c, const char *value)
2965 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2966 replay_memory_access);
2969 /* The "set record btrace bts" command. */
2972 cmd_set_record_btrace_bts (char *args, int from_tty)
2974 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2975 "by an appropriate subcommand.\n"));
2976 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2977 all_commands, gdb_stdout);
2980 /* The "show record btrace bts" command. */
2983 cmd_show_record_btrace_bts (char *args, int from_tty)
2985 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2988 /* The "set record btrace pt" command. */
2991 cmd_set_record_btrace_pt (char *args, int from_tty)
2993 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2994 "by an appropriate subcommand.\n"));
2995 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2996 all_commands, gdb_stdout);
2999 /* The "show record btrace pt" command. */
3002 cmd_show_record_btrace_pt (char *args, int from_tty)
3004 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3029 void _initialize_record_btrace (void);
3031 /* Initialize btrace commands. */
3034 _initialize_record_btrace (void)
3036 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3037 _("Start branch trace recording."), &record_btrace_cmdlist,
3038 "record btrace ", 0, &record_cmdlist);
3039 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3041 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3043 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3044 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3045 This format may not be available on all processors."),
3046 &record_btrace_cmdlist);
3047 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3049 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3051 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
3052 This format may not be available on all processors."),
3053 &record_btrace_cmdlist);
3054 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3056 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3057 _("Set record options"), &set_record_btrace_cmdlist,
3058 "set record btrace ", 0, &set_record_cmdlist);
3060 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3061 _("Show record options"), &show_record_btrace_cmdlist,
3062 "show record btrace ", 0, &show_record_cmdlist);
3064 add_setshow_enum_cmd ("replay-memory-access", no_class,
3065 replay_memory_access_types, &replay_memory_access, _("\
3066 Set what memory accesses are allowed during replay."), _("\
3067 Show what memory accesses are allowed during replay."),
3068 _("Default is READ-ONLY.\n\n\
3069 The btrace record target does not trace data.\n\
3070 The memory therefore corresponds to the live target and not \
3071 to the current replay position.\n\n\
3072 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3073 When READ-WRITE, allow accesses to read-only and read-write memory during \
3075 NULL, cmd_show_replay_memory_access,
3076 &set_record_btrace_cmdlist,
3077 &show_record_btrace_cmdlist);
3079 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3080 _("Set record btrace bts options"),
3081 &set_record_btrace_bts_cmdlist,
3082 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3084 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3085 _("Show record btrace bts options"),
3086 &show_record_btrace_bts_cmdlist,
3087 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3089 add_setshow_uinteger_cmd ("buffer-size", no_class,
3090 &record_btrace_conf.bts.size,
3091 _("Set the record/replay bts buffer size."),
3092 _("Show the record/replay bts buffer size."), _("\
3093 When starting recording request a trace buffer of this size. \
3094 The actual buffer size may differ from the requested size. \
3095 Use \"info record\" to see the actual buffer size.\n\n\
3096 Bigger buffers allow longer recording but also take more time to process \
3097 the recorded execution trace.\n\n\
3098 The trace buffer size may not be changed while recording."), NULL,
3099 show_record_bts_buffer_size_value,
3100 &set_record_btrace_bts_cmdlist,
3101 &show_record_btrace_bts_cmdlist);
3103 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3104 _("Set record btrace pt options"),
3105 &set_record_btrace_pt_cmdlist,
3106 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3108 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3109 _("Show record btrace pt options"),
3110 &show_record_btrace_pt_cmdlist,
3111 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3113 add_setshow_uinteger_cmd ("buffer-size", no_class,
3114 &record_btrace_conf.pt.size,
3115 _("Set the record/replay pt buffer size."),
3116 _("Show the record/replay pt buffer size."), _("\
3117 Bigger buffers allow longer recording but also take more time to process \
3118 the recorded execution.\n\
3119 The actual buffer size may differ from the requested size. Use \"info record\" \
3120 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3121 &set_record_btrace_pt_cmdlist,
3122 &show_record_btrace_pt_cmdlist);
3124 init_record_btrace_ops ();
3125 add_target (&record_btrace_ops);
3127 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3130 record_btrace_conf.bts.size = 64 * 1024;
3131 record_btrace_conf.pt.size = 16 * 1024;