1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
47 /* A new thread observer enabling branch tracing for the new thread. */
/* NULL while automatic tracing is disabled; see record_btrace_auto_disable.  */
48 static struct observer *record_btrace_thread_observer;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
60 /* The currently allowed replay memory access type. */
/* Compared by pointer identity in record_btrace_xfer_partial, so it must
   always point at one of the two arrays above.  */
61 static const char *replay_memory_access = replay_memory_access_read_only;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
73 /* A flag indicating that we are currently generating a core file. */
/* Non-zero disables the replay-time filtering in xfer_partial and
   fetch_registers so the core is written from the live target.  */
74 static int record_btrace_generating_corefile;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
90 /* Print a record-btrace debug message.  Use do ... while (0) to avoid
91 ambiguities when used in if statements.  */
/* Output is gated on the global "record_debug" knob and goes to gdb_stdlog;
   ##args swallows the comma when no varargs are given (GNU extension).  */
93 #define DEBUG(msg, args...) \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace.  This function never
109 static struct thread_info *
110 require_btrace_thread (void)
112 struct thread_info *tp;
/* Resolve the current thread; error out rather than return NULL.  */
116 tp = find_thread_ptid (inferior_ptid);
118 error (_("No thread."));
/* Reading the trace needs register access (e.g. the PC); reject early if
   the thread state forbids it.  */
120 validate_registers_access ();
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
133 Throws an error if there is no thread or no trace.  This function never
136 static struct btrace_thread_info *
137 require_btrace (void)
139 struct thread_info *tp;
/* Delegate all validation to require_btrace_thread; this wrapper only
   narrows the result to the btrace info.  */
141 tp = require_btrace_thread ();
146 /* Enable branch tracing for one thread.  Warn on errors. */
/* Used as a new-thread observer callback (see record_btrace_auto_enable);
   errors are downgraded to warnings so thread creation is never aborted.  */
149 record_btrace_enable_warn (struct thread_info *tp)
153 btrace_enable (tp, &record_btrace_conf);
155 CATCH (error, RETURN_MASK_ERROR)
157 warning ("%s", error.message);
162 /* Callback function to disable branch tracing for one thread. */
/* Cleanup-style callback: ARG is really a struct thread_info *.  */
165 record_btrace_disable_callback (void *arg)
167 struct thread_info *tp = (struct thread_info *) arg;
172 /* Enable automatic tracing of new threads. */
175 record_btrace_auto_enable (void)
177 DEBUG ("attach thread observer");
/* Remember the observer so record_btrace_auto_disable can detach it.  */
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
183 /* Disable automatic tracing of new threads. */
186 record_btrace_auto_disable (void)
188 /* The observer may have been detached, already. */
/* Idempotent: a second call is a no-op, which record_btrace_close
   relies on.  */
189 if (record_btrace_thread_observer == NULL)
192 DEBUG ("detach thread observer");
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
198 /* The record-btrace async event handler function. */
/* DATA is unused; we simply forward to the generic inferior event loop.  */
201 record_btrace_handle_async_inferior_event (gdb_client_data data)
203 inferior_event_handler (INF_REG_EVENT, NULL);
206 /* See record-btrace.h. */
209 record_btrace_push_target (void)
/* Auto-enable must be active before the target is pushed so threads
   created from here on are traced.  */
213 record_btrace_auto_enable ();
215 push_target (&record_btrace_ops);
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
/* Reset the corefile flag; it is only raised while dumping a core.  */
220 record_btrace_generating_corefile = 0;
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
226 /* The to_open method of target record-btrace. */
/* ARGS optionally selects a thread-number list; empty/NULL means all
   non-exited threads.  */
229 record_btrace_open (const char *args, int from_tty)
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
238 if (!target_has_execution)
239 error (_("The program is not being run."));
241 gdb_assert (record_btrace_thread_observer == NULL);
/* Build a cleanup chain that disables tracing for every thread enabled so
   far, so a mid-loop error from btrace_enable unwinds cleanly.  */
243 disable_chain = make_cleanup (null_cleanup, NULL);
244 ALL_NON_EXITED_THREADS (tp)
245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
247 btrace_enable (tp, &record_btrace_conf);
249 make_cleanup (record_btrace_disable_callback, tp);
252 record_btrace_push_target ();
/* Success: keep tracing enabled, drop the disable cleanups.  */
254 discard_cleanups (disable_chain);
257 /* The to_stop_recording method of target record-btrace. */
260 record_btrace_stop_recording (struct target_ops *self)
262 struct thread_info *tp;
264 DEBUG ("stop recording");
266 record_btrace_auto_disable ();
/* Only disable threads that actually have tracing enabled
   (btrace.target set).  */
268 ALL_NON_EXITED_THREADS (tp)
269 if (tp->btrace.target != NULL)
273 /* The to_disconnect method of target record-btrace. */
276 record_btrace_disconnect (struct target_ops *self, const char *args,
/* Save the beneath pointer before unpush_target invalidates SELF's
   position in the target stack.  */
279 struct target_ops *beneath = self->beneath;
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
288 /* The to_close method of target record-btrace. */
291 record_btrace_close (struct target_ops *self)
293 struct thread_info *tp;
/* The handler may be NULL if the target was never fully pushed.  */
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp)
305 btrace_teardown (tp);
308 /* The to_async method of target record-btrace. */
/* ENABLE != 0 arms our async handler, 0 clears it; either way the request
   is also forwarded to the target beneath.  */
311 record_btrace_async (struct target_ops *ops, int enable)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
318 ops->beneath->to_async (ops->beneath, enable);
321 /* Adjusts the size and returns a human readable size suffix. */
/* *SIZE is divided down in place when it is an exact multiple of 1GB, 1MB
   or 1KB (checked via the low-bit masks below).  */
324 record_btrace_adjust_size (unsigned int *size)
330 if ((sz & ((1u << 30) - 1)) == 0)
335 else if ((sz & ((1u << 20) - 1)) == 0)
340 else if ((sz & ((1u << 10) - 1)) == 0)
349 /* Print a BTS configuration. */
352 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
/* Scale the byte count and print it with its unit suffix.  */
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
365 /* Print an Intel Processor Trace configuration. */
368 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
/* Scale the byte count and print it with its unit suffix.  */
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
381 /* Print a branch tracing configuration. */
384 record_btrace_print_conf (const struct btrace_config *conf)
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
389 switch (conf->format)
391 case BTRACE_FORMAT_NONE:
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
406 /* The to_info_record method of target record-btrace. */
409 record_btrace_info (struct target_ops *self)
411 struct btrace_thread_info *btinfo;
412 const struct btrace_config *conf;
413 struct thread_info *tp;
414 unsigned int insns, calls, gaps;
418 tp = find_thread_ptid (inferior_ptid)
420 error (_("No thread."));
422 validate_registers_access ();
424 btinfo = &tp->btrace;
426 conf = btrace_conf (btinfo);
428 record_btrace_print_conf (conf);
436 if (!btrace_is_empty (tp))
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
/* Count functions: the number of the last (one-before-end) call
   segment.  */
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
443 calls = btrace_call_number (&call);
/* Count instructions from the end iterator's number.  */
445 btrace_insn_end (&insn, btinfo);
446 insns = btrace_insn_number (&insn);
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn) != NULL)
453 gaps = btinfo->ngaps;
456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
465 /* Print a decode error. */
/* Renders "[decode error (N): <text>]" for real errors; PT notifications
   (positive ERRCODE) print only "[<text>]".  */
468 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
471 const char *errstr = btrace_decode_error (format, errcode);
473 uiout->text (_("["));
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
485 /* Print an unsigned int. */
/* ui_out has no native unsigned field type; format via "%u".  */
488 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
490 uiout->field_fmt (fld, "%u", val);
493 /* A range of source lines. */
/* Half-open range [begin, end) within a single symtab; begin == end
   denotes the empty range (see btrace_line_range_is_empty).  */
495 struct btrace_line_range
497 /* The symtab this line is from. */
498 struct symtab *symtab;
500 /* The first line (inclusive). */
503 /* The last line (exclusive). */
507 /* Construct a line range. */
/* Returns the range [BEGIN, END) in SYMTAB by value.  */
509 static struct btrace_line_range
510 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
512 struct btrace_line_range range;
514 range.symtab = symtab;
521 /* Add a line to a line range. */
/* Grows RANGE (by value, returned) just enough to cover LINE.  */
523 static struct btrace_line_range
524 btrace_line_range_add (struct btrace_line_range range, int line)
526 if (range.end <= range.begin)
528 /* This is the first entry. */
530 range.end = line + 1;
/* Otherwise widen whichever bound LINE falls outside of.  */
532 else if (line < range.begin)
534 else if (range.end < line)
540 /* Return non-zero if RANGE is empty, zero otherwise. */
543 btrace_line_range_is_empty (struct btrace_line_range range)
545 return range.end <= range.begin;
548 /* Return non-zero if LHS contains RHS, zero otherwise. */
551 btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
559 /* Find the line range associated with PC. */
561 static struct btrace_line_range
562 btrace_find_line_range (CORE_ADDR pc)
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
570 symtab = find_pc_line_symtab (pc);
572 return btrace_mk_line_range (NULL, 0, 0);
574 ltable = SYMTAB_LINETABLE (symtab);
576 return btrace_mk_line_range (symtab, 0, 0);
578 nlines = ltable->nitems;
579 lines = ltable->item;
581 return btrace_mk_line_range (symtab, 0, 0);
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines - 1; i++)
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
593 /* Print source lines in LINES to UIOUT.
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
603 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
604 struct cleanup **ui_item_chain, int flags)
606 print_source_lines_flags psl_flags;
610 if (flags & DISASSEMBLY_FILENAME)
611 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
/* One tuple per source line; close the previous line's tuple first.  */
613 for (line = lines.begin; line < lines.end; ++line)
615 if (*ui_item_chain != NULL)
616 do_cleanups (*ui_item_chain);
619 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
621 print_source_lines (lines.symtab, line, line + 1, psl_flags);
/* Open the instruction list the caller will append into.  */
623 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
627 /* Disassemble a section of the recorded instruction trace. */
/* Prints instructions in [BEGIN, END) from BTINFO to UIOUT, interleaving
   source lines when DISASSEMBLY_SOURCE is set in FLAGS and marking trace
   gaps via btrace_ui_out_decode_error.  */
630 btrace_insn_history (struct ui_out *uiout,
631 const struct btrace_thread_info *btinfo,
632 const struct btrace_insn_iterator *begin,
633 const struct btrace_insn_iterator *end, int flags)
635 struct cleanup *cleanups, *ui_item_chain;
636 struct gdbarch *gdbarch;
637 struct btrace_insn_iterator it;
638 struct btrace_line_range last_lines;
640 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
641 btrace_insn_number (end));
643 flags |= DISASSEMBLY_SPECULATIVE;
645 gdbarch = target_gdbarch ();
646 last_lines = btrace_mk_line_range (NULL, 0, 0);
648 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
650 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
651 instructions corresponding to that line. */
652 ui_item_chain = NULL;
654 gdb_pretty_print_disassembler disasm (gdbarch);
656 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
658 const struct btrace_insn *insn;
660 insn = btrace_insn_get (&it);
662 /* A NULL instruction indicates a gap in the trace. */
665 const struct btrace_config *conf;
667 conf = btrace_conf (btinfo);
669 /* We have trace so we must have a configuration. */
670 gdb_assert (conf != NULL);
672 uiout->field_fmt ("insn-number", "%u",
673 btrace_insn_number (&it));
676 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
681 struct disasm_insn dinsn;
683 if ((flags & DISASSEMBLY_SOURCE) != 0)
685 struct btrace_line_range lines;
687 lines = btrace_find_line_range (insn->pc);
/* Only print source lines we have not printed for the previous
   instruction already.  */
688 if (!btrace_line_range_is_empty (lines)
689 && !btrace_line_range_contains_range (last_lines, lines))
691 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
694 else if (ui_item_chain == NULL)
697 = make_cleanup_ui_out_tuple_begin_end (uiout,
699 /* No source information. */
700 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
703 gdb_assert (ui_item_chain != NULL);
706 memset (&dinsn, 0, sizeof (dinsn));
707 dinsn.number = btrace_insn_number (&it);
708 dinsn.addr = insn->pc;
710 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
711 dinsn.is_speculative = 1;
713 disasm.pretty_print_insn (uiout, &dinsn, flags);
717 do_cleanups (cleanups);
720 /* The to_insn_history method of target record-btrace. */
/* SIZE's sign selects the direction relative to the previous request;
   its magnitude is the number of instructions to print.  */
723 record_btrace_insn_history (struct target_ops *self, int size, int flags)
725 struct btrace_thread_info *btinfo;
726 struct btrace_insn_history *history;
727 struct btrace_insn_iterator begin, end;
728 struct cleanup *uiout_cleanup;
729 struct ui_out *uiout;
730 unsigned int context, covered;
732 uiout = current_uiout;
733 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
735 context = abs (size);
737 error (_("Bad record instruction-history-size."));
739 btinfo = require_btrace ();
740 history = btinfo->insn_history;
743 struct btrace_insn_iterator *replay;
745 DEBUG ("insn-history (0x%x): %d", flags, size);
747 /* If we're replaying, we start at the replay position. Otherwise, we
748 start at the tail of the trace. */
749 replay = btinfo->replay;
753 btrace_insn_end (&begin, btinfo);
755 /* We start from here and expand in the requested direction. Then we
756 expand in the other direction, as well, to fill up any remaining
761 /* We want the current position covered, as well. */
762 covered = btrace_insn_next (&end, 1);
763 covered += btrace_insn_prev (&begin, context - covered);
764 covered += btrace_insn_next (&end, context - covered);
768 covered = btrace_insn_next (&end, context);
769 covered += btrace_insn_prev (&begin, context - covered);
/* A previous history exists: continue from its boundary in the
   requested direction.  */
774 begin = history->begin;
777 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
778 btrace_insn_number (&begin), btrace_insn_number (&end));
783 covered = btrace_insn_prev (&begin, context);
788 covered = btrace_insn_next (&end, context);
793 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
/* COVERED == 0 means we hit the corresponding end of the trace.  */
797 printf_unfiltered (_("At the start of the branch trace record.\n"));
799 printf_unfiltered (_("At the end of the branch trace record.\n"));
802 btrace_set_insn_history (btinfo, &begin, &end);
803 do_cleanups (uiout_cleanup);
806 /* The to_insn_history_range method of target record-btrace. */
/* Prints instructions numbered [FROM, TO], both inclusive.  */
809 record_btrace_insn_history_range (struct target_ops *self,
810 ULONGEST from, ULONGEST to, int flags)
812 struct btrace_thread_info *btinfo;
813 struct btrace_insn_history *history;
814 struct btrace_insn_iterator begin, end;
815 struct cleanup *uiout_cleanup;
816 struct ui_out *uiout;
817 unsigned int low, high;
820 uiout = current_uiout;
821 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
826 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
828 /* Check for wrap-arounds. */
/* LOW/HIGH are the ULONGEST arguments narrowed to unsigned int; a
   mismatch means the value did not fit.  */
829 if (low != from || high != to)
830 error (_("Bad range."));
833 error (_("Bad range."));
835 btinfo = require_btrace ();
837 found = btrace_find_insn_by_number (&begin, btinfo, low);
839 error (_("Range out of bounds."));
841 found = btrace_find_insn_by_number (&end, btinfo, high);
844 /* Silently truncate the range. */
845 btrace_insn_end (&end, btinfo);
849 /* We want both begin and end to be inclusive. */
850 btrace_insn_next (&end, 1);
853 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
854 btrace_set_insn_history (btinfo, &begin, &end);
856 do_cleanups (uiout_cleanup);
859 /* The to_insn_history_from method of target record-btrace. */
/* Prints SIZE instructions ending (SIZE < 0) or starting (SIZE > 0)
   at instruction FROM, clamping at the trace boundaries.  */
862 record_btrace_insn_history_from (struct target_ops *self,
863 ULONGEST from, int size, int flags)
865 ULONGEST begin, end, context;
867 context = abs (size);
869 error (_("Bad record instruction-history-size."));
878 begin = from - context + 1;
883 end = from + context - 1;
885 /* Check for wrap-around. */
890 record_btrace_insn_history_range (self, begin, end, flags);
893 /* Print the instruction number range for a function call history line. */
896 btrace_call_history_insn_range (struct ui_out *uiout,
897 const struct btrace_function *bfun)
899 unsigned int begin, end, size;
901 size = VEC_length (btrace_insn_s, bfun->insn);
/* A function segment always carries at least one instruction.  */
902 gdb_assert (size > 0);
904 begin = bfun->insn_offset;
/* END is inclusive, hence the -1.  */
905 end = begin + size - 1;
907 ui_out_field_uint (uiout, "insn begin", begin);
909 ui_out_field_uint (uiout, "insn end", end);
912 /* Compute the lowest and highest source line for the instructions in BFUN
913 and return them in PBEGIN and PEND.
914 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
915 result from inlining or macro expansion. */
918 btrace_compute_src_line_range (const struct btrace_function *bfun,
919 int *pbegin, int *pend)
921 struct btrace_insn *insn;
922 struct symtab *symtab;
934 symtab = symbol_symtab (sym);
936 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
938 struct symtab_and_line sal;
940 sal = find_pc_line (insn->pc, 0);
/* Skip lines from other symtabs (inlining/macros) or with no line
   info at all.  */
941 if (sal.symtab != symtab || sal.line == 0)
944 begin = std::min (begin, sal.line);
945 end = std::max (end, sal.line);
953 /* Print the source line information for a function call history line. */
/* Emits the file name and the min/max source line of BFUN's
   instructions.  */
956 btrace_call_history_src_line (struct ui_out *uiout,
957 const struct btrace_function *bfun)
966 uiout->field_string ("file",
967 symtab_to_filename_for_display (symbol_symtab (sym)));
969 btrace_compute_src_line_range (bfun, &begin, &end);
974 uiout->field_int ("min line", begin);
980 uiout->field_int ("max line", end);
983 /* Get the name of a branch trace function. */
/* Prefers the full symbol's print name, falls back to the minimal
   symbol's.  */
986 btrace_get_bfun_name (const struct btrace_function *bfun)
988 struct minimal_symbol *msym;
998 return SYMBOL_PRINT_NAME (sym);
999 else if (msym != NULL)
1000 return MSYMBOL_PRINT_NAME (msym);
1005 /* Disassemble a section of the recorded function trace. */
/* Prints call segments in [BEGIN, END) from BTINFO to UIOUT: index,
   optional indentation by call depth, function name, and (per FLAGS)
   instruction range and source line info.  */
1008 btrace_call_history (struct ui_out *uiout,
1009 const struct btrace_thread_info *btinfo,
1010 const struct btrace_call_iterator *begin,
1011 const struct btrace_call_iterator *end,
1014 struct btrace_call_iterator it;
1015 record_print_flags flags = (enum record_print_flag) int_flags;
1017 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1018 btrace_call_number (end));
1020 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1022 const struct btrace_function *bfun;
1023 struct minimal_symbol *msym;
1026 bfun = btrace_call_get (&it);
1030 /* Print the function index. */
1031 ui_out_field_uint (uiout, "index", bfun->number);
1034 /* Indicate gaps in the trace. */
1035 if (bfun->errcode != 0)
1037 const struct btrace_config *conf;
1039 conf = btrace_conf (btinfo);
1041 /* We have trace so we must have a configuration. */
1042 gdb_assert (conf != NULL);
1044 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
/* Indent by the segment's call depth relative to the trace's base
   level.  */
1049 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1051 int level = bfun->level + btinfo->level, i;
1053 for (i = 0; i < level; ++i)
1058 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1059 else if (msym != NULL)
1060 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1061 else if (!uiout->is_mi_like_p ())
1062 uiout->field_string ("function", "??");
1064 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1066 uiout->text (_("\tinst "));
1067 btrace_call_history_insn_range (uiout, bfun);
1070 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1072 uiout->text (_("\tat "));
1073 btrace_call_history_src_line (uiout, bfun);
1080 /* The to_call_history method of target record-btrace. */
/* SIZE's sign selects the direction relative to the previous request;
   its magnitude is the number of call segments to print.  Mirrors
   record_btrace_insn_history for the call trace.  */
1083 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1085 struct btrace_thread_info *btinfo;
1086 struct btrace_call_history *history;
1087 struct btrace_call_iterator begin, end;
1088 struct cleanup *uiout_cleanup;
1089 struct ui_out *uiout;
1090 unsigned int context, covered;
1091 record_print_flags flags = (enum record_print_flag) int_flags;
1093 uiout = current_uiout;
1094 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1096 context = abs (size);
1098 error (_("Bad record function-call-history-size."));
1100 btinfo = require_btrace ();
1101 history = btinfo->call_history;
1102 if (history == NULL)
1104 struct btrace_insn_iterator *replay;
1106 DEBUG ("call-history (0x%x): %d", int_flags, size);
1108 /* If we're replaying, we start at the replay position. Otherwise, we
1109 start at the tail of the trace. */
1110 replay = btinfo->replay;
1113 begin.function = replay->function;
1114 begin.btinfo = btinfo;
1117 btrace_call_end (&begin, btinfo);
1119 /* We start from here and expand in the requested direction. Then we
1120 expand in the other direction, as well, to fill up any remaining
1125 /* We want the current position covered, as well. */
1126 covered = btrace_call_next (&end, 1);
1127 covered += btrace_call_prev (&begin, context - covered);
1128 covered += btrace_call_next (&end, context - covered);
1132 covered = btrace_call_next (&end, context);
1133 covered += btrace_call_prev (&begin, context- covered);
/* A previous history exists: continue from its boundary in the
   requested direction.  */
1138 begin = history->begin;
1141 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1142 btrace_call_number (&begin), btrace_call_number (&end));
1147 covered = btrace_call_prev (&begin, context);
1152 covered = btrace_call_next (&end, context);
1157 btrace_call_history (uiout, btinfo, &begin, &end, flags);
/* COVERED == 0 means we hit the corresponding end of the trace.  */
1161 printf_unfiltered (_("At the start of the branch trace record.\n"));
1163 printf_unfiltered (_("At the end of the branch trace record.\n"));
1166 btrace_set_call_history (btinfo, &begin, &end);
1167 do_cleanups (uiout_cleanup);
1170 /* The to_call_history_range method of target record-btrace. */
/* Prints call segments numbered [FROM, TO], both inclusive.  */
1173 record_btrace_call_history_range (struct target_ops *self,
1174 ULONGEST from, ULONGEST to,
1177 struct btrace_thread_info *btinfo;
1178 struct btrace_call_history *history;
1179 struct btrace_call_iterator begin, end;
1180 struct cleanup *uiout_cleanup;
1181 struct ui_out *uiout;
1182 unsigned int low, high;
1184 record_print_flags flags = (enum record_print_flag) int_flags;
1186 uiout = current_uiout;
1187 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1192 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1194 /* Check for wrap-arounds. */
/* LOW/HIGH are FROM/TO narrowed to unsigned int; a mismatch means the
   value did not fit.  */
1195 if (low != from || high != to)
1196 error (_("Bad range."));
1199 error (_("Bad range."));
1201 btinfo = require_btrace ();
1203 found = btrace_find_call_by_number (&begin, btinfo, low);
1205 error (_("Range out of bounds."));
1207 found = btrace_find_call_by_number (&end, btinfo, high);
1210 /* Silently truncate the range. */
1211 btrace_call_end (&end, btinfo);
1215 /* We want both begin and end to be inclusive. */
1216 btrace_call_next (&end, 1);
1219 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1220 btrace_set_call_history (btinfo, &begin, &end);
1222 do_cleanups (uiout_cleanup);
1225 /* The to_call_history_from method of target record-btrace. */
/* Prints SIZE call segments ending (SIZE < 0) or starting (SIZE > 0)
   at segment FROM, clamping at the trace boundaries.  */
1228 record_btrace_call_history_from (struct target_ops *self,
1229 ULONGEST from, int size,
1232 ULONGEST begin, end, context;
1233 record_print_flags flags = (enum record_print_flag) int_flags;
1235 context = abs (size);
1237 error (_("Bad record function-call-history-size."));
1246 begin = from - context + 1;
1251 end = from + context - 1;
1253 /* Check for wrap-around. */
1258 record_btrace_call_history_range (self, begin, end, flags);
1261 /* The to_record_method method of target record-btrace. */
1263 static enum record_method
1264 record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1266 const struct btrace_config *config;
1267 struct thread_info * const tp = find_thread_ptid (ptid);
1270 error (_("No thread."));
/* A thread without a btrace target is not being recorded.  */
1272 if (tp->btrace.target == NULL)
1273 return RECORD_METHOD_NONE;
1275 return RECORD_METHOD_BTRACE;
1278 /* The to_record_is_replaying method of target record-btrace. */
/* Returns non-zero if any non-exited thread matching PTID is
   replaying.  */
1281 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1283 struct thread_info *tp;
1285 ALL_NON_EXITED_THREADS (tp)
1286 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1292 /* The to_record_will_replay method of target record-btrace. */
1295 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1297 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1300 /* The to_xfer_partial method of target record-btrace. */
1302 static enum target_xfer_status
1303 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1304 const char *annex, gdb_byte *readbuf,
1305 const gdb_byte *writebuf, ULONGEST offset,
1306 ULONGEST len, ULONGEST *xfered_len)
1308 struct target_ops *t;
1310 /* Filter out requests that don't make sense during replay. */
/* Pointer comparison is intentional: replay_memory_access only ever
   points at one of the two access-type string constants.  */
1311 if (replay_memory_access == replay_memory_access_read_only
1312 && !record_btrace_generating_corefile
1313 && record_btrace_is_replaying (ops, inferior_ptid))
1317 case TARGET_OBJECT_MEMORY:
1319 struct target_section *section;
1321 /* We do not allow writing memory in general. */
1322 if (writebuf != NULL)
1325 return TARGET_XFER_UNAVAILABLE;
1328 /* We allow reading readonly memory. */
1329 section = target_section_by_addr (ops, offset);
1330 if (section != NULL)
1332 /* Check if the section we found is readonly. */
1333 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1334 section->the_bfd_section)
1335 & SEC_READONLY) != 0)
1337 /* Truncate the request to fit into this section. */
1338 len = std::min (len, section->endaddr - offset);
1344 return TARGET_XFER_UNAVAILABLE;
1349 /* Forward the request. */
/* Delegate to the first target beneath that implements to_xfer_partial.  */
1351 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1352 offset, len, xfered_len);
1355 /* The to_insert_breakpoint method of target record-btrace. */
1358 record_btrace_insert_breakpoint (struct target_ops *ops,
1359 struct gdbarch *gdbarch,
1360 struct bp_target_info *bp_tgt)
1365 /* Inserting breakpoints requires accessing memory. Allow it for the
1366 duration of this function. */
1367 old = replay_memory_access;
1368 replay_memory_access = replay_memory_access_read_write;
1373 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
/* Restore the access mode on both the error and the normal path before
   propagating any exception.  */
1375 CATCH (except, RETURN_MASK_ALL)
1377 replay_memory_access = old;
1378 throw_exception (except);
1381 replay_memory_access = old;
1386 /* The to_remove_breakpoint method of target record-btrace. */
1389 record_btrace_remove_breakpoint (struct target_ops *ops,
1390 struct gdbarch *gdbarch,
1391 struct bp_target_info *bp_tgt,
1392 enum remove_bp_reason reason)
1397 /* Removing breakpoints requires accessing memory. Allow it for the
1398 duration of this function. */
1399 old = replay_memory_access;
1400 replay_memory_access = replay_memory_access_read_write;
1405 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
/* Restore the access mode on both the error and the normal path before
   propagating any exception.  */
1408 CATCH (except, RETURN_MASK_ALL)
1410 replay_memory_access = old;
1411 throw_exception (except);
1414 replay_memory_access = old;
1419 /* The to_fetch_registers method of target record-btrace. */
1422 record_btrace_fetch_registers (struct target_ops *ops,
1423 struct regcache *regcache, int regno)
1425 struct btrace_insn_iterator *replay;
1426 struct thread_info *tp;
1428 tp = find_thread_ptid (regcache_get_ptid (regcache));
1429 gdb_assert (tp != NULL);
1431 replay = tp->btrace.replay;
/* While replaying (and not writing a core), registers come from the
   trace; only the PC is known.  */
1432 if (replay != NULL && !record_btrace_generating_corefile)
1434 const struct btrace_insn *insn;
1435 struct gdbarch *gdbarch;
1438 gdbarch = get_regcache_arch (regcache);
1439 pcreg = gdbarch_pc_regnum (gdbarch);
1443 /* We can only provide the PC register. */
1444 if (regno >= 0 && regno != pcreg)
1447 insn = btrace_insn_get (replay);
1448 gdb_assert (insn != NULL);
1450 regcache_raw_supply (regcache, regno, &insn->pc);
/* Not replaying: forward to the target beneath.  */
1454 struct target_ops *t = ops->beneath;
1456 t->to_fetch_registers (t, regcache, regno);
1460 /* The to_store_registers method of target record-btrace. */
1463 record_btrace_store_registers (struct target_ops *ops,
1464 struct regcache *regcache, int regno)
1466 struct target_ops *t;
/* Writing registers would diverge from the recorded trace; refuse while
   replaying (corefile generation is exempt).  */
1468 if (!record_btrace_generating_corefile
1469 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1470 error (_("Cannot write registers while replaying."));
1472 gdb_assert (may_write_registers != 0);
1475 t->to_store_registers (t, regcache, regno);
1478 /* The to_prepare_to_store method of target record-btrace. */
/* Silently skipped while replaying; otherwise forwarded beneath.  */
1481 record_btrace_prepare_to_store (struct target_ops *ops,
1482 struct regcache *regcache)
1484 struct target_ops *t;
1486 if (!record_btrace_generating_corefile
1487 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1491 t->to_prepare_to_store (t, regcache);
/* The branch trace frame cache.

   Associates a frame with the thread and branch trace function segment it
   was derived from.  Entries are stored in the BFCACHE hash table, keyed
   by the FRAME pointer.  */
struct btrace_frame_cache
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
/* A struct btrace_frame_cache hash table indexed by NEXT.
   Keyed on the cache's FRAME pointer (see bfcache_hash / bfcache_eq);
   entries are removed when the frame is deallocated.  */
static htab_t bfcache;
/* hash_f for htab_create_alloc of bfcache.
   Hashes on the frame pointer, which uniquely identifies a cache entry.  */
bfcache_hash (const void *arg)
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
/* eq_f for htab_create_alloc of bfcache.
   Two cache entries are equal iff they describe the same frame.  */
bfcache_eq (const void *arg1, const void *arg2)
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
/* Create a new btrace frame cache for FRAME and register it in BFCACHE.
   The cache is allocated on the frame obstack, so its lifetime is tied to
   the frame itself.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
  struct btrace_frame_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  /* FRAME must not already be cached.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
/* Extract the branch trace function from a branch trace frame.
   Looks FRAME up in BFCACHE using a stack-allocated search pattern.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;

  /* Only the FRAME member is used for hashing/equality.  */
  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);

  cache = (const struct btrace_frame_cache *) *slot;
/* Implement stop_reason method for record_btrace_frame_unwind.
   Unwinding stops where the recorded trace provides no caller segment.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  gdb_assert (bfun != NULL);

  /* No caller segment in the trace - we cannot unwind further.  */
  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
/* Implement this_id method for record_btrace_frame_unwind.
   Builds an "unavailable stack" frame id from the frame's function start
   address and the number of the function's first trace segment.  */
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so all segments of
     the same function yield the same SPECIAL value.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  /* The stack contents are not recorded, hence "unavailable stack".  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be reconstructed from the trace; all other registers
   are reported as unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  /* We can only provide the PC.  */
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  gdb_assert (bfun != NULL);

    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  /* If the caller segment was entered via a return, the unwound PC is the
     first instruction of that segment (the return target)...  */
  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
      insn = VEC_index (btrace_insn_s, caller->insn, 0);

      /* ...otherwise it is the instruction following the caller's last
	 (calling) instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);

      pc += gdb_insn_length (gdbarch, pc);

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
/* Implement sniffer method for record_btrace_frame_unwind.

   Accepts a frame while replaying: for the innermost frame the replay
   position provides the function segment; for outer frames the segment is
   the UP link of the (already-sniffed) callee frame, unless the callee
   was entered via a tail call (which the tailcall unwinder handles).  */
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  next = get_next_frame (this_frame);

      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
	bfun = replay->function;

      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);

  *this_cache = cache;
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Accepts only frames whose callee frame (the next frame) was entered via
   a tail call according to the branch trace.  */
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);

  callee = btrace_get_frame_function (next);

  /* Decline unless the callee was reached via a tail call.  */
  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);

  *this_cache = cache;
/* The dealloc_cache method of record_btrace_frame_unwind; removes the
   frame's entry from BFCACHE.  The cache memory itself was allocated on
   the frame obstack and is released with the frame.  */
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
  struct btrace_frame_cache *cache;

  cache = (struct btrace_frame_cache *) this_cache;

  /* The entry must have been registered by the sniffer.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
/* The normal-frame unwinder used while replaying recorded history.  */
const struct frame_unwind record_btrace_frame_unwind =
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
/* The unwinder for frames entered via tail calls in the recorded history;
   differs from record_btrace_frame_unwind only in its sniffer.  */
const struct frame_unwind record_btrace_tailcall_frame_unwind =
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
/* Implement the to_get_unwinder method.
   Returns the btrace unwinder so frames are unwound from the trace while
   this target is pushed.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
  return &record_btrace_frame_unwind;
/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
  return &record_btrace_tailcall_frame_unwind;
/* Return a human-readable string for FLAG.
   Used only for DEBUG output in the resume/step paths.  */
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
    return "reverse-step";

    return "reverse-cont";
/* Indicate that TP should be resumed according to FLAG.
   This only records the intent in TP's btrace flags; the actual stepping
   is done later in record_btrace_wait.  */
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
/* Get the current frame for TP.
   Temporarily redirects INFERIOR_PTID to TP and clears TP's executing
   flag so get_current_frame can compute the frame; both are restored on
   all paths, including on exception.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
  struct frame_info *frame;
  ptid_t old_inferior_ptid;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = get_current_frame ();

  CATCH (except, RETURN_MASK_ALL)
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;
/* Start replaying a thread.
   Positions a new replay iterator at the end of TP's branch trace
   (skipping trailing gaps) and fixes up the stepping-related frame ids,
   since frames are computed differently while replaying.  On error, the
   iterator is freed and the exception is re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)

  /* GDB stores the current frame_id when stepping in order to detect steps
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	  steps = btrace_insn_prev (replay, 1);
	    error (_("No trace."));

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;

  CATCH (except, RETURN_MASK_ALL)
      /* Undo the partial setup before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
/* Stop replaying a thread.
   Frees TP's replay iterator and invalidates its cached registers so the
   live state is re-fetched.  */
record_btrace_stop_replaying (struct thread_info *tp)
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
/* Stop replaying TP if it is at the end of its execution history.
   A replay position equal to the trace end means TP has caught up with
   the recorded present, so replaying can be dropped.  */
record_btrace_stop_replaying_at_end (struct thread_info *tp)
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
/* The to_resume method of target record-btrace.

   Records the resume intent per thread (the actual stepping happens in
   record_btrace_wait); forwards the request to the target beneath when
   neither replaying nor reverse-executing.  */
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
      ops->to_resume (ops, ptid, step, signal);

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;

      flag = step == 0 ? BTHR_CONT : BTHR_STEP;

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	    /* The stepping flag goes to the current thread only; all other
	       matching threads get the continue flag.  */
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	      record_btrace_resume_thread (tp, cflag);

      /* Non-stop: apply the same flag to every matching thread.  */
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);

  /* Async support.  */
  if (target_can_async_p ())
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
/* The to_commit_resume method of target record-btrace.
   Forwarded to the target beneath only when we are not replaying and not
   reverse-executing; otherwise record-btrace handles the move itself.  */
record_btrace_commit_resume (struct target_ops *ops)
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    ops->beneath->to_commit_resume (ops->beneath);
/* Cancel resuming TP.
   Clears any pending move/stop request and drops replaying if TP is at
   the end of its execution history.  */
record_btrace_cancel_resume (struct thread_info *tp)
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
	 print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;
/* Return a target_waitstatus indicating that a step finished.
   SIGTRAP mirrors what a live single-step would report.  */

static struct target_waitstatus
btrace_step_stopped (void)
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;
/* Return a target_waitstatus indicating that a thread was stopped as
   requested (GDB_SIGNAL_0: no signal involved).  */

static struct target_waitstatus
btrace_step_stopped_on_request (void)
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_0;
/* Return a target_waitstatus indicating a spurious stop (the step made
   progress but produced no reportable event).  */

static struct target_waitstatus
btrace_step_spurious (void)
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_SPURIOUS;
/* Return a target_waitstatus indicating that the thread was not resumed.  */

static struct target_waitstatus
btrace_step_no_resumed (void)
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_RESUMED;
/* Return a target_waitstatus indicating that we should wait again
   (TARGET_WAITKIND_IGNORE: nothing to report yet).  */

static struct target_waitstatus
btrace_step_again (void)
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_IGNORE;
/* Clear the record histories.
   Drops the cached instruction/call history iterators so browsing starts
   anew from the (possibly moved) replay position.  */
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
/* Check whether TP's current replay position is at a breakpoint.
   Delegates to record_check_stopped_by_breakpoint, which also records the
   stop reason in BTINFO->stop_reason.  */
record_btrace_replay_at_breakpoint (struct thread_info *tp)
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  insn = btrace_insn_get (replay);

  inf = find_inferior_ptid (tp->ptid);

  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
					     &btinfo->stop_reason);
/* Step one instruction in forward direction.
   Returns NO_HISTORY when not replaying or when the step reaches the end
   of the trace, STOPPED when stepping onto a breakpoint, and SPURIOUS
   otherwise (step succeeded, nothing to report).  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  */
      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
	  return btrace_step_no_history ();

  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
/* Step one instruction in backward direction.
   Starts replaying if necessary.  Returns NO_HISTORY at the beginning of
   the trace, STOPPED when landing on a breakpoint, SPURIOUS otherwise.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
      steps = btrace_insn_prev (replay, 1);
	  return btrace_step_no_history ();

  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
/* Step a single thread.
   Consumes TP's pending move/stop flags and performs one step (or a stop)
   accordingly, translating the result into a target_waitstatus.  For
   continue-style flags, the flags are re-armed so the thread keeps moving
   on the next call until a reportable event occurs.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the pending request.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

      /* BTHR_STOP: report the requested stop.  */
      return btrace_step_stopped_on_request ();

      /* Single step forward.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)

      return btrace_step_stopped ();

      /* Single step backward.  */
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)

      return btrace_step_stopped ();

      /* Continue forward: keep stepping until an event, re-arming FLAGS.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)

      btinfo->flags |= flags;
      return btrace_step_again ();

      /* Continue backward, analogous to the forward case.  */
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)

      btinfo->flags |= flags;
      return btrace_step_again ();

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;
/* A vector of threads.  Element type for the VEC(tp_t) work lists used by
   record_btrace_wait.  */

typedef struct thread_info * tp_t;
/* Announce further events if necessary.
   If threads are still moving or still pending a no-history report, mark
   the async event handler so to_wait gets called again.  */
record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
				      const VEC (tp_t) *no_history)
  int more_moving, more_no_history;

  more_moving = !VEC_empty (tp_t, moving);
  more_no_history = !VEC_empty (tp_t, no_history);

  if (!more_moving && !more_no_history)

    DEBUG ("movers pending");

  if (more_no_history)
    DEBUG ("no-history pending");

  mark_async_event_handler (record_btrace_async_inferior_event_handler);
/* The to_wait method of target record-btrace.

   Drives the actual (replay) stepping requested by to_resume: steps all
   matching moving threads round-robin until one reports an event, then
   stops the others.  Forwards to the target beneath when not replaying.  */
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
      return ops->to_wait (ops, ptid, status, options);

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread had a pending request - nothing was resumed.  */
  if (VEC_empty (tp_t, moving))
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread keeps moving; try the next one.  */

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park the thread; reported only if nothing else happens.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));

	      /* Any other kind is a reportable event.  */
	      eventing = VEC_unordered_remove (tp_t, moving, ix);

  if (eventing == NULL)
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
/* The to_stop method of target record-btrace.
   While replaying, converts pending move requests into stop requests for
   all matching threads; otherwise forwards to the target beneath.  */
record_btrace_stop (struct target_ops *ops, ptid_t ptid)
  DEBUG ("stop %s", target_pid_to_str (ptid));

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
      ops->to_stop (ops, ptid);

      struct thread_info *tp;

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	    tp->btrace.flags &= ~BTHR_MOVE;
	    tp->btrace.flags |= BTHR_STOP;
/* The to_can_execute_reverse method of target record-btrace.
   Recorded history always supports reverse execution.  */
record_btrace_can_execute_reverse (struct target_ops *self)
/* The to_stopped_by_sw_breakpoint method of target record-btrace.
   While replaying, answer from the stop reason recorded at the replay
   position; otherwise forward to the target beneath.  */
record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
  if (record_btrace_is_replaying (ops, minus_one_ptid))
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;

  return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
/* The to_supports_stopped_by_sw_breakpoint method of target
   record-btrace.  Always supported while replaying; otherwise ask the
   target beneath.  */
record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
  if (record_btrace_is_replaying (ops, minus_one_ptid))

  return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
/* The to_stopped_by_hw_breakpoint method of target record-btrace.
   (Analogous to the sw-breakpoint variant above.)  */
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
  if (record_btrace_is_replaying (ops, minus_one_ptid))
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
/* The to_supports_stopped_by_hw_breakpoint method of target
   record-btrace.  Always supported while replaying; otherwise ask the
   target beneath.  */
record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
  if (record_btrace_is_replaying (ops, minus_one_ptid))

  return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
/* The to_update_thread_list method of target record-btrace.  */
record_btrace_update_thread_list (struct target_ops *ops)
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))

  /* Forward the request.  */
  ops->to_update_thread_list (ops);
/* The to_thread_alive method of target record-btrace.  */
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  return ops->to_thread_alive (ops, ptid);
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  Otherwise the replay position is moved to *IT, register
   caches are invalidated, the histories are reset, and the new position
   is printed.  */
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);

      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      /* Nothing to do if the position does not change.  */
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
/* The to_goto_record_begin method of target record-btrace.
   Moves the replay position to the first real instruction of the trace,
   skipping leading gaps.  */
record_btrace_goto_begin (struct target_ops *self)
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
      steps = btrace_insn_next (&begin, 1);
	error (_("No trace."));

  record_btrace_set_replay (tp, &begin);
/* The to_goto_record_end method of target record-btrace.
   A NULL iterator makes record_btrace_set_replay stop replaying, i.e.
   return to the end of the history.  */
record_btrace_goto_end (struct target_ops *ops)
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);
/* The to_goto_record method of target record-btrace.
   Moves the replay position to the instruction numbered INSN, rejecting
   numbers that overflow the lookup type, are absent, or denote a gap.  */
record_btrace_goto (struct target_ops *self, ULONGEST insn)
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;

  /* Check for wrap-arounds.  */
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
/* The to_record_stop_replaying method of target record-btrace.
   Stops replaying for every live thread.  */
record_btrace_stop_replaying_all (struct target_ops *self)
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
/* The to_execution_direction target method.
   Reports the direction stored by the most recent to_resume call.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
  return record_btrace_resume_exec_dir;
/* The to_prepare_to_generate_core target method.
   While the flag is set, register/memory access restrictions that apply
   during replay are lifted so a core file can be written.  */
record_btrace_prepare_to_generate_core (struct target_ops *self)
  record_btrace_generating_corefile = 1;
/* The to_done_generating_core target method.
   Re-enables the replay-time access restrictions.  */
record_btrace_done_generating_core (struct target_ops *self)
  record_btrace_generating_corefile = 0;
2827 /* Initialize the record-btrace target ops. */
2830 init_record_btrace_ops (void)
2832 struct target_ops *ops;
2834 ops = &record_btrace_ops;
2835 ops->to_shortname = "record-btrace";
2836 ops->to_longname = "Branch tracing target";
2837 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2838 ops->to_open = record_btrace_open;
2839 ops->to_close = record_btrace_close;
2840 ops->to_async = record_btrace_async;
2841 ops->to_detach = record_detach;
2842 ops->to_disconnect = record_btrace_disconnect;
2843 ops->to_mourn_inferior = record_mourn_inferior;
2844 ops->to_kill = record_kill;
2845 ops->to_stop_recording = record_btrace_stop_recording;
2846 ops->to_info_record = record_btrace_info;
2847 ops->to_insn_history = record_btrace_insn_history;
2848 ops->to_insn_history_from = record_btrace_insn_history_from;
2849 ops->to_insn_history_range = record_btrace_insn_history_range;
2850 ops->to_call_history = record_btrace_call_history;
2851 ops->to_call_history_from = record_btrace_call_history_from;
2852 ops->to_call_history_range = record_btrace_call_history_range;
2853 ops->to_record_method = record_btrace_record_method;
2854 ops->to_record_is_replaying = record_btrace_is_replaying;
2855 ops->to_record_will_replay = record_btrace_will_replay;
2856 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2857 ops->to_xfer_partial = record_btrace_xfer_partial;
2858 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2859 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2860 ops->to_fetch_registers = record_btrace_fetch_registers;
2861 ops->to_store_registers = record_btrace_store_registers;
2862 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2863 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2864 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2865 ops->to_resume = record_btrace_resume;
2866 ops->to_commit_resume = record_btrace_commit_resume;
2867 ops->to_wait = record_btrace_wait;
2868 ops->to_stop = record_btrace_stop;
2869 ops->to_update_thread_list = record_btrace_update_thread_list;
2870 ops->to_thread_alive = record_btrace_thread_alive;
2871 ops->to_goto_record_begin = record_btrace_goto_begin;
2872 ops->to_goto_record_end = record_btrace_goto_end;
2873 ops->to_goto_record = record_btrace_goto;
2874 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2875 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2876 ops->to_supports_stopped_by_sw_breakpoint
2877 = record_btrace_supports_stopped_by_sw_breakpoint;
2878 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2879 ops->to_supports_stopped_by_hw_breakpoint
2880 = record_btrace_supports_stopped_by_hw_breakpoint;
2881 ops->to_execution_direction = record_btrace_execution_direction;
2882 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2883 ops->to_done_generating_core = record_btrace_done_generating_core;
2884 ops->to_stratum = record_stratum;
2885 ops->to_magic = OPS_MAGIC;
2888 /* Start recording in BTS format. */
2891 cmd_record_btrace_bts_start (char *args, int from_tty)
2893 if (args != NULL && *args != 0)
2894 error (_("Invalid argument."));
2896 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2900 execute_command ("target record-btrace", from_tty);
2902 CATCH (exception, RETURN_MASK_ALL)
2904 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2905 throw_exception (exception);
2910 /* Start recording in Intel Processor Trace format. */
2913 cmd_record_btrace_pt_start (char *args, int from_tty)
2915 if (args != NULL && *args != 0)
2916 error (_("Invalid argument."));
2918 record_btrace_conf.format = BTRACE_FORMAT_PT;
2922 execute_command ("target record-btrace", from_tty);
2924 CATCH (exception, RETURN_MASK_ALL)
2926 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2927 throw_exception (exception);
2932 /* Alias for "target record". */
2935 cmd_record_btrace_start (char *args, int from_tty)
2937 if (args != NULL && *args != 0)
2938 error (_("Invalid argument."));
2940 record_btrace_conf.format = BTRACE_FORMAT_PT;
2944 execute_command ("target record-btrace", from_tty);
2946 CATCH (exception, RETURN_MASK_ALL)
2948 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2952 execute_command ("target record-btrace", from_tty);
2954 CATCH (exception, RETURN_MASK_ALL)
2956 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2957 throw_exception (exception);
2964 /* The "set record btrace" command. */
2967 cmd_set_record_btrace (char *args, int from_tty)
2969 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2972 /* The "show record btrace" command. */
2975 cmd_show_record_btrace (char *args, int from_tty)
2977 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2980 /* The "show record btrace replay-memory-access" command. */
2983 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2984 struct cmd_list_element *c, const char *value)
2986 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2987 replay_memory_access);
2990 /* The "set record btrace bts" command. */
2993 cmd_set_record_btrace_bts (char *args, int from_tty)
2995 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2996 "by an appropriate subcommand.\n"));
2997 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2998 all_commands, gdb_stdout);
3001 /* The "show record btrace bts" command. */
3004 cmd_show_record_btrace_bts (char *args, int from_tty)
3006 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3009 /* The "set record btrace pt" command. */
3012 cmd_set_record_btrace_pt (char *args, int from_tty)
3014 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3015 "by an appropriate subcommand.\n"));
3016 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3017 all_commands, gdb_stdout);
3020 /* The "show record btrace pt" command. */
3023 cmd_show_record_btrace_pt (char *args, int from_tty)
3025 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3050 void _initialize_record_btrace (void);
3052 /* Initialize btrace commands. */
3055 _initialize_record_btrace (void)
3057 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3058 _("Start branch trace recording."), &record_btrace_cmdlist,
3059 "record btrace ", 0, &record_cmdlist);
3060 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3062 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3064 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3065 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3066 This format may not be available on all processors."),
3067 &record_btrace_cmdlist);
3068 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3070 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3072 Start branch trace recording in Intel Processor Trace format.\n\n\
3073 This format may not be available on all processors."),
3074 &record_btrace_cmdlist);
3075 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3077 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3078 _("Set record options"), &set_record_btrace_cmdlist,
3079 "set record btrace ", 0, &set_record_cmdlist);
3081 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3082 _("Show record options"), &show_record_btrace_cmdlist,
3083 "show record btrace ", 0, &show_record_cmdlist);
3085 add_setshow_enum_cmd ("replay-memory-access", no_class,
3086 replay_memory_access_types, &replay_memory_access, _("\
3087 Set what memory accesses are allowed during replay."), _("\
3088 Show what memory accesses are allowed during replay."),
3089 _("Default is READ-ONLY.\n\n\
3090 The btrace record target does not trace data.\n\
3091 The memory therefore corresponds to the live target and not \
3092 to the current replay position.\n\n\
3093 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3094 When READ-WRITE, allow accesses to read-only and read-write memory during \
3096 NULL, cmd_show_replay_memory_access,
3097 &set_record_btrace_cmdlist,
3098 &show_record_btrace_cmdlist);
3100 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3101 _("Set record btrace bts options"),
3102 &set_record_btrace_bts_cmdlist,
3103 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3105 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3106 _("Show record btrace bts options"),
3107 &show_record_btrace_bts_cmdlist,
3108 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3110 add_setshow_uinteger_cmd ("buffer-size", no_class,
3111 &record_btrace_conf.bts.size,
3112 _("Set the record/replay bts buffer size."),
3113 _("Show the record/replay bts buffer size."), _("\
3114 When starting recording request a trace buffer of this size. \
3115 The actual buffer size may differ from the requested size. \
3116 Use \"info record\" to see the actual buffer size.\n\n\
3117 Bigger buffers allow longer recording but also take more time to process \
3118 the recorded execution trace.\n\n\
3119 The trace buffer size may not be changed while recording."), NULL,
3120 show_record_bts_buffer_size_value,
3121 &set_record_btrace_bts_cmdlist,
3122 &show_record_btrace_bts_cmdlist);
3124 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3125 _("Set record btrace pt options"),
3126 &set_record_btrace_pt_cmdlist,
3127 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3129 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3130 _("Show record btrace pt options"),
3131 &show_record_btrace_pt_cmdlist,
3132 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3134 add_setshow_uinteger_cmd ("buffer-size", no_class,
3135 &record_btrace_conf.pt.size,
3136 _("Set the record/replay pt buffer size."),
3137 _("Show the record/replay pt buffer size."), _("\
3138 Bigger buffers allow longer recording but also take more time to process \
3139 the recorded execution.\n\
3140 The actual buffer size may differ from the requested size. Use \"info record\" \
3141 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3142 &set_record_btrace_pt_cmdlist,
3143 &show_record_btrace_pt_cmdlist);
3145 init_record_btrace_ops ();
3146 add_target (&record_btrace_ops);
3148 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3151 record_btrace_conf.bts.size = 64 * 1024;
3152 record_btrace_conf.pt.size = 16 * 1024;