1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
/* File-scope state of the record-btrace target: the target vector itself,
   the new-thread observer and async-event handles, user-tunable settings,
   and the cmd_list_element anchors that the "set/show record btrace"
   subcommand trees hang off of.  */

44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer *record_btrace_thread_observer;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
/* Default is read-only: while replaying, writes to memory are refused
   unless the user opts in (see record_btrace_xfer_partial).  */
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access = replay_memory_access_read_only;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
/* Non-zero while gcore runs; checked to relax replay restrictions in
   xfer_partial/fetch_registers below.  */
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
/* Output is gated on the global "set debug record" flag (record_debug)
   and tagged with a "[record-btrace]" prefix.  */
93 #define DEBUG(msg, args...) \
96   if (record_debug != 0) \
97     fprintf_unfiltered (gdb_stdlog, \
98 			"[record-btrace] " msg "\n", ##args); \
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace. This function never
109 static struct thread_info *
110 require_btrace_thread (void)
112 struct thread_info *tp;
/* Resolve the currently selected thread; error out if there is none.  */
116 tp = find_thread_ptid (inferior_ptid);
118 error (_("No thread."));
120 validate_registers_access ();
/* An empty trace is treated the same as no trace at all.  */
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
133 Throws an error if there is no thread or no trace. This function never
136 static struct btrace_thread_info *
137 require_btrace (void)
139 struct thread_info *tp;
/* Delegates the thread lookup and validation to require_btrace_thread.  */
141 tp = require_btrace_thread ();
146 /* Enable branch tracing for one thread. Warn on errors. */
/* Used as the new-thread observer callback: failure to enable tracing on a
   new thread must not abort the debug session, so errors are downgraded to
   warnings via CATCH.  */
149 record_btrace_enable_warn (struct thread_info *tp)
153 btrace_enable (tp, &record_btrace_conf);
155 CATCH (error, RETURN_MASK_ERROR)
157 warning ("%s", error.message);
162 /* Callback function to disable branch tracing for one thread. */
/* Takes void * because it is registered as a cleanup callback
   (see record_btrace_open).  */
165 record_btrace_disable_callback (void *arg)
167 struct thread_info *tp = (struct thread_info *) arg;
172 /* Enable automatic tracing of new threads. */
175 record_btrace_auto_enable (void)
177 DEBUG ("attach thread observer");
/* The observer handle is stashed so auto_disable can detach it later.  */
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
183 /* Disable automatic tracing of new threads. */
186 record_btrace_auto_disable (void)
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
192 DEBUG ("detach thread observer");
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
198 /* The record-btrace async event handler function. */
201 record_btrace_handle_async_inferior_event (gdb_client_data data)
203 inferior_event_handler (INF_REG_EVENT, NULL);
206 /* See record-btrace.h. */
/* Push the record-btrace target, enable auto-tracing of new threads, set up
   the async event handler, and notify observers that recording started.  */
209 record_btrace_push_target (void)
213 record_btrace_auto_enable ();
215 push_target (&record_btrace_ops);
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
220 record_btrace_generating_corefile = 0;
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
226 /* The to_open method of target record-btrace. */
/* ARGS optionally restricts recording to a thread-number list; an empty or
   NULL ARGS enables tracing on every non-exited thread.  */
229 record_btrace_open (const char *args, int from_tty)
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
238 if (!target_has_execution)
239 error (_("The program is not being run."));
241 gdb_assert (record_btrace_thread_observer == NULL);
/* Each successfully enabled thread gets a disable cleanup, so that a later
   failure rolls back tracing on all threads enabled so far.  */
243 disable_chain = make_cleanup (null_cleanup, NULL);
244 ALL_NON_EXITED_THREADS (tp)
245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
247 btrace_enable (tp, &record_btrace_conf);
249 make_cleanup (record_btrace_disable_callback, tp);
252 record_btrace_push_target ();
/* Success: keep tracing enabled; drop the rollback cleanups.  */
254 discard_cleanups (disable_chain);
257 /* The to_stop_recording method of target record-btrace. */
260 record_btrace_stop_recording (struct target_ops *self)
262 struct thread_info *tp;
264 DEBUG ("stop recording");
266 record_btrace_auto_disable ();
/* Only threads that actually have btrace enabled are touched.  */
268 ALL_NON_EXITED_THREADS (tp)
269 if (tp->btrace.target != NULL)
273 /* The to_disconnect method of target record-btrace. */
276 record_btrace_disconnect (struct target_ops *self, const char *args,
279 struct target_ops *beneath = self->beneath;
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
288 /* The to_close method of target record-btrace. */
291 record_btrace_close (struct target_ops *self)
293 struct thread_info *tp;
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp)
305 btrace_teardown (tp);
308 /* The to_async method of target record-btrace. */
/* ENABLE toggles the async event handler, then the request is forwarded to
   the target beneath.  */
311 record_btrace_async (struct target_ops *ops, int enable)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
318 ops->beneath->to_async (ops->beneath, enable);
321 /* Adjusts the size and returns a human readable size suffix. */
/* *SIZE is scaled down in place when it is an exact multiple of 1 GiB,
   1 MiB, or 1 KiB (tested via the low-bit masks below); the matching
   suffix string is returned for printing.  */
324 record_btrace_adjust_size (unsigned int *size)
330 if ((sz & ((1u << 30) - 1)) == 0)
335 else if ((sz & ((1u << 20) - 1)) == 0)
340 else if ((sz & ((1u << 10) - 1)) == 0)
349 /* Print a BTS configuration. */
352 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
365 /* Print an Intel Processor Trace configuration. */
368 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
381 /* Print a branch tracing configuration. */
/* Prints the recording format, then dispatches to the format-specific
   printer (BTS or PT).  Reaching the internal_error below means CONF holds
   a format value this switch does not know about.  */
384 record_btrace_print_conf (const struct btrace_config *conf)
386   printf_unfiltered (_("Recording format: %s.\n"),
387 		     btrace_format_string (conf->format));
389   switch (conf->format)
391     case BTRACE_FORMAT_NONE:
394     case BTRACE_FORMAT_BTS:
395       record_btrace_print_bts_conf (&conf->bts);
398     case BTRACE_FORMAT_PT:
399       record_btrace_print_pt_conf (&conf->pt);
/* Fixed typo in the internal-error message: "Unkown" -> "Unknown".  */
403   internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
406 /* The to_info_record method of target record-btrace. */
/* Implements "info record": prints the recording configuration, the number
   of recorded instructions/functions/gaps, and the replay position if the
   thread is currently replaying.  */
409 record_btrace_info (struct target_ops *self)
411 struct btrace_thread_info *btinfo;
412 const struct btrace_config *conf;
413 struct thread_info *tp;
414 unsigned int insns, calls, gaps;
418 tp = find_thread_ptid (inferior_ptid);
420 error (_("No thread."));
422 validate_registers_access ();
424 btinfo = &tp->btrace;
426 conf = btrace_conf (btinfo);
428 record_btrace_print_conf (conf);
436 if (!btrace_is_empty (tp))
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
/* Count functions by stepping back one from the end iterator.  */
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
443 calls = btrace_call_number (&call);
445 btrace_insn_end (&insn, btinfo);
446 insns = btrace_insn_number (&insn);
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn) != NULL)
453 gaps = btinfo->ngaps;
456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
465 /* Print a decode error. */
/* Renders "[decode error (N): <message>]" into UIOUT; for PT, positive
   error codes are informational notifications and the "decode error"
   prefix is suppressed.  */
468 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
471 const char *errstr = btrace_decode_error (format, errcode);
473 uiout->text (_("["));
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
485 /* Print an unsigned int. */
/* Helper because ui_out has no native unsigned field printer.  */
488 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
490 uiout->field_fmt (fld, "%u", val);
493 /* A range of source lines. */
/* A half-open [begin, end) interval of source lines within one symtab;
   end <= begin encodes the empty range.  */
495 struct btrace_line_range
497 /* The symtab this line is from. */
498 struct symtab *symtab;
500 /* The first line (inclusive). */
503 /* The last line (exclusive). */
507 /* Construct a line range. */
509 static struct btrace_line_range
510 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
512 struct btrace_line_range range;
514 range.symtab = symtab;
521 /* Add a line to a line range. */
/* Grows RANGE minimally so it covers LINE; an empty RANGE becomes the
   single-line range [line, line + 1).  */
523 static struct btrace_line_range
524 btrace_line_range_add (struct btrace_line_range range, int line)
526 if (range.end <= range.begin)
528 /* This is the first entry. */
530 range.end = line + 1;
532 else if (line < range.begin)
534 else if (range.end < line)
540 /* Return non-zero if RANGE is empty, zero otherwise. */
543 btrace_line_range_is_empty (struct btrace_line_range range)
545 return range.end <= range.begin;
548 /* Return non-zero if LHS contains RHS, zero otherwise. */
/* Containment requires the same symtab, not just overlapping lines.  */
551 btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
559 /* Find the line range associated with PC. */
/* Collects every linetable entry whose address equals PC exactly; returns
   an empty range (in the PC's symtab, or NULL) when nothing matches.  */
561 static struct btrace_line_range
562 btrace_find_line_range (CORE_ADDR pc)
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
570 symtab = find_pc_line_symtab (pc);
572 return btrace_mk_line_range (NULL, 0, 0);
574 ltable = SYMTAB_LINETABLE (symtab);
576 return btrace_mk_line_range (symtab, 0, 0);
578 nlines = ltable->nitems;
579 lines = ltable->item;
581 return btrace_mk_line_range (symtab, 0, 0);
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines - 1; i++)
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
593 /* Print source lines in LINES to UIOUT.
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
603 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
604 struct cleanup **ui_item_chain, int flags)
606 print_source_lines_flags psl_flags;
/* DISASSEMBLY_FILENAME in FLAGS maps to the source-printer's
   filename flag.  */
610 if (flags & DISASSEMBLY_FILENAME)
611 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
613 for (line = lines.begin; line < lines.end; ++line)
615 if (*ui_item_chain != NULL)
616 do_cleanups (*ui_item_chain);
/* Open a "src_and_asm_line" tuple and nested "line_asm_insn" list for
   the instructions that follow this source line.  */
619 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
621 print_source_lines (lines.symtab, line, line + 1, psl_flags);
623 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
627 /* Disassemble a section of the recorded instruction trace. */
/* Prints instructions [BEGIN, END) of BTINFO's trace to UIOUT, interleaving
   source lines when DISASSEMBLY_SOURCE is set and rendering trace gaps
   (NULL instructions) as decode-error annotations.  */
630 btrace_insn_history (struct ui_out *uiout,
631 const struct btrace_thread_info *btinfo,
632 const struct btrace_insn_iterator *begin,
633 const struct btrace_insn_iterator *end, int flags)
635 struct cleanup *cleanups, *ui_item_chain;
636 struct gdbarch *gdbarch;
637 struct btrace_insn_iterator it;
638 struct btrace_line_range last_lines;
640 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
641 btrace_insn_number (end));
643 flags |= DISASSEMBLY_SPECULATIVE;
645 gdbarch = target_gdbarch ();
646 last_lines = btrace_mk_line_range (NULL, 0, 0);
648 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
650 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
651 instructions corresponding to that line. */
652 ui_item_chain = NULL;
654 gdb_pretty_print_disassembler disasm (gdbarch);
656 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
658 const struct btrace_insn *insn;
660 insn = btrace_insn_get (&it);
662 /* A NULL instruction indicates a gap in the trace. */
665 const struct btrace_config *conf;
667 conf = btrace_conf (btinfo);
669 /* We have trace so we must have a configuration. */
670 gdb_assert (conf != NULL);
672 uiout->field_fmt ("insn-number", "%u",
673 btrace_insn_number (&it));
676 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
681 struct disasm_insn dinsn;
683 if ((flags & DISASSEMBLY_SOURCE) != 0)
685 struct btrace_line_range lines;
687 lines = btrace_find_line_range (insn->pc);
/* Only print source lines not already covered by the previously
   printed range.  */
688 if (!btrace_line_range_is_empty (lines)
689 && !btrace_line_range_contains_range (last_lines, lines))
691 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
694 else if (ui_item_chain == NULL)
697 = make_cleanup_ui_out_tuple_begin_end (uiout,
699 /* No source information. */
700 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
703 gdb_assert (ui_item_chain != NULL);
706 memset (&dinsn, 0, sizeof (dinsn));
707 dinsn.number = btrace_insn_number (&it);
708 dinsn.addr = insn->pc;
710 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
711 dinsn.is_speculative = 1;
713 disasm.pretty_print_insn (uiout, &dinsn, flags);
717 do_cleanups (cleanups);
720 /* The to_insn_history method of target record-btrace. */
/* Implements "record instruction-history": shows SIZE instructions around
   the current position on the first call, then continues forward or
   backward (sign of SIZE) from the previously shown window.  */
723 record_btrace_insn_history (struct target_ops *self, int size, int flags)
725 struct btrace_thread_info *btinfo;
726 struct btrace_insn_history *history;
727 struct btrace_insn_iterator begin, end;
728 struct cleanup *uiout_cleanup;
729 struct ui_out *uiout;
730 unsigned int context, covered;
732 uiout = current_uiout;
733 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
735 context = abs (size);
737 error (_("Bad record instruction-history-size."));
739 btinfo = require_btrace ();
740 history = btinfo->insn_history;
743 struct btrace_insn_iterator *replay;
745 DEBUG ("insn-history (0x%x): %d", flags, size);
747 /* If we're replaying, we start at the replay position. Otherwise, we
748 start at the tail of the trace. */
749 replay = btinfo->replay;
753 btrace_insn_end (&begin, btinfo);
755 /* We start from here and expand in the requested direction. Then we
756 expand in the other direction, as well, to fill up any remaining
761 /* We want the current position covered, as well. */
762 covered = btrace_insn_next (&end, 1);
763 covered += btrace_insn_prev (&begin, context - covered);
764 covered += btrace_insn_next (&end, context - covered);
768 covered = btrace_insn_next (&end, context);
769 covered += btrace_insn_prev (&begin, context - covered);
774 begin = history->begin;
777 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
778 btrace_insn_number (&begin), btrace_insn_number (&end));
/* Continue the previous window in the requested direction.  */
783 covered = btrace_insn_prev (&begin, context);
788 covered = btrace_insn_next (&end, context);
793 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
797 printf_unfiltered (_("At the start of the branch trace record.\n"));
799 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the shown window so the next call can continue from it.  */
802 btrace_set_insn_history (btinfo, &begin, &end);
803 do_cleanups (uiout_cleanup);
806 /* The to_insn_history_range method of target record-btrace. */
/* Shows instructions FROM..TO inclusive; errors on empty or wrapped
   ranges, silently truncates a TO past the end of the trace.  */
809 record_btrace_insn_history_range (struct target_ops *self,
810 ULONGEST from, ULONGEST to, int flags)
812 struct btrace_thread_info *btinfo;
813 struct btrace_insn_history *history;
814 struct btrace_insn_iterator begin, end;
815 struct cleanup *uiout_cleanup;
816 struct ui_out *uiout;
817 unsigned int low, high;
820 uiout = current_uiout;
821 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
826 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
828 /* Check for wrap-arounds. */
/* LOW/HIGH are the ULONGEST arguments narrowed to unsigned int; a
   mismatch means the value did not fit.  */
829 if (low != from || high != to)
830 error (_("Bad range."));
833 error (_("Bad range."));
835 btinfo = require_btrace ();
837 found = btrace_find_insn_by_number (&begin, btinfo, low);
839 error (_("Range out of bounds."));
841 found = btrace_find_insn_by_number (&end, btinfo, high);
844 /* Silently truncate the range. */
845 btrace_insn_end (&end, btinfo);
849 /* We want both begin and end to be inclusive. */
850 btrace_insn_next (&end, 1);
853 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
854 btrace_set_insn_history (btinfo, &begin, &end);
856 do_cleanups (uiout_cleanup);
859 /* The to_insn_history_from method of target record-btrace. */
/* Computes an inclusive [begin, end] window of SIZE instructions anchored
   at FROM (before FROM for negative SIZE, after for positive), clamping at
   the trace boundaries, then delegates to the range method.  */
862 record_btrace_insn_history_from (struct target_ops *self,
863 ULONGEST from, int size, int flags)
865 ULONGEST begin, end, context;
867 context = abs (size);
869 error (_("Bad record instruction-history-size."));
878 begin = from - context + 1;
883 end = from + context - 1;
885 /* Check for wrap-around. */
890 record_btrace_insn_history_range (self, begin, end, flags);
893 /* Print the instruction number range for a function call history line. */
/* The printed range is inclusive: [insn_offset, insn_offset + size - 1].  */
896 btrace_call_history_insn_range (struct ui_out *uiout,
897 const struct btrace_function *bfun)
899 unsigned int begin, end, size;
901 size = VEC_length (btrace_insn_s, bfun->insn);
902 gdb_assert (size > 0);
904 begin = bfun->insn_offset;
905 end = begin + size - 1;
907 ui_out_field_uint (uiout, "insn begin", begin);
909 ui_out_field_uint (uiout, "insn end", end);
912 /* Compute the lowest and highest source line for the instructions in BFUN
913 and return them in PBEGIN and PEND.
914 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
915 result from inlining or macro expansion. */
918 btrace_compute_src_line_range (const struct btrace_function *bfun,
919 int *pbegin, int *pend)
921 struct btrace_insn *insn;
922 struct symtab *symtab;
934 symtab = symbol_symtab (sym);
936 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
938 struct symtab_and_line sal;
940 sal = find_pc_line (insn->pc, 0);
/* Skip instructions mapped to a different symtab or with no line info.  */
941 if (sal.symtab != symtab || sal.line == 0)
944 begin = std::min (begin, sal.line);
945 end = std::max (end, sal.line);
953 /* Print the source line information for a function call history line. */
956 btrace_call_history_src_line (struct ui_out *uiout,
957 const struct btrace_function *bfun)
966 uiout->field_string ("file",
967 symtab_to_filename_for_display (symbol_symtab (sym)));
969 btrace_compute_src_line_range (bfun, &begin, &end);
974 uiout->field_int ("min line", begin);
980 uiout->field_int ("max line", end);
983 /* Get the name of a branch trace function. */
/* Prefers the full symbol's print name, falls back to the minimal
   symbol's.  */
986 btrace_get_bfun_name (const struct btrace_function *bfun)
988 struct minimal_symbol *msym;
998 return SYMBOL_PRINT_NAME (sym);
999 else if (msym != NULL)
1000 return MSYMBOL_PRINT_NAME (msym);
1005 /* Disassemble a section of the recorded function trace. */
/* Prints one line per function segment in [BEGIN, END): index, optional
   call-depth indentation, function name, and (per FLAGS) the instruction
   range and source-line range.  Gaps are rendered as decode errors.  */
1008 btrace_call_history (struct ui_out *uiout,
1009 const struct btrace_thread_info *btinfo,
1010 const struct btrace_call_iterator *begin,
1011 const struct btrace_call_iterator *end,
1014 struct btrace_call_iterator it;
1015 record_print_flags flags = (enum record_print_flag) int_flags;
1017 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1018 btrace_call_number (end));
1020 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1022 const struct btrace_function *bfun;
1023 struct minimal_symbol *msym;
1026 bfun = btrace_call_get (&it);
1030 /* Print the function index. */
1031 ui_out_field_uint (uiout, "index", bfun->number);
1034 /* Indicate gaps in the trace. */
1035 if (bfun->errcode != 0)
1037 const struct btrace_config *conf;
1039 conf = btrace_conf (btinfo);
1041 /* We have trace so we must have a configuration. */
1042 gdb_assert (conf != NULL);
1044 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1049 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
/* Indent proportionally to the call depth (bfun->level is relative;
   btinfo->level rebases it to a non-negative value).  */
1051 int level = bfun->level + btinfo->level, i;
1053 for (i = 0; i < level; ++i)
1058 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1059 else if (msym != NULL)
1060 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1061 else if (!uiout->is_mi_like_p ())
1062 uiout->field_string ("function", "??");
1064 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1066 uiout->text (_("\tinst "));
1067 btrace_call_history_insn_range (uiout, bfun);
1070 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1072 uiout->text (_("\tat "));
1073 btrace_call_history_src_line (uiout, bfun);
1080 /* The to_call_history method of target record-btrace. */
/* Implements "record function-call-history": shows SIZE function segments
   around the current position on the first call, then continues forward or
   backward (sign of SIZE) from the previously shown window.  Mirrors
   record_btrace_insn_history above, using call iterators instead of
   instruction iterators.  */
1083 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1085 struct btrace_thread_info *btinfo;
1086 struct btrace_call_history *history;
1087 struct btrace_call_iterator begin, end;
1088 struct cleanup *uiout_cleanup;
1089 struct ui_out *uiout;
1090 unsigned int context, covered;
1091 record_print_flags flags = (enum record_print_flag) int_flags;
1093 uiout = current_uiout;
1094 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1096 context = abs (size);
1098 error (_("Bad record function-call-history-size."));
1100 btinfo = require_btrace ();
1101 history = btinfo->call_history;
1102 if (history == NULL)
1104 struct btrace_insn_iterator *replay;
1106 DEBUG ("call-history (0x%x): %d", int_flags, size);
1108 /* If we're replaying, we start at the replay position. Otherwise, we
1109 start at the tail of the trace. */
1110 replay = btinfo->replay;
1113 begin.function = replay->function;
1114 begin.btinfo = btinfo;
1117 btrace_call_end (&begin, btinfo);
1119 /* We start from here and expand in the requested direction. Then we
1120 expand in the other direction, as well, to fill up any remaining
1125 /* We want the current position covered, as well. */
1126 covered = btrace_call_next (&end, 1);
1127 covered += btrace_call_prev (&begin, context - covered);
1128 covered += btrace_call_next (&end, context - covered);
1132 covered = btrace_call_next (&end, context);
/* Normalized "context- covered" -> "context - covered" to match the
   spacing of the sibling expressions above and in insn_history.  */
1133 covered += btrace_call_prev (&begin, context - covered);
1138 begin = history->begin;
1141 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1142 btrace_call_number (&begin), btrace_call_number (&end));
/* Continue the previous window in the requested direction.  */
1147 covered = btrace_call_prev (&begin, context);
1152 covered = btrace_call_next (&end, context);
1157 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1161 printf_unfiltered (_("At the start of the branch trace record.\n"));
1163 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the shown window so the next call can continue from it.  */
1166 btrace_set_call_history (btinfo, &begin, &end);
1167 do_cleanups (uiout_cleanup);
1170 /* The to_call_history_range method of target record-btrace. */
/* Shows function segments FROM..TO inclusive; errors on empty or wrapped
   ranges, silently truncates a TO past the end of the trace.  */
1173 record_btrace_call_history_range (struct target_ops *self,
1174 ULONGEST from, ULONGEST to,
1177 struct btrace_thread_info *btinfo;
1178 struct btrace_call_history *history;
1179 struct btrace_call_iterator begin, end;
1180 struct cleanup *uiout_cleanup;
1181 struct ui_out *uiout;
1182 unsigned int low, high;
1184 record_print_flags flags = (enum record_print_flag) int_flags;
1186 uiout = current_uiout;
1187 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1192 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1194 /* Check for wrap-arounds. */
/* LOW/HIGH are the ULONGEST arguments narrowed to unsigned int; a
   mismatch means the value did not fit.  */
1195 if (low != from || high != to)
1196 error (_("Bad range."));
1199 error (_("Bad range."));
1201 btinfo = require_btrace ();
1203 found = btrace_find_call_by_number (&begin, btinfo, low);
1205 error (_("Range out of bounds."));
1207 found = btrace_find_call_by_number (&end, btinfo, high);
1210 /* Silently truncate the range. */
1211 btrace_call_end (&end, btinfo);
1215 /* We want both begin and end to be inclusive. */
1216 btrace_call_next (&end, 1);
1219 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1220 btrace_set_call_history (btinfo, &begin, &end);
1222 do_cleanups (uiout_cleanup);
1225 /* The to_call_history_from method of target record-btrace. */
/* Computes an inclusive [begin, end] window of SIZE segments anchored at
   FROM (direction per sign of SIZE), clamping at the trace boundaries,
   then delegates to the range method.  */
1228 record_btrace_call_history_from (struct target_ops *self,
1229 ULONGEST from, int size,
1232 ULONGEST begin, end, context;
1233 record_print_flags flags = (enum record_print_flag) int_flags;
1235 context = abs (size);
1237 error (_("Bad record function-call-history-size."));
1246 begin = from - context + 1;
1251 end = from + context - 1;
1253 /* Check for wrap-around. */
1258 record_btrace_call_history_range (self, begin, end, flags);
1261 /* The to_record_is_replaying method of target record-btrace. */
/* Returns non-zero if any non-exited thread matching PTID is replaying.  */
1264 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1266 struct thread_info *tp;
1268 ALL_NON_EXITED_THREADS (tp)
1269 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1275 /* The to_record_will_replay method of target record-btrace. */
/* Reverse execution always replays; forward execution replays only if a
   matching thread is already replaying.  */
1278 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1280 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1283 /* The to_xfer_partial method of target record-btrace. */
1285 static enum target_xfer_status
1286 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1287 const char *annex, gdb_byte *readbuf,
1288 const gdb_byte *writebuf, ULONGEST offset,
1289 ULONGEST len, ULONGEST *xfered_len)
1291 struct target_ops *t;
1293 /* Filter out requests that don't make sense during replay. */
/* The replay restrictions apply only when the user kept the default
   "read-only" setting and we are not producing a core file.  */
1294 if (replay_memory_access == replay_memory_access_read_only
1295 && !record_btrace_generating_corefile
1296 && record_btrace_is_replaying (ops, inferior_ptid))
1300 case TARGET_OBJECT_MEMORY:
1302 struct target_section *section;
1304 /* We do not allow writing memory in general. */
1305 if (writebuf != NULL)
1308 return TARGET_XFER_UNAVAILABLE;
1311 /* We allow reading readonly memory. */
1312 section = target_section_by_addr (ops, offset);
1313 if (section != NULL)
1315 /* Check if the section we found is readonly. */
1316 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1317 section->the_bfd_section)
1318 & SEC_READONLY) != 0)
1320 /* Truncate the request to fit into this section. */
1321 len = std::min (len, section->endaddr - offset);
1327 return TARGET_XFER_UNAVAILABLE;
1332 /* Forward the request. */
/* Requests that pass the replay filter go to the target beneath.  */
1334 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1335 offset, len, xfered_len);
1338 /* The to_insert_breakpoint method of target record-btrace. */
/* Temporarily switches replay_memory_access to read-write so the target
   beneath may patch the breakpoint in; the old setting is restored on both
   the normal and the exception path.  */
1341 record_btrace_insert_breakpoint (struct target_ops *ops,
1342 struct gdbarch *gdbarch,
1343 struct bp_target_info *bp_tgt)
1348 /* Inserting breakpoints requires accessing memory. Allow it for the
1349 duration of this function. */
1350 old = replay_memory_access;
1351 replay_memory_access = replay_memory_access_read_write;
1356 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1358 CATCH (except, RETURN_MASK_ALL)
1360 replay_memory_access = old;
1361 throw_exception (except);
1364 replay_memory_access = old;
1369 /* The to_remove_breakpoint method of target record-btrace. */
/* Same save/restore pattern as insert_breakpoint above.  */
1372 record_btrace_remove_breakpoint (struct target_ops *ops,
1373 struct gdbarch *gdbarch,
1374 struct bp_target_info *bp_tgt,
1375 enum remove_bp_reason reason)
1380 /* Removing breakpoints requires accessing memory. Allow it for the
1381 duration of this function. */
1382 old = replay_memory_access;
1383 replay_memory_access = replay_memory_access_read_write;
1388 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1391 CATCH (except, RETURN_MASK_ALL)
1393 replay_memory_access = old;
1394 throw_exception (except);
1397 replay_memory_access = old;
1402 /* The to_fetch_registers method of target record-btrace. */
/* While replaying, only the PC register can be supplied — its value is the
   address of the current replay instruction.  Otherwise the request is
   forwarded to the target beneath.  */
1405 record_btrace_fetch_registers (struct target_ops *ops,
1406 struct regcache *regcache, int regno)
1408 struct btrace_insn_iterator *replay;
1409 struct thread_info *tp;
1411 tp = find_thread_ptid (inferior_ptid);
1412 gdb_assert (tp != NULL);
1414 replay = tp->btrace.replay;
1415 if (replay != NULL && !record_btrace_generating_corefile)
1417 const struct btrace_insn *insn;
1418 struct gdbarch *gdbarch;
1421 gdbarch = get_regcache_arch (regcache);
1422 pcreg = gdbarch_pc_regnum (gdbarch);
1426 /* We can only provide the PC register. */
1427 if (regno >= 0 && regno != pcreg)
1430 insn = btrace_insn_get (replay);
1431 gdb_assert (insn != NULL);
1433 regcache_raw_supply (regcache, regno, &insn->pc);
1437 struct target_ops *t = ops->beneath;
1439 t->to_fetch_registers (t, regcache, regno);
1443 /* The to_store_registers method of target record-btrace. */
/* Register writes are refused during replay (unless generating a core
   file); otherwise forwarded beneath.  */
1446 record_btrace_store_registers (struct target_ops *ops,
1447 struct regcache *regcache, int regno)
1449 struct target_ops *t;
1451 if (!record_btrace_generating_corefile
1452 && record_btrace_is_replaying (ops, inferior_ptid))
1453 error (_("Cannot write registers while replaying."));
1455 gdb_assert (may_write_registers != 0);
1458 t->to_store_registers (t, regcache, regno);
1461 /* The to_prepare_to_store method of target record-btrace. */
1464 record_btrace_prepare_to_store (struct target_ops *ops,
1465 struct regcache *regcache)
1467 struct target_ops *t;
1469 if (!record_btrace_generating_corefile
1470 && record_btrace_is_replaying (ops, inferior_ptid))
1474 t->to_prepare_to_store (t, regcache);
1477 /* The branch trace frame cache. */
/* Associates a frame_info with the btrace function segment it represents;
   entries live in the BFCACHE hash table keyed by the frame pointer.  */
1479 struct btrace_frame_cache
1482 struct thread_info *tp;
1484 /* The frame info. */
1485 struct frame_info *frame;
1487 /* The branch trace function segment. */
1488 const struct btrace_function *bfun;
1491 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1493 static htab_t bfcache;
1495 /* hash_f for htab_create_alloc of bfcache. */
/* Hash and equality both operate on the frame pointer only.  */
1498 bfcache_hash (const void *arg)
1500 const struct btrace_frame_cache *cache
1501 = (const struct btrace_frame_cache *) arg;
1503 return htab_hash_pointer (cache->frame);
1506 /* eq_f for htab_create_alloc of bfcache. */
1509 bfcache_eq (const void *arg1, const void *arg2)
1511 const struct btrace_frame_cache *cache1
1512 = (const struct btrace_frame_cache *) arg1;
1513 const struct btrace_frame_cache *cache2
1514 = (const struct btrace_frame_cache *) arg2;
1516 return cache1->frame == cache2->frame;
1519 /* Create a new btrace frame cache. */
/* Allocates the cache on the frame obstack and registers it in BFCACHE;
   asserts the frame was not already cached.  */
1521 static struct btrace_frame_cache *
1522 bfcache_new (struct frame_info *frame)
1524 struct btrace_frame_cache *cache;
1527 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1528 cache->frame = frame;
1530 slot = htab_find_slot (bfcache, cache, INSERT);
1531 gdb_assert (*slot == NULL);
1537 /* Extract the branch trace function from a branch trace frame. */
/* Looks FRAME up in BFCACHE without inserting (NO_INSERT).  */
1539 static const struct btrace_function *
1540 btrace_get_frame_function (struct frame_info *frame)
1542 const struct btrace_frame_cache *cache;
1543 const struct btrace_function *bfun;
1544 struct btrace_frame_cache pattern;
1547 pattern.frame = frame;
1549 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1553 cache = (const struct btrace_frame_cache *) *slot;
1557 /* Implement stop_reason method for record_btrace_frame_unwind. */
/* A frame with no caller segment (bfun->up == NULL) cannot be unwound
   further within the recorded trace.  */
1559 static enum unwind_stop_reason
1560 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1563 const struct btrace_frame_cache *cache;
1564 const struct btrace_function *bfun;
1566 cache = (const struct btrace_frame_cache *) *this_cache;
1568 gdb_assert (bfun != NULL);
1570 if (bfun->up == NULL)
1571 return UNWIND_UNAVAILABLE;
1573 return UNWIND_NO_REASON;
1576 /* Implement this_id method for record_btrace_frame_unwind. */
1579 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1580 struct frame_id *this_id)
1582 const struct btrace_frame_cache *cache;
1583 const struct btrace_function *bfun;
1584 CORE_ADDR code, special;
1586 cache = (const struct btrace_frame_cache *) *this_cache;
1589 gdb_assert (bfun != NULL);
1591 while (bfun->segment.prev != NULL)
1592 bfun = bfun->segment.prev;
1594 code = get_frame_func (this_frame);
1595 special = bfun->number;
1597 *this_id = frame_id_build_unavailable_stack_special (code, special);
1599 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1600 btrace_get_bfun_name (cache->bfun),
1601 core_addr_to_string_nz (this_id->code_addr),
1602 core_addr_to_string_nz (this_id->special_addr));
1605 /* Implement prev_register method for record_btrace_frame_unwind. */
1607 static struct value *
1608 record_btrace_frame_prev_register (struct frame_info *this_frame,
1612 const struct btrace_frame_cache *cache;
1613 const struct btrace_function *bfun, *caller;
1614 const struct btrace_insn *insn;
1615 struct gdbarch *gdbarch;
1619 gdbarch = get_frame_arch (this_frame);
1620 pcreg = gdbarch_pc_regnum (gdbarch);
1621 if (pcreg < 0 || regnum != pcreg)
1622 throw_error (NOT_AVAILABLE_ERROR,
1623 _("Registers are not available in btrace record history"));
1625 cache = (const struct btrace_frame_cache *) *this_cache;
1627 gdb_assert (bfun != NULL);
1631 throw_error (NOT_AVAILABLE_ERROR,
1632 _("No caller in btrace record history"));
1634 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1636 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1641 insn = VEC_last (btrace_insn_s, caller->insn);
1644 pc += gdb_insn_length (gdbarch, pc);
1647 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1648 btrace_get_bfun_name (bfun), bfun->level,
1649 core_addr_to_string_nz (pc));
1651 return frame_unwind_got_address (this_frame, regnum, pc);
1654 /* Implement sniffer method for record_btrace_frame_unwind. */
1657 record_btrace_frame_sniffer (const struct frame_unwind *self,
1658 struct frame_info *this_frame,
1661 const struct btrace_function *bfun;
1662 struct btrace_frame_cache *cache;
1663 struct thread_info *tp;
1664 struct frame_info *next;
1666 /* THIS_FRAME does not contain a reference to its thread. */
1667 tp = find_thread_ptid (inferior_ptid);
1668 gdb_assert (tp != NULL);
1671 next = get_next_frame (this_frame);
1674 const struct btrace_insn_iterator *replay;
1676 replay = tp->btrace.replay;
1678 bfun = replay->function;
1682 const struct btrace_function *callee;
1684 callee = btrace_get_frame_function (next);
1685 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1692 DEBUG ("[frame] sniffed frame for %s on level %d",
1693 btrace_get_bfun_name (bfun), bfun->level);
1695 /* This is our frame. Initialize the frame cache. */
1696 cache = bfcache_new (this_frame);
1700 *this_cache = cache;
1704 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1707 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1708 struct frame_info *this_frame,
1711 const struct btrace_function *bfun, *callee;
1712 struct btrace_frame_cache *cache;
1713 struct frame_info *next;
1715 next = get_next_frame (this_frame);
1719 callee = btrace_get_frame_function (next);
1723 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1730 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1731 btrace_get_bfun_name (bfun), bfun->level);
1733 /* This is our frame. Initialize the frame cache. */
1734 cache = bfcache_new (this_frame);
1735 cache->tp = find_thread_ptid (inferior_ptid);
1738 *this_cache = cache;
1743 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1745 struct btrace_frame_cache *cache;
1748 cache = (struct btrace_frame_cache *) this_cache;
1750 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1751 gdb_assert (slot != NULL);
1753 htab_remove_elt (bfcache, cache);
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */
1762 const struct frame_unwind record_btrace_frame_unwind =
1765 record_btrace_frame_unwind_stop_reason,
1766 record_btrace_frame_this_id,
1767 record_btrace_frame_prev_register,
1769 record_btrace_frame_sniffer,
1770 record_btrace_frame_dealloc_cache
1773 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1776 record_btrace_frame_unwind_stop_reason,
1777 record_btrace_frame_this_id,
1778 record_btrace_frame_prev_register,
1780 record_btrace_tailcall_frame_sniffer,
1781 record_btrace_frame_dealloc_cache
1784 /* Implement the to_get_unwinder method. */
1786 static const struct frame_unwind *
1787 record_btrace_to_get_unwinder (struct target_ops *self)
1789 return &record_btrace_frame_unwind;
1792 /* Implement the to_get_tailcall_unwinder method. */
1794 static const struct frame_unwind *
1795 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1797 return &record_btrace_tailcall_frame_unwind;
1800 /* Return a human-readable string for FLAG. */
1803 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1811 return "reverse-step";
1817 return "reverse-cont";
1826 /* Indicate that TP should be resumed according to FLAG. */
1829 record_btrace_resume_thread (struct thread_info *tp,
1830 enum btrace_thread_flag flag)
1832 struct btrace_thread_info *btinfo;
1834 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1835 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1837 btinfo = &tp->btrace;
1839 /* Fetch the latest branch trace. */
1842 /* A resume request overwrites a preceding resume or stop request. */
1843 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1844 btinfo->flags |= flag;
1847 /* Get the current frame for TP. */
1849 static struct frame_info *
1850 get_thread_current_frame (struct thread_info *tp)
1852 struct frame_info *frame;
1853 ptid_t old_inferior_ptid;
1856 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1857 old_inferior_ptid = inferior_ptid;
1858 inferior_ptid = tp->ptid;
1860 /* Clear the executing flag to allow changes to the current frame.
1861 We are not actually running, yet. We just started a reverse execution
1862 command or a record goto command.
1863 For the latter, EXECUTING is false and this has no effect.
1864 For the former, EXECUTING is true and we're in to_wait, about to
1865 move the thread. Since we need to recompute the stack, we temporarily
1866 set EXECUTING to flase. */
1867 executing = is_executing (inferior_ptid);
1868 set_executing (inferior_ptid, 0);
1873 frame = get_current_frame ();
1875 CATCH (except, RETURN_MASK_ALL)
1877 /* Restore the previous execution state. */
1878 set_executing (inferior_ptid, executing);
1880 /* Restore the previous inferior_ptid. */
1881 inferior_ptid = old_inferior_ptid;
1883 throw_exception (except);
1887 /* Restore the previous execution state. */
1888 set_executing (inferior_ptid, executing);
1890 /* Restore the previous inferior_ptid. */
1891 inferior_ptid = old_inferior_ptid;
1896 /* Start replaying a thread. */
1898 static struct btrace_insn_iterator *
1899 record_btrace_start_replaying (struct thread_info *tp)
1901 struct btrace_insn_iterator *replay;
1902 struct btrace_thread_info *btinfo;
1904 btinfo = &tp->btrace;
1907 /* We can't start replaying without trace. */
1908 if (btinfo->begin == NULL)
1911 /* GDB stores the current frame_id when stepping in order to detects steps
1913 Since frames are computed differently when we're replaying, we need to
1914 recompute those stored frames and fix them up so we can still detect
1915 subroutines after we started replaying. */
1918 struct frame_info *frame;
1919 struct frame_id frame_id;
1920 int upd_step_frame_id, upd_step_stack_frame_id;
1922 /* The current frame without replaying - computed via normal unwind. */
1923 frame = get_thread_current_frame (tp);
1924 frame_id = get_frame_id (frame);
1926 /* Check if we need to update any stepping-related frame id's. */
1927 upd_step_frame_id = frame_id_eq (frame_id,
1928 tp->control.step_frame_id);
1929 upd_step_stack_frame_id = frame_id_eq (frame_id,
1930 tp->control.step_stack_frame_id);
1932 /* We start replaying at the end of the branch trace. This corresponds
1933 to the current instruction. */
1934 replay = XNEW (struct btrace_insn_iterator);
1935 btrace_insn_end (replay, btinfo);
1937 /* Skip gaps at the end of the trace. */
1938 while (btrace_insn_get (replay) == NULL)
1942 steps = btrace_insn_prev (replay, 1);
1944 error (_("No trace."));
1947 /* We're not replaying, yet. */
1948 gdb_assert (btinfo->replay == NULL);
1949 btinfo->replay = replay;
1951 /* Make sure we're not using any stale registers. */
1952 registers_changed_ptid (tp->ptid);
1954 /* The current frame with replaying - computed via btrace unwind. */
1955 frame = get_thread_current_frame (tp);
1956 frame_id = get_frame_id (frame);
1958 /* Replace stepping related frames where necessary. */
1959 if (upd_step_frame_id)
1960 tp->control.step_frame_id = frame_id;
1961 if (upd_step_stack_frame_id)
1962 tp->control.step_stack_frame_id = frame_id;
1964 CATCH (except, RETURN_MASK_ALL)
1966 xfree (btinfo->replay);
1967 btinfo->replay = NULL;
1969 registers_changed_ptid (tp->ptid);
1971 throw_exception (except);
1978 /* Stop replaying a thread. */
1981 record_btrace_stop_replaying (struct thread_info *tp)
1983 struct btrace_thread_info *btinfo;
1985 btinfo = &tp->btrace;
1987 xfree (btinfo->replay);
1988 btinfo->replay = NULL;
1990 /* Make sure we're not leaving any stale registers. */
1991 registers_changed_ptid (tp->ptid);
1994 /* Stop replaying TP if it is at the end of its execution history. */
1997 record_btrace_stop_replaying_at_end (struct thread_info *tp)
1999 struct btrace_insn_iterator *replay, end;
2000 struct btrace_thread_info *btinfo;
2002 btinfo = &tp->btrace;
2003 replay = btinfo->replay;
2008 btrace_insn_end (&end, btinfo);
2010 if (btrace_insn_cmp (replay, &end) == 0)
2011 record_btrace_stop_replaying (tp);
2014 /* The to_resume method of target record-btrace. */
2017 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2018 enum gdb_signal signal)
2020 struct thread_info *tp;
2021 enum btrace_thread_flag flag, cflag;
2023 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2024 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2025 step ? "step" : "cont");
2027 /* Store the execution direction of the last resume.
2029 If there is more than one to_resume call, we have to rely on infrun
2030 to not change the execution direction in-between. */
2031 record_btrace_resume_exec_dir = execution_direction;
2033 /* As long as we're not replaying, just forward the request.
2035 For non-stop targets this means that no thread is replaying. In order to
2036 make progress, we may need to explicitly move replaying threads to the end
2037 of their execution history. */
2038 if ((execution_direction != EXEC_REVERSE)
2039 && !record_btrace_is_replaying (ops, minus_one_ptid))
2042 ops->to_resume (ops, ptid, step, signal);
2046 /* Compute the btrace thread flag for the requested move. */
2047 if (execution_direction == EXEC_REVERSE)
2049 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2054 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2058 /* We just indicate the resume intent here. The actual stepping happens in
2059 record_btrace_wait below.
2061 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2062 if (!target_is_non_stop_p ())
2064 gdb_assert (ptid_match (inferior_ptid, ptid));
2066 ALL_NON_EXITED_THREADS (tp)
2067 if (ptid_match (tp->ptid, ptid))
2069 if (ptid_match (tp->ptid, inferior_ptid))
2070 record_btrace_resume_thread (tp, flag);
2072 record_btrace_resume_thread (tp, cflag);
2077 ALL_NON_EXITED_THREADS (tp)
2078 if (ptid_match (tp->ptid, ptid))
2079 record_btrace_resume_thread (tp, flag);
2082 /* Async support. */
2083 if (target_can_async_p ())
2086 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2090 /* The to_commit_resume method of target record-btrace. */
2093 record_btrace_commit_resume (struct target_ops *ops)
2095 if ((execution_direction != EXEC_REVERSE)
2096 && !record_btrace_is_replaying (ops, minus_one_ptid))
2097 ops->beneath->to_commit_resume (ops->beneath);
2100 /* Cancel resuming TP. */
2103 record_btrace_cancel_resume (struct thread_info *tp)
2105 enum btrace_thread_flag flags;
2107 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2111 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2112 print_thread_id (tp),
2113 target_pid_to_str (tp->ptid), flags,
2114 btrace_thread_flag_to_str (flags));
2116 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2117 record_btrace_stop_replaying_at_end (tp);
2120 /* Return a target_waitstatus indicating that we ran out of history. */
2122 static struct target_waitstatus
2123 btrace_step_no_history (void)
2125 struct target_waitstatus status;
2127 status.kind = TARGET_WAITKIND_NO_HISTORY;
2132 /* Return a target_waitstatus indicating that a step finished. */
2134 static struct target_waitstatus
2135 btrace_step_stopped (void)
2137 struct target_waitstatus status;
2139 status.kind = TARGET_WAITKIND_STOPPED;
2140 status.value.sig = GDB_SIGNAL_TRAP;
2145 /* Return a target_waitstatus indicating that a thread was stopped as
2148 static struct target_waitstatus
2149 btrace_step_stopped_on_request (void)
2151 struct target_waitstatus status;
2153 status.kind = TARGET_WAITKIND_STOPPED;
2154 status.value.sig = GDB_SIGNAL_0;
2159 /* Return a target_waitstatus indicating a spurious stop. */
2161 static struct target_waitstatus
2162 btrace_step_spurious (void)
2164 struct target_waitstatus status;
2166 status.kind = TARGET_WAITKIND_SPURIOUS;
2171 /* Return a target_waitstatus indicating that the thread was not resumed. */
2173 static struct target_waitstatus
2174 btrace_step_no_resumed (void)
2176 struct target_waitstatus status;
2178 status.kind = TARGET_WAITKIND_NO_RESUMED;
2183 /* Return a target_waitstatus indicating that we should wait again. */
2185 static struct target_waitstatus
2186 btrace_step_again (void)
2188 struct target_waitstatus status;
2190 status.kind = TARGET_WAITKIND_IGNORE;
2195 /* Clear the record histories. */
2198 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2200 xfree (btinfo->insn_history);
2201 xfree (btinfo->call_history);
2203 btinfo->insn_history = NULL;
2204 btinfo->call_history = NULL;
2207 /* Check whether TP's current replay position is at a breakpoint. */
2210 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2212 struct btrace_insn_iterator *replay;
2213 struct btrace_thread_info *btinfo;
2214 const struct btrace_insn *insn;
2215 struct inferior *inf;
2217 btinfo = &tp->btrace;
2218 replay = btinfo->replay;
2223 insn = btrace_insn_get (replay);
2227 inf = find_inferior_ptid (tp->ptid);
2231 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2232 &btinfo->stop_reason);
2235 /* Step one instruction in forward direction. */
2237 static struct target_waitstatus
2238 record_btrace_single_step_forward (struct thread_info *tp)
2240 struct btrace_insn_iterator *replay, end, start;
2241 struct btrace_thread_info *btinfo;
2243 btinfo = &tp->btrace;
2244 replay = btinfo->replay;
2246 /* We're done if we're not replaying. */
2248 return btrace_step_no_history ();
2250 /* Check if we're stepping a breakpoint. */
2251 if (record_btrace_replay_at_breakpoint (tp))
2252 return btrace_step_stopped ();
2254 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2255 jump back to the instruction at which we started. */
2261 /* We will bail out here if we continue stepping after reaching the end
2262 of the execution history. */
2263 steps = btrace_insn_next (replay, 1);
2267 return btrace_step_no_history ();
2270 while (btrace_insn_get (replay) == NULL);
2272 /* Determine the end of the instruction trace. */
2273 btrace_insn_end (&end, btinfo);
2275 /* The execution trace contains (and ends with) the current instruction.
2276 This instruction has not been executed, yet, so the trace really ends
2277 one instruction earlier. */
2278 if (btrace_insn_cmp (replay, &end) == 0)
2279 return btrace_step_no_history ();
2281 return btrace_step_spurious ();
2284 /* Step one instruction in backward direction. */
2286 static struct target_waitstatus
2287 record_btrace_single_step_backward (struct thread_info *tp)
2289 struct btrace_insn_iterator *replay, start;
2290 struct btrace_thread_info *btinfo;
2292 btinfo = &tp->btrace;
2293 replay = btinfo->replay;
2295 /* Start replaying if we're not already doing so. */
2297 replay = record_btrace_start_replaying (tp);
2299 /* If we can't step any further, we reached the end of the history.
2300 Skip gaps during replay. If we end up at a gap (at the beginning of
2301 the trace), jump back to the instruction at which we started. */
2307 steps = btrace_insn_prev (replay, 1);
2311 return btrace_step_no_history ();
2314 while (btrace_insn_get (replay) == NULL);
2316 /* Check if we're stepping a breakpoint.
2318 For reverse-stepping, this check is after the step. There is logic in
2319 infrun.c that handles reverse-stepping separately. See, for example,
2320 proceed and adjust_pc_after_break.
2322 This code assumes that for reverse-stepping, PC points to the last
2323 de-executed instruction, whereas for forward-stepping PC points to the
2324 next to-be-executed instruction. */
2325 if (record_btrace_replay_at_breakpoint (tp))
2326 return btrace_step_stopped ();
2328 return btrace_step_spurious ();
2331 /* Step a single thread. */
2333 static struct target_waitstatus
2334 record_btrace_step_thread (struct thread_info *tp)
2336 struct btrace_thread_info *btinfo;
2337 struct target_waitstatus status;
2338 enum btrace_thread_flag flags;
2340 btinfo = &tp->btrace;
2342 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2343 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2345 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2346 target_pid_to_str (tp->ptid), flags,
2347 btrace_thread_flag_to_str (flags));
2349 /* We can't step without an execution history. */
2350 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2351 return btrace_step_no_history ();
2356 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2359 return btrace_step_stopped_on_request ();
2362 status = record_btrace_single_step_forward (tp);
2363 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2366 return btrace_step_stopped ();
2369 status = record_btrace_single_step_backward (tp);
2370 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2373 return btrace_step_stopped ();
2376 status = record_btrace_single_step_forward (tp);
2377 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2380 btinfo->flags |= flags;
2381 return btrace_step_again ();
2384 status = record_btrace_single_step_backward (tp);
2385 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2388 btinfo->flags |= flags;
2389 return btrace_step_again ();
2392 /* We keep threads moving at the end of their execution history. The to_wait
2393 method will stop the thread for whom the event is reported. */
2394 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2395 btinfo->flags |= flags;
/* A vector of threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2405 /* Announce further events if necessary. */
2408 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2409 const VEC (tp_t) *no_history)
2411 int more_moving, more_no_history;
2413 more_moving = !VEC_empty (tp_t, moving);
2414 more_no_history = !VEC_empty (tp_t, no_history);
2416 if (!more_moving && !more_no_history)
2420 DEBUG ("movers pending");
2422 if (more_no_history)
2423 DEBUG ("no-history pending");
2425 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2428 /* The to_wait method of target record-btrace. */
2431 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2432 struct target_waitstatus *status, int options)
2434 VEC (tp_t) *moving, *no_history;
2435 struct thread_info *tp, *eventing;
2436 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2438 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2440 /* As long as we're not replaying, just forward the request. */
2441 if ((execution_direction != EXEC_REVERSE)
2442 && !record_btrace_is_replaying (ops, minus_one_ptid))
2445 return ops->to_wait (ops, ptid, status, options);
2451 make_cleanup (VEC_cleanup (tp_t), &moving);
2452 make_cleanup (VEC_cleanup (tp_t), &no_history);
2454 /* Keep a work list of moving threads. */
2455 ALL_NON_EXITED_THREADS (tp)
2456 if (ptid_match (tp->ptid, ptid)
2457 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2458 VEC_safe_push (tp_t, moving, tp);
2460 if (VEC_empty (tp_t, moving))
2462 *status = btrace_step_no_resumed ();
2464 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2465 target_waitstatus_to_string (status));
2467 do_cleanups (cleanups);
2471 /* Step moving threads one by one, one step each, until either one thread
2472 reports an event or we run out of threads to step.
2474 When stepping more than one thread, chances are that some threads reach
2475 the end of their execution history earlier than others. If we reported
2476 this immediately, all-stop on top of non-stop would stop all threads and
2477 resume the same threads next time. And we would report the same thread
2478 having reached the end of its execution history again.
2480 In the worst case, this would starve the other threads. But even if other
2481 threads would be allowed to make progress, this would result in far too
2482 many intermediate stops.
2484 We therefore delay the reporting of "no execution history" until we have
2485 nothing else to report. By this time, all threads should have moved to
2486 either the beginning or the end of their execution history. There will
2487 be a single user-visible stop. */
2489 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2494 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2496 *status = record_btrace_step_thread (tp);
2498 switch (status->kind)
2500 case TARGET_WAITKIND_IGNORE:
2504 case TARGET_WAITKIND_NO_HISTORY:
2505 VEC_safe_push (tp_t, no_history,
2506 VEC_ordered_remove (tp_t, moving, ix));
2510 eventing = VEC_unordered_remove (tp_t, moving, ix);
2516 if (eventing == NULL)
2518 /* We started with at least one moving thread. This thread must have
2519 either stopped or reached the end of its execution history.
2521 In the former case, EVENTING must not be NULL.
2522 In the latter case, NO_HISTORY must not be empty. */
2523 gdb_assert (!VEC_empty (tp_t, no_history));
2525 /* We kept threads moving at the end of their execution history. Stop
2526 EVENTING now that we are going to report its stop. */
2527 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2528 eventing->btrace.flags &= ~BTHR_MOVE;
2530 *status = btrace_step_no_history ();
2533 gdb_assert (eventing != NULL);
2535 /* We kept threads replaying at the end of their execution history. Stop
2536 replaying EVENTING now that we are going to report its stop. */
2537 record_btrace_stop_replaying_at_end (eventing);
2539 /* Stop all other threads. */
2540 if (!target_is_non_stop_p ())
2541 ALL_NON_EXITED_THREADS (tp)
2542 record_btrace_cancel_resume (tp);
2544 /* In async mode, we need to announce further events. */
2545 if (target_is_async_p ())
2546 record_btrace_maybe_mark_async_event (moving, no_history);
2548 /* Start record histories anew from the current position. */
2549 record_btrace_clear_histories (&eventing->btrace);
2551 /* We moved the replay position but did not update registers. */
2552 registers_changed_ptid (eventing->ptid);
2554 DEBUG ("wait ended by thread %s (%s): %s",
2555 print_thread_id (eventing),
2556 target_pid_to_str (eventing->ptid),
2557 target_waitstatus_to_string (status));
2559 do_cleanups (cleanups);
2560 return eventing->ptid;
2563 /* The to_stop method of target record-btrace. */
2566 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2568 DEBUG ("stop %s", target_pid_to_str (ptid));
2570 /* As long as we're not replaying, just forward the request. */
2571 if ((execution_direction != EXEC_REVERSE)
2572 && !record_btrace_is_replaying (ops, minus_one_ptid))
2575 ops->to_stop (ops, ptid);
2579 struct thread_info *tp;
2581 ALL_NON_EXITED_THREADS (tp)
2582 if (ptid_match (tp->ptid, ptid))
2584 tp->btrace.flags &= ~BTHR_MOVE;
2585 tp->btrace.flags |= BTHR_STOP;
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2598 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2601 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2603 if (record_btrace_is_replaying (ops, minus_one_ptid))
2605 struct thread_info *tp = inferior_thread ();
2607 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2610 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2613 /* The to_supports_stopped_by_sw_breakpoint method of target
2617 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2619 if (record_btrace_is_replaying (ops, minus_one_ptid))
2622 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2625 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2628 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2630 if (record_btrace_is_replaying (ops, minus_one_ptid))
2632 struct thread_info *tp = inferior_thread ();
2634 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2637 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2640 /* The to_supports_stopped_by_hw_breakpoint method of target
2644 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2646 if (record_btrace_is_replaying (ops, minus_one_ptid))
2649 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2652 /* The to_update_thread_list method of target record-btrace. */
2655 record_btrace_update_thread_list (struct target_ops *ops)
2657 /* We don't add or remove threads during replay. */
2658 if (record_btrace_is_replaying (ops, minus_one_ptid))
2661 /* Forward the request. */
2663 ops->to_update_thread_list (ops);
2666 /* The to_thread_alive method of target record-btrace. */
2669 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2671 /* We don't add or remove threads during replay. */
2672 if (record_btrace_is_replaying (ops, minus_one_ptid))
2673 return find_thread_ptid (ptid) != NULL;
2675 /* Forward the request. */
2677 return ops->to_thread_alive (ops, ptid);
2680 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2684 record_btrace_set_replay (struct thread_info *tp,
2685 const struct btrace_insn_iterator *it)
2687 struct btrace_thread_info *btinfo;
2689 btinfo = &tp->btrace;
2691 if (it == NULL || it->function == NULL)
2692 record_btrace_stop_replaying (tp);
2695 if (btinfo->replay == NULL)
2696 record_btrace_start_replaying (tp);
2697 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2700 *btinfo->replay = *it;
2701 registers_changed_ptid (tp->ptid);
2704 /* Start anew from the new replay position. */
2705 record_btrace_clear_histories (btinfo);
2707 stop_pc = regcache_read_pc (get_current_regcache ());
2708 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2711 /* The to_goto_record_begin method of target record-btrace. */
2714 record_btrace_goto_begin (struct target_ops *self)
2716 struct thread_info *tp;
2717 struct btrace_insn_iterator begin;
2719 tp = require_btrace_thread ();
2721 btrace_insn_begin (&begin, &tp->btrace);
2723 /* Skip gaps at the beginning of the trace. */
2724 while (btrace_insn_get (&begin) == NULL)
2728 steps = btrace_insn_next (&begin, 1);
2730 error (_("No trace."));
2733 record_btrace_set_replay (tp, &begin);
2736 /* The to_goto_record_end method of target record-btrace. */
2739 record_btrace_goto_end (struct target_ops *ops)
2741 struct thread_info *tp;
2743 tp = require_btrace_thread ();
2745 record_btrace_set_replay (tp, NULL);
2748 /* The to_goto_record method of target record-btrace. */
2751 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2753 struct thread_info *tp;
2754 struct btrace_insn_iterator it;
2755 unsigned int number;
2760 /* Check for wrap-arounds. */
2762 error (_("Instruction number out of range."));
2764 tp = require_btrace_thread ();
2766 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2768 /* Check if the instruction could not be found or is a gap. */
2769 if (found == 0 || btrace_insn_get (&it) == NULL)
2770 error (_("No such instruction."));
2772 record_btrace_set_replay (tp, &it);
2775 /* The to_record_stop_replaying method of target record-btrace. */
2778 record_btrace_stop_replaying_all (struct target_ops *self)
2780 struct thread_info *tp;
2782 ALL_NON_EXITED_THREADS (tp)
2783 record_btrace_stop_replaying (tp);
2786 /* The to_execution_direction target method. */
2788 static enum exec_direction_kind
2789 record_btrace_execution_direction (struct target_ops *self)
2791 return record_btrace_resume_exec_dir;
2794 /* The to_prepare_to_generate_core target method. */
2797 record_btrace_prepare_to_generate_core (struct target_ops *self)
2799 record_btrace_generating_corefile = 1;
2802 /* The to_done_generating_core target method. */
2805 record_btrace_done_generating_core (struct target_ops *self)
2807 record_btrace_generating_corefile = 0;
2810 /* Initialize the record-btrace target ops. */
2813 init_record_btrace_ops (void)
2815 struct target_ops *ops;
2817 ops = &record_btrace_ops;
2818 ops->to_shortname = "record-btrace";
2819 ops->to_longname = "Branch tracing target";
2820 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2821 ops->to_open = record_btrace_open;
2822 ops->to_close = record_btrace_close;
2823 ops->to_async = record_btrace_async;
2824 ops->to_detach = record_detach;
2825 ops->to_disconnect = record_btrace_disconnect;
2826 ops->to_mourn_inferior = record_mourn_inferior;
2827 ops->to_kill = record_kill;
2828 ops->to_stop_recording = record_btrace_stop_recording;
2829 ops->to_info_record = record_btrace_info;
2830 ops->to_insn_history = record_btrace_insn_history;
2831 ops->to_insn_history_from = record_btrace_insn_history_from;
2832 ops->to_insn_history_range = record_btrace_insn_history_range;
2833 ops->to_call_history = record_btrace_call_history;
2834 ops->to_call_history_from = record_btrace_call_history_from;
2835 ops->to_call_history_range = record_btrace_call_history_range;
2836 ops->to_record_is_replaying = record_btrace_is_replaying;
2837 ops->to_record_will_replay = record_btrace_will_replay;
2838 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2839 ops->to_xfer_partial = record_btrace_xfer_partial;
2840 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2841 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2842 ops->to_fetch_registers = record_btrace_fetch_registers;
2843 ops->to_store_registers = record_btrace_store_registers;
2844 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2845 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2846 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2847 ops->to_resume = record_btrace_resume;
2848 ops->to_commit_resume = record_btrace_commit_resume;
2849 ops->to_wait = record_btrace_wait;
2850 ops->to_stop = record_btrace_stop;
2851 ops->to_update_thread_list = record_btrace_update_thread_list;
2852 ops->to_thread_alive = record_btrace_thread_alive;
2853 ops->to_goto_record_begin = record_btrace_goto_begin;
2854 ops->to_goto_record_end = record_btrace_goto_end;
2855 ops->to_goto_record = record_btrace_goto;
2856 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2857 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2858 ops->to_supports_stopped_by_sw_breakpoint
2859 = record_btrace_supports_stopped_by_sw_breakpoint;
2860 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2861 ops->to_supports_stopped_by_hw_breakpoint
2862 = record_btrace_supports_stopped_by_hw_breakpoint;
2863 ops->to_execution_direction = record_btrace_execution_direction;
2864 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2865 ops->to_done_generating_core = record_btrace_done_generating_core;
2866 ops->to_stratum = record_stratum;
2867 ops->to_magic = OPS_MAGIC;
2870 /* Start recording in BTS format. */
2873 cmd_record_btrace_bts_start (char *args, int from_tty)
2875 if (args != NULL && *args != 0)
2876 error (_("Invalid argument."));
2878 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2882 execute_command ("target record-btrace", from_tty);
2884 CATCH (exception, RETURN_MASK_ALL)
2886 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2887 throw_exception (exception);
2892 /* Start recording in Intel Processor Trace format. */
2895 cmd_record_btrace_pt_start (char *args, int from_tty)
2897 if (args != NULL && *args != 0)
2898 error (_("Invalid argument."));
2900 record_btrace_conf.format = BTRACE_FORMAT_PT;
2904 execute_command ("target record-btrace", from_tty);
2906 CATCH (exception, RETURN_MASK_ALL)
2908 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2909 throw_exception (exception);
2914 /* Alias for "target record". */
2917 cmd_record_btrace_start (char *args, int from_tty)
2919 if (args != NULL && *args != 0)
2920 error (_("Invalid argument."));
2922 record_btrace_conf.format = BTRACE_FORMAT_PT;
2926 execute_command ("target record-btrace", from_tty);
2928 CATCH (exception, RETURN_MASK_ALL)
2930 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2934 execute_command ("target record-btrace", from_tty);
2936 CATCH (exception, RETURN_MASK_ALL)
2938 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2939 throw_exception (exception);
2946 /* The "set record btrace" command. */
2949 cmd_set_record_btrace (char *args, int from_tty)
2951 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2954 /* The "show record btrace" command. */
2957 cmd_show_record_btrace (char *args, int from_tty)
2959 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2962 /* The "show record btrace replay-memory-access" command. */
2965 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2966 struct cmd_list_element *c, const char *value)
2968 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2969 replay_memory_access);
2972 /* The "set record btrace bts" command. */
2975 cmd_set_record_btrace_bts (char *args, int from_tty)
2977 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2978 "by an appropriate subcommand.\n"));
2979 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2980 all_commands, gdb_stdout);
2983 /* The "show record btrace bts" command. */
2986 cmd_show_record_btrace_bts (char *args, int from_tty)
2988 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2991 /* The "set record btrace pt" command. */
2994 cmd_set_record_btrace_pt (char *args, int from_tty)
2996 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2997 "by an appropriate subcommand.\n"));
2998 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2999 all_commands, gdb_stdout);
3002 /* The "show record btrace pt" command. */
3005 cmd_show_record_btrace_pt (char *args, int from_tty)
3007 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3032 void _initialize_record_btrace (void);
3034 /* Initialize btrace commands. */
3037 _initialize_record_btrace (void)
3039 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3040 _("Start branch trace recording."), &record_btrace_cmdlist,
3041 "record btrace ", 0, &record_cmdlist);
3042 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3044 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3046 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3047 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3048 This format may not be available on all processors."),
3049 &record_btrace_cmdlist);
3050 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3052 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3054 Start branch trace recording in Intel Processor Trace format.\n\n\
3055 This format may not be available on all processors."),
3056 &record_btrace_cmdlist);
3057 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3059 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3060 _("Set record options"), &set_record_btrace_cmdlist,
3061 "set record btrace ", 0, &set_record_cmdlist);
3063 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3064 _("Show record options"), &show_record_btrace_cmdlist,
3065 "show record btrace ", 0, &show_record_cmdlist);
3067 add_setshow_enum_cmd ("replay-memory-access", no_class,
3068 replay_memory_access_types, &replay_memory_access, _("\
3069 Set what memory accesses are allowed during replay."), _("\
3070 Show what memory accesses are allowed during replay."),
3071 _("Default is READ-ONLY.\n\n\
3072 The btrace record target does not trace data.\n\
3073 The memory therefore corresponds to the live target and not \
3074 to the current replay position.\n\n\
3075 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3076 When READ-WRITE, allow accesses to read-only and read-write memory during \
3078 NULL, cmd_show_replay_memory_access,
3079 &set_record_btrace_cmdlist,
3080 &show_record_btrace_cmdlist);
3082 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3083 _("Set record btrace bts options"),
3084 &set_record_btrace_bts_cmdlist,
3085 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3087 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3088 _("Show record btrace bts options"),
3089 &show_record_btrace_bts_cmdlist,
3090 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3092 add_setshow_uinteger_cmd ("buffer-size", no_class,
3093 &record_btrace_conf.bts.size,
3094 _("Set the record/replay bts buffer size."),
3095 _("Show the record/replay bts buffer size."), _("\
3096 When starting recording request a trace buffer of this size. \
3097 The actual buffer size may differ from the requested size. \
3098 Use \"info record\" to see the actual buffer size.\n\n\
3099 Bigger buffers allow longer recording but also take more time to process \
3100 the recorded execution trace.\n\n\
3101 The trace buffer size may not be changed while recording."), NULL,
3102 show_record_bts_buffer_size_value,
3103 &set_record_btrace_bts_cmdlist,
3104 &show_record_btrace_bts_cmdlist);
3106 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3107 _("Set record btrace pt options"),
3108 &set_record_btrace_pt_cmdlist,
3109 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3111 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3112 _("Show record btrace pt options"),
3113 &show_record_btrace_pt_cmdlist,
3114 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3116 add_setshow_uinteger_cmd ("buffer-size", no_class,
3117 &record_btrace_conf.pt.size,
3118 _("Set the record/replay pt buffer size."),
3119 _("Show the record/replay pt buffer size."), _("\
3120 Bigger buffers allow longer recording but also take more time to process \
3121 the recorded execution.\n\
3122 The actual buffer size may differ from the requested size. Use \"info record\" \
3123 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3124 &set_record_btrace_pt_cmdlist,
3125 &show_record_btrace_pt_cmdlist);
3127 init_record_btrace_ops ();
3128 add_target (&record_btrace_ops);
3130 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3133 record_btrace_conf.bts.size = 64 * 1024;
3134 record_btrace_conf.pt.size = 16 * 1024;