1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer *record_btrace_thread_observer;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access = replay_memory_access_read_only;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace. This function never
109 static struct thread_info *
110 require_btrace_thread (void)
112 struct thread_info *tp;
116 tp = find_thread_ptid (inferior_ptid);
118 error (_("No thread."));
120 validate_registers_access ();
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
133 Throws an error if there is no thread or no trace. This function never
136 static struct btrace_thread_info *
137 require_btrace (void)
139 struct thread_info *tp;
141 tp = require_btrace_thread ();
146 /* Enable branch tracing for one thread. Warn on errors. */
149 record_btrace_enable_warn (struct thread_info *tp)
153 btrace_enable (tp, &record_btrace_conf);
155 CATCH (error, RETURN_MASK_ERROR)
157 warning ("%s", error.message);
162 /* Enable automatic tracing of new threads. */
165 record_btrace_auto_enable (void)
167 DEBUG ("attach thread observer");
169 record_btrace_thread_observer
170 = observer_attach_new_thread (record_btrace_enable_warn);
173 /* Disable automatic tracing of new threads. */
176 record_btrace_auto_disable (void)
178 /* The observer may have been detached, already. */
179 if (record_btrace_thread_observer == NULL)
182 DEBUG ("detach thread observer");
184 observer_detach_new_thread (record_btrace_thread_observer);
185 record_btrace_thread_observer = NULL;
188 /* The record-btrace async event handler function. */
191 record_btrace_handle_async_inferior_event (gdb_client_data data)
193 inferior_event_handler (INF_REG_EVENT, NULL);
196 /* See record-btrace.h. */
199 record_btrace_push_target (void)
203 record_btrace_auto_enable ();
205 push_target (&record_btrace_ops);
207 record_btrace_async_inferior_event_handler
208 = create_async_event_handler (record_btrace_handle_async_inferior_event,
210 record_btrace_generating_corefile = 0;
212 format = btrace_format_short_string (record_btrace_conf.format);
213 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
216 /* Disable btrace on a set of threads on scope exit. */
218 struct scoped_btrace_disable
220 scoped_btrace_disable () = default;
222 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
224 ~scoped_btrace_disable ()
226 for (thread_info *tp : m_threads)
230 void add_thread (thread_info *thread)
232 m_threads.push_front (thread);
241 std::forward_list<thread_info *> m_threads;
244 /* The to_open method of target record-btrace. */
247 record_btrace_open (const char *args, int from_tty)
249 /* If we fail to enable btrace for one thread, disable it for the threads for
250 which it was successfully enabled. */
251 scoped_btrace_disable btrace_disable;
252 struct thread_info *tp;
258 if (!target_has_execution)
259 error (_("The program is not being run."));
261 gdb_assert (record_btrace_thread_observer == NULL);
263 ALL_NON_EXITED_THREADS (tp)
264 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
266 btrace_enable (tp, &record_btrace_conf);
268 btrace_disable.add_thread (tp);
271 record_btrace_push_target ();
273 btrace_disable.discard ();
276 /* The to_stop_recording method of target record-btrace. */
279 record_btrace_stop_recording (struct target_ops *self)
281 struct thread_info *tp;
283 DEBUG ("stop recording");
285 record_btrace_auto_disable ();
287 ALL_NON_EXITED_THREADS (tp)
288 if (tp->btrace.target != NULL)
292 /* The to_disconnect method of target record-btrace. */
295 record_btrace_disconnect (struct target_ops *self, const char *args,
298 struct target_ops *beneath = self->beneath;
300 /* Do not stop recording, just clean up GDB side. */
301 unpush_target (self);
303 /* Forward disconnect. */
304 beneath->to_disconnect (beneath, args, from_tty);
307 /* The to_close method of target record-btrace. */
310 record_btrace_close (struct target_ops *self)
312 struct thread_info *tp;
314 if (record_btrace_async_inferior_event_handler != NULL)
315 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
317 /* Make sure automatic recording gets disabled even if we did not stop
318 recording before closing the record-btrace target. */
319 record_btrace_auto_disable ();
321 /* We should have already stopped recording.
322 Tear down btrace in case we have not. */
323 ALL_NON_EXITED_THREADS (tp)
324 btrace_teardown (tp);
327 /* The to_async method of target record-btrace. */
330 record_btrace_async (struct target_ops *ops, int enable)
333 mark_async_event_handler (record_btrace_async_inferior_event_handler);
335 clear_async_event_handler (record_btrace_async_inferior_event_handler);
337 ops->beneath->to_async (ops->beneath, enable);
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of a binary unit (kB/MB/GB), scale it
   down to that unit and return the matching suffix; otherwise leave
   *SIZE untouched and return the empty string.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const unsigned int shifts[] = { 30, 20, 10 };
  static const char *const suffixes[] = { "GB", "MB", "kB" };
  unsigned int value = *size;
  int unit;

  /* Try the largest unit first so 1 GB is not reported as 1024 MB.  */
  for (unit = 0; unit < 3; unit++)
    if ((value & ((1u << shifts[unit]) - 1)) == 0)
      {
	*size = value >> shifts[unit];
	return suffixes[unit];
      }

  return "";
}
368 /* Print a BTS configuration. */
371 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
379 suffix = record_btrace_adjust_size (&size);
380 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
384 /* Print an Intel Processor Trace configuration. */
387 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
395 suffix = record_btrace_adjust_size (&size);
396 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
400 /* Print a branch tracing configuration. */
403 record_btrace_print_conf (const struct btrace_config *conf)
405 printf_unfiltered (_("Recording format: %s.\n"),
406 btrace_format_string (conf->format));
408 switch (conf->format)
410 case BTRACE_FORMAT_NONE:
413 case BTRACE_FORMAT_BTS:
414 record_btrace_print_bts_conf (&conf->bts);
417 case BTRACE_FORMAT_PT:
418 record_btrace_print_pt_conf (&conf->pt);
422 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
425 /* The to_info_record method of target record-btrace. */
428 record_btrace_info (struct target_ops *self)
430 struct btrace_thread_info *btinfo;
431 const struct btrace_config *conf;
432 struct thread_info *tp;
433 unsigned int insns, calls, gaps;
437 tp = find_thread_ptid (inferior_ptid);
439 error (_("No thread."));
441 validate_registers_access ();
443 btinfo = &tp->btrace;
445 conf = btrace_conf (btinfo);
447 record_btrace_print_conf (conf);
455 if (!btrace_is_empty (tp))
457 struct btrace_call_iterator call;
458 struct btrace_insn_iterator insn;
460 btrace_call_end (&call, btinfo);
461 btrace_call_prev (&call, 1);
462 calls = btrace_call_number (&call);
464 btrace_insn_end (&insn, btinfo);
465 insns = btrace_insn_number (&insn);
467 /* If the last instruction is not a gap, it is the current instruction
468 that is not actually part of the record. */
469 if (btrace_insn_get (&insn) != NULL)
472 gaps = btinfo->ngaps;
475 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
476 "for thread %s (%s).\n"), insns, calls, gaps,
477 print_thread_id (tp), target_pid_to_str (tp->ptid));
479 if (btrace_is_replaying (tp))
480 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
481 btrace_insn_number (btinfo->replay));
484 /* Print a decode error. */
487 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
488 enum btrace_format format)
490 const char *errstr = btrace_decode_error (format, errcode);
492 uiout->text (_("["));
493 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
494 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
496 uiout->text (_("decode error ("));
497 uiout->field_int ("errcode", errcode);
498 uiout->text (_("): "));
500 uiout->text (errstr);
501 uiout->text (_("]\n"));
504 /* Print an unsigned int. */
507 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
509 uiout->field_fmt (fld, "%u", val);
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
578 /* Find the line range associated with PC. */
580 static struct btrace_line_range
581 btrace_find_line_range (CORE_ADDR pc)
583 struct btrace_line_range range;
584 struct linetable_entry *lines;
585 struct linetable *ltable;
586 struct symtab *symtab;
589 symtab = find_pc_line_symtab (pc);
591 return btrace_mk_line_range (NULL, 0, 0);
593 ltable = SYMTAB_LINETABLE (symtab);
595 return btrace_mk_line_range (symtab, 0, 0);
597 nlines = ltable->nitems;
598 lines = ltable->item;
600 return btrace_mk_line_range (symtab, 0, 0);
602 range = btrace_mk_line_range (symtab, 0, 0);
603 for (i = 0; i < nlines - 1; i++)
605 if ((lines[i].pc == pc) && (lines[i].line != 0))
606 range = btrace_line_range_add (range, lines[i].line);
612 /* Print source lines in LINES to UIOUT.
614 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
615 instructions corresponding to that source line. When printing a new source
616 line, we do the cleanups for the open chain and open a new cleanup chain for
617 the new source line. If the source line range in LINES is not empty, this
618 function will leave the cleanup chain for the last printed source line open
619 so instructions can be added to it. */
622 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
623 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
624 gdb::optional<ui_out_emit_list> *asm_list,
625 gdb_disassembly_flags flags)
627 print_source_lines_flags psl_flags;
629 if (flags & DISASSEMBLY_FILENAME)
630 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
632 for (int line = lines.begin; line < lines.end; ++line)
636 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
638 print_source_lines (lines.symtab, line, line + 1, psl_flags);
640 asm_list->emplace (uiout, "line_asm_insn");
644 /* Disassemble a section of the recorded instruction trace. */
647 btrace_insn_history (struct ui_out *uiout,
648 const struct btrace_thread_info *btinfo,
649 const struct btrace_insn_iterator *begin,
650 const struct btrace_insn_iterator *end,
651 gdb_disassembly_flags flags)
653 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
654 btrace_insn_number (begin), btrace_insn_number (end));
656 flags |= DISASSEMBLY_SPECULATIVE;
658 struct gdbarch *gdbarch = target_gdbarch ();
659 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
661 ui_out_emit_list list_emitter (uiout, "asm_insns");
663 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
664 gdb::optional<ui_out_emit_list> asm_list;
666 gdb_pretty_print_disassembler disasm (gdbarch);
668 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
669 btrace_insn_next (&it, 1))
671 const struct btrace_insn *insn;
673 insn = btrace_insn_get (&it);
675 /* A NULL instruction indicates a gap in the trace. */
678 const struct btrace_config *conf;
680 conf = btrace_conf (btinfo);
682 /* We have trace so we must have a configuration. */
683 gdb_assert (conf != NULL);
685 uiout->field_fmt ("insn-number", "%u",
686 btrace_insn_number (&it));
689 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
694 struct disasm_insn dinsn;
696 if ((flags & DISASSEMBLY_SOURCE) != 0)
698 struct btrace_line_range lines;
700 lines = btrace_find_line_range (insn->pc);
701 if (!btrace_line_range_is_empty (lines)
702 && !btrace_line_range_contains_range (last_lines, lines))
704 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
708 else if (!src_and_asm_tuple.has_value ())
710 gdb_assert (!asm_list.has_value ());
712 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
714 /* No source information. */
715 asm_list.emplace (uiout, "line_asm_insn");
718 gdb_assert (src_and_asm_tuple.has_value ());
719 gdb_assert (asm_list.has_value ());
722 memset (&dinsn, 0, sizeof (dinsn));
723 dinsn.number = btrace_insn_number (&it);
724 dinsn.addr = insn->pc;
726 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
727 dinsn.is_speculative = 1;
729 disasm.pretty_print_insn (uiout, &dinsn, flags);
734 /* The to_insn_history method of target record-btrace. */
737 record_btrace_insn_history (struct target_ops *self, int size,
738 gdb_disassembly_flags flags)
740 struct btrace_thread_info *btinfo;
741 struct btrace_insn_history *history;
742 struct btrace_insn_iterator begin, end;
743 struct ui_out *uiout;
744 unsigned int context, covered;
746 uiout = current_uiout;
747 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
748 context = abs (size);
750 error (_("Bad record instruction-history-size."));
752 btinfo = require_btrace ();
753 history = btinfo->insn_history;
756 struct btrace_insn_iterator *replay;
758 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
760 /* If we're replaying, we start at the replay position. Otherwise, we
761 start at the tail of the trace. */
762 replay = btinfo->replay;
766 btrace_insn_end (&begin, btinfo);
768 /* We start from here and expand in the requested direction. Then we
769 expand in the other direction, as well, to fill up any remaining
774 /* We want the current position covered, as well. */
775 covered = btrace_insn_next (&end, 1);
776 covered += btrace_insn_prev (&begin, context - covered);
777 covered += btrace_insn_next (&end, context - covered);
781 covered = btrace_insn_next (&end, context);
782 covered += btrace_insn_prev (&begin, context - covered);
787 begin = history->begin;
790 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
791 btrace_insn_number (&begin), btrace_insn_number (&end));
796 covered = btrace_insn_prev (&begin, context);
801 covered = btrace_insn_next (&end, context);
806 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
810 printf_unfiltered (_("At the start of the branch trace record.\n"));
812 printf_unfiltered (_("At the end of the branch trace record.\n"));
815 btrace_set_insn_history (btinfo, &begin, &end);
818 /* The to_insn_history_range method of target record-btrace. */
821 record_btrace_insn_history_range (struct target_ops *self,
822 ULONGEST from, ULONGEST to,
823 gdb_disassembly_flags flags)
825 struct btrace_thread_info *btinfo;
826 struct btrace_insn_iterator begin, end;
827 struct ui_out *uiout;
828 unsigned int low, high;
831 uiout = current_uiout;
832 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
836 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
838 /* Check for wrap-arounds. */
839 if (low != from || high != to)
840 error (_("Bad range."));
843 error (_("Bad range."));
845 btinfo = require_btrace ();
847 found = btrace_find_insn_by_number (&begin, btinfo, low);
849 error (_("Range out of bounds."));
851 found = btrace_find_insn_by_number (&end, btinfo, high);
854 /* Silently truncate the range. */
855 btrace_insn_end (&end, btinfo);
859 /* We want both begin and end to be inclusive. */
860 btrace_insn_next (&end, 1);
863 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
864 btrace_set_insn_history (btinfo, &begin, &end);
867 /* The to_insn_history_from method of target record-btrace. */
870 record_btrace_insn_history_from (struct target_ops *self,
871 ULONGEST from, int size,
872 gdb_disassembly_flags flags)
874 ULONGEST begin, end, context;
876 context = abs (size);
878 error (_("Bad record instruction-history-size."));
887 begin = from - context + 1;
892 end = from + context - 1;
894 /* Check for wrap-around. */
899 record_btrace_insn_history_range (self, begin, end, flags);
902 /* Print the instruction number range for a function call history line. */
905 btrace_call_history_insn_range (struct ui_out *uiout,
906 const struct btrace_function *bfun)
908 unsigned int begin, end, size;
910 size = bfun->insn.size ();
911 gdb_assert (size > 0);
913 begin = bfun->insn_offset;
914 end = begin + size - 1;
916 ui_out_field_uint (uiout, "insn begin", begin);
918 ui_out_field_uint (uiout, "insn end", end);
921 /* Compute the lowest and highest source line for the instructions in BFUN
922 and return them in PBEGIN and PEND.
923 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
924 result from inlining or macro expansion. */
927 btrace_compute_src_line_range (const struct btrace_function *bfun,
928 int *pbegin, int *pend)
930 struct symtab *symtab;
941 symtab = symbol_symtab (sym);
943 for (const btrace_insn &insn : bfun->insn)
945 struct symtab_and_line sal;
947 sal = find_pc_line (insn.pc, 0);
948 if (sal.symtab != symtab || sal.line == 0)
951 begin = std::min (begin, sal.line);
952 end = std::max (end, sal.line);
960 /* Print the source line information for a function call history line. */
963 btrace_call_history_src_line (struct ui_out *uiout,
964 const struct btrace_function *bfun)
973 uiout->field_string ("file",
974 symtab_to_filename_for_display (symbol_symtab (sym)));
976 btrace_compute_src_line_range (bfun, &begin, &end);
981 uiout->field_int ("min line", begin);
987 uiout->field_int ("max line", end);
990 /* Get the name of a branch trace function. */
993 btrace_get_bfun_name (const struct btrace_function *bfun)
995 struct minimal_symbol *msym;
1005 return SYMBOL_PRINT_NAME (sym);
1006 else if (msym != NULL)
1007 return MSYMBOL_PRINT_NAME (msym);
1012 /* Disassemble a section of the recorded function trace. */
1015 btrace_call_history (struct ui_out *uiout,
1016 const struct btrace_thread_info *btinfo,
1017 const struct btrace_call_iterator *begin,
1018 const struct btrace_call_iterator *end,
1021 struct btrace_call_iterator it;
1022 record_print_flags flags = (enum record_print_flag) int_flags;
1024 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1025 btrace_call_number (end));
1027 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1029 const struct btrace_function *bfun;
1030 struct minimal_symbol *msym;
1033 bfun = btrace_call_get (&it);
1037 /* Print the function index. */
1038 ui_out_field_uint (uiout, "index", bfun->number);
1041 /* Indicate gaps in the trace. */
1042 if (bfun->errcode != 0)
1044 const struct btrace_config *conf;
1046 conf = btrace_conf (btinfo);
1048 /* We have trace so we must have a configuration. */
1049 gdb_assert (conf != NULL);
1051 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1056 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1058 int level = bfun->level + btinfo->level, i;
1060 for (i = 0; i < level; ++i)
1065 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1066 else if (msym != NULL)
1067 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1068 else if (!uiout->is_mi_like_p ())
1069 uiout->field_string ("function", "??");
1071 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1073 uiout->text (_("\tinst "));
1074 btrace_call_history_insn_range (uiout, bfun);
1077 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1079 uiout->text (_("\tat "));
1080 btrace_call_history_src_line (uiout, bfun);
1087 /* The to_call_history method of target record-btrace. */
1090 record_btrace_call_history (struct target_ops *self, int size,
1091 record_print_flags flags)
1093 struct btrace_thread_info *btinfo;
1094 struct btrace_call_history *history;
1095 struct btrace_call_iterator begin, end;
1096 struct ui_out *uiout;
1097 unsigned int context, covered;
1099 uiout = current_uiout;
1100 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1101 context = abs (size);
1103 error (_("Bad record function-call-history-size."));
1105 btinfo = require_btrace ();
1106 history = btinfo->call_history;
1107 if (history == NULL)
1109 struct btrace_insn_iterator *replay;
1111 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1113 /* If we're replaying, we start at the replay position. Otherwise, we
1114 start at the tail of the trace. */
1115 replay = btinfo->replay;
1118 begin.btinfo = btinfo;
1119 begin.index = replay->call_index;
1122 btrace_call_end (&begin, btinfo);
1124 /* We start from here and expand in the requested direction. Then we
1125 expand in the other direction, as well, to fill up any remaining
1130 /* We want the current position covered, as well. */
1131 covered = btrace_call_next (&end, 1);
1132 covered += btrace_call_prev (&begin, context - covered);
1133 covered += btrace_call_next (&end, context - covered);
1137 covered = btrace_call_next (&end, context);
1138 covered += btrace_call_prev (&begin, context- covered);
1143 begin = history->begin;
1146 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1147 btrace_call_number (&begin), btrace_call_number (&end));
1152 covered = btrace_call_prev (&begin, context);
1157 covered = btrace_call_next (&end, context);
1162 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1166 printf_unfiltered (_("At the start of the branch trace record.\n"));
1168 printf_unfiltered (_("At the end of the branch trace record.\n"));
1171 btrace_set_call_history (btinfo, &begin, &end);
1174 /* The to_call_history_range method of target record-btrace. */
1177 record_btrace_call_history_range (struct target_ops *self,
1178 ULONGEST from, ULONGEST to,
1179 record_print_flags flags)
1181 struct btrace_thread_info *btinfo;
1182 struct btrace_call_iterator begin, end;
1183 struct ui_out *uiout;
1184 unsigned int low, high;
1187 uiout = current_uiout;
1188 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1192 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1194 /* Check for wrap-arounds. */
1195 if (low != from || high != to)
1196 error (_("Bad range."));
1199 error (_("Bad range."));
1201 btinfo = require_btrace ();
1203 found = btrace_find_call_by_number (&begin, btinfo, low);
1205 error (_("Range out of bounds."));
1207 found = btrace_find_call_by_number (&end, btinfo, high);
1210 /* Silently truncate the range. */
1211 btrace_call_end (&end, btinfo);
1215 /* We want both begin and end to be inclusive. */
1216 btrace_call_next (&end, 1);
1219 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1220 btrace_set_call_history (btinfo, &begin, &end);
1223 /* The to_call_history_from method of target record-btrace. */
1226 record_btrace_call_history_from (struct target_ops *self,
1227 ULONGEST from, int size,
1228 record_print_flags flags)
1230 ULONGEST begin, end, context;
1232 context = abs (size);
1234 error (_("Bad record function-call-history-size."));
1243 begin = from - context + 1;
1248 end = from + context - 1;
1250 /* Check for wrap-around. */
1255 record_btrace_call_history_range (self, begin, end, flags);
1258 /* The to_record_method method of target record-btrace. */
1260 static enum record_method
1261 record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1263 struct thread_info * const tp = find_thread_ptid (ptid);
1266 error (_("No thread."));
1268 if (tp->btrace.target == NULL)
1269 return RECORD_METHOD_NONE;
1271 return RECORD_METHOD_BTRACE;
1274 /* The to_record_is_replaying method of target record-btrace. */
1277 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1279 struct thread_info *tp;
1281 ALL_NON_EXITED_THREADS (tp)
1282 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1288 /* The to_record_will_replay method of target record-btrace. */
1291 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1293 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1296 /* The to_xfer_partial method of target record-btrace. */
1298 static enum target_xfer_status
1299 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1300 const char *annex, gdb_byte *readbuf,
1301 const gdb_byte *writebuf, ULONGEST offset,
1302 ULONGEST len, ULONGEST *xfered_len)
1304 /* Filter out requests that don't make sense during replay. */
1305 if (replay_memory_access == replay_memory_access_read_only
1306 && !record_btrace_generating_corefile
1307 && record_btrace_is_replaying (ops, inferior_ptid))
1311 case TARGET_OBJECT_MEMORY:
1313 struct target_section *section;
1315 /* We do not allow writing memory in general. */
1316 if (writebuf != NULL)
1319 return TARGET_XFER_UNAVAILABLE;
1322 /* We allow reading readonly memory. */
1323 section = target_section_by_addr (ops, offset);
1324 if (section != NULL)
1326 /* Check if the section we found is readonly. */
1327 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1328 section->the_bfd_section)
1329 & SEC_READONLY) != 0)
1331 /* Truncate the request to fit into this section. */
1332 len = std::min (len, section->endaddr - offset);
1338 return TARGET_XFER_UNAVAILABLE;
1343 /* Forward the request. */
1345 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1346 offset, len, xfered_len);
1349 /* The to_insert_breakpoint method of target record-btrace. */
1352 record_btrace_insert_breakpoint (struct target_ops *ops,
1353 struct gdbarch *gdbarch,
1354 struct bp_target_info *bp_tgt)
1359 /* Inserting breakpoints requires accessing memory. Allow it for the
1360 duration of this function. */
1361 old = replay_memory_access;
1362 replay_memory_access = replay_memory_access_read_write;
1367 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1369 CATCH (except, RETURN_MASK_ALL)
1371 replay_memory_access = old;
1372 throw_exception (except);
1375 replay_memory_access = old;
1380 /* The to_remove_breakpoint method of target record-btrace. */
1383 record_btrace_remove_breakpoint (struct target_ops *ops,
1384 struct gdbarch *gdbarch,
1385 struct bp_target_info *bp_tgt,
1386 enum remove_bp_reason reason)
1391 /* Removing breakpoints requires accessing memory. Allow it for the
1392 duration of this function. */
1393 old = replay_memory_access;
1394 replay_memory_access = replay_memory_access_read_write;
1399 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1402 CATCH (except, RETURN_MASK_ALL)
1404 replay_memory_access = old;
1405 throw_exception (except);
1408 replay_memory_access = old;
1413 /* The to_fetch_registers method of target record-btrace. */
1416 record_btrace_fetch_registers (struct target_ops *ops,
1417 struct regcache *regcache, int regno)
1419 struct btrace_insn_iterator *replay;
1420 struct thread_info *tp;
1422 tp = find_thread_ptid (regcache_get_ptid (regcache));
1423 gdb_assert (tp != NULL);
1425 replay = tp->btrace.replay;
1426 if (replay != NULL && !record_btrace_generating_corefile)
1428 const struct btrace_insn *insn;
1429 struct gdbarch *gdbarch;
1432 gdbarch = regcache->arch ();
1433 pcreg = gdbarch_pc_regnum (gdbarch);
1437 /* We can only provide the PC register. */
1438 if (regno >= 0 && regno != pcreg)
1441 insn = btrace_insn_get (replay);
1442 gdb_assert (insn != NULL);
1444 regcache_raw_supply (regcache, regno, &insn->pc);
1448 struct target_ops *t = ops->beneath;
1450 t->to_fetch_registers (t, regcache, regno);
1454 /* The to_store_registers method of target record-btrace. */
1457 record_btrace_store_registers (struct target_ops *ops,
1458 struct regcache *regcache, int regno)
1460 struct target_ops *t;
1462 if (!record_btrace_generating_corefile
1463 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1464 error (_("Cannot write registers while replaying."));
1466 gdb_assert (may_write_registers != 0);
1469 t->to_store_registers (t, regcache, regno);
1472 /* The to_prepare_to_store method of target record-btrace. */
1475 record_btrace_prepare_to_store (struct target_ops *ops,
1476 struct regcache *regcache)
1478 struct target_ops *t;
1480 if (!record_btrace_generating_corefile
1481 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1485 t->to_prepare_to_store (t, regcache);
1488 /* The branch trace frame cache. */
1490 struct btrace_frame_cache
/* The thread whose branch trace this frame belongs to.  */
1493 struct thread_info *tp;
1495 /* The frame info. */
1496 struct frame_info *frame;
1498 /* The branch trace function segment. */
1499 const struct btrace_function *bfun;
1502 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1504 static htab_t bfcache;
1506 /* hash_f for htab_create_alloc of bfcache. */
1509 bfcache_hash (const void *arg)
1511 const struct btrace_frame_cache *cache
1512 = (const struct btrace_frame_cache *) arg;
/* Entries are keyed on the frame pointer itself.  */
1514 return htab_hash_pointer (cache->frame);
1517 /* eq_f for htab_create_alloc of bfcache. */
1520 bfcache_eq (const void *arg1, const void *arg2)
1522 const struct btrace_frame_cache *cache1
1523 = (const struct btrace_frame_cache *) arg1;
1524 const struct btrace_frame_cache *cache2
1525 = (const struct btrace_frame_cache *) arg2;
/* Two entries are equal iff they describe the same frame.  */
1527 return cache1->frame == cache2->frame;
1530 /* Create a new btrace frame cache. */
1532 static struct btrace_frame_cache *
1533 bfcache_new (struct frame_info *frame)
1535 struct btrace_frame_cache *cache;
/* Allocate on the frame obstack so the cache lives exactly as long as
   FRAME; record_btrace_frame_dealloc_cache removes the hash entry.  */
1538 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1539 cache->frame = frame;
1541 slot = htab_find_slot (bfcache, cache, INSERT);
1542 gdb_assert (*slot == NULL);
1548 /* Extract the branch trace function from a branch trace frame. */
1550 static const struct btrace_function *
1551 btrace_get_frame_function (struct frame_info *frame)
1553 const struct btrace_frame_cache *cache;
1554 struct btrace_frame_cache pattern;
/* Look the frame up by pointer identity; see bfcache_eq.  */
1557 pattern.frame = frame;
1559 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1563 cache = (const struct btrace_frame_cache *) *slot;
1567 /* Implement stop_reason method for record_btrace_frame_unwind. */
1569 static enum unwind_stop_reason
1570 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1573 const struct btrace_frame_cache *cache;
1574 const struct btrace_function *bfun;
1576 cache = (const struct btrace_frame_cache *) *this_cache;
1578 gdb_assert (bfun != NULL);
/* Without a caller segment, the stack beyond this frame is
   unavailable.  */
1581 return UNWIND_UNAVAILABLE;
1583 return UNWIND_NO_REASON;
1586 /* Implement this_id method for record_btrace_frame_unwind. */
1589 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1590 struct frame_id *this_id)
1592 const struct btrace_frame_cache *cache;
1593 const struct btrace_function *bfun;
1594 struct btrace_call_iterator it;
1595 CORE_ADDR code, special;
1597 cache = (const struct btrace_frame_cache *) *this_cache;
1600 gdb_assert (bfun != NULL);
/* Walk back to the first segment of this function instance so that all
   segments of one instance yield the same frame id.  */
1602 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1603 bfun = btrace_call_get (&it);
/* The stack is unavailable during replay, so build an id from the
   function's code address plus its btrace segment number.  */
1605 code = get_frame_func (this_frame);
1606 special = bfun->number;
1608 *this_id = frame_id_build_unavailable_stack_special (code, special);
1610 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1611 btrace_get_bfun_name (cache->bfun),
1612 core_addr_to_string_nz (this_id->code_addr),
1613 core_addr_to_string_nz (this_id->special_addr));
1616 /* Implement prev_register method for record_btrace_frame_unwind. */
1618 static struct value *
1619 record_btrace_frame_prev_register (struct frame_info *this_frame,
1623 const struct btrace_frame_cache *cache;
1624 const struct btrace_function *bfun, *caller;
1625 struct btrace_call_iterator it;
1626 struct gdbarch *gdbarch;
/* Only the PC can be unwound; see the comment on
   record_btrace_frame_unwind below.  */
1630 gdbarch = get_frame_arch (this_frame);
1631 pcreg = gdbarch_pc_regnum (gdbarch);
1632 if (pcreg < 0 || regnum != pcreg)
1633 throw_error (NOT_AVAILABLE_ERROR,
1634 _("Registers are not available in btrace record history"));
1636 cache = (const struct btrace_frame_cache *) *this_cache;
1638 gdb_assert (bfun != NULL);
1640 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1641 throw_error (NOT_AVAILABLE_ERROR,
1642 _("No caller in btrace record history"));
1644 caller = btrace_call_get (&it);
/* For a return link, the caller continues at the first instruction of
   its next segment; otherwise compute the address just past the call
   instruction.  */
1646 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1647 pc = caller->insn.front ().pc;
1650 pc = caller->insn.back ().pc;
1651 pc += gdb_insn_length (gdbarch, pc);
1654 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1655 btrace_get_bfun_name (bfun), bfun->level,
1656 core_addr_to_string_nz (pc));
1658 return frame_unwind_got_address (this_frame, regnum, pc);
1661 /* Implement sniffer method for record_btrace_frame_unwind. */
1664 record_btrace_frame_sniffer (const struct frame_unwind *self,
1665 struct frame_info *this_frame,
1668 const struct btrace_function *bfun;
1669 struct btrace_frame_cache *cache;
1670 struct thread_info *tp;
1671 struct frame_info *next;
1673 /* THIS_FRAME does not contain a reference to its thread. */
1674 tp = find_thread_ptid (inferior_ptid);
1675 gdb_assert (tp != NULL);
1678 next = get_next_frame (this_frame);
/* Innermost frame: take the segment at the current replay position.  */
1681 const struct btrace_insn_iterator *replay;
1683 replay = tp->btrace.replay;
1685 bfun = &replay->btinfo->functions[replay->call_index];
/* Otherwise THIS_FRAME is the caller of the btrace frame NEXT.  Tail
   calls are handled by the separate tailcall sniffer below.  */
1689 const struct btrace_function *callee;
1690 struct btrace_call_iterator it;
1692 callee = btrace_get_frame_function (next);
1693 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1696 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1699 bfun = btrace_call_get (&it);
1705 DEBUG ("[frame] sniffed frame for %s on level %d",
1706 btrace_get_bfun_name (bfun), bfun->level);
1708 /* This is our frame. Initialize the frame cache. */
1709 cache = bfcache_new (this_frame);
1713 *this_cache = cache;
1717 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1720 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1721 struct frame_info *this_frame,
1724 const struct btrace_function *bfun, *callee;
1725 struct btrace_frame_cache *cache;
1726 struct btrace_call_iterator it;
1727 struct frame_info *next;
1728 struct thread_info *tinfo;
/* A tail-call frame is never innermost: it needs a btrace callee frame
   whose up link is marked as a tail call.  */
1730 next = get_next_frame (this_frame);
1734 callee = btrace_get_frame_function (next);
1738 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1741 tinfo = find_thread_ptid (inferior_ptid);
1742 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1745 bfun = btrace_call_get (&it);
1747 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1748 btrace_get_bfun_name (bfun), bfun->level);
1750 /* This is our frame. Initialize the frame cache. */
1751 cache = bfcache_new (this_frame);
1755 *this_cache = cache;
/* Implement dealloc_cache method for record_btrace_frame_unwind; drop
   the frame's entry from the global bfcache hash table.  */
1760 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1762 struct btrace_frame_cache *cache;
1765 cache = (struct btrace_frame_cache *) this_cache;
1767 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1768 gdb_assert (slot != NULL);
1770 htab_remove_elt (bfcache, cache);
1773 /* btrace recording does not store previous memory content, neither the stack
1774 frames content. Any unwinding would return erroneous results as the stack
1775 contents no longer match the changed PC value restored from history.
1776 Therefore this unwinder reports any possibly unwound registers as
1779 const struct frame_unwind record_btrace_frame_unwind =
1782 record_btrace_frame_unwind_stop_reason,
1783 record_btrace_frame_this_id,
1784 record_btrace_frame_prev_register,
1786 record_btrace_frame_sniffer,
1787 record_btrace_frame_dealloc_cache
/* Same methods as record_btrace_frame_unwind; only the sniffer differs,
   accepting frames reached through a tail call.  */
1790 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1793 record_btrace_frame_unwind_stop_reason,
1794 record_btrace_frame_this_id,
1795 record_btrace_frame_prev_register,
1797 record_btrace_tailcall_frame_sniffer,
1798 record_btrace_frame_dealloc_cache
1801 /* Implement the to_get_unwinder method. */
1803 static const struct frame_unwind *
1804 record_btrace_to_get_unwinder (struct target_ops *self)
1806 return &record_btrace_frame_unwind;
1809 /* Implement the to_get_tailcall_unwinder method. */
1811 static const struct frame_unwind *
1812 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1814 return &record_btrace_tailcall_frame_unwind;
1817 /* Return a human-readable string for FLAG. */
1820 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1828 return "reverse-step";
1834 return "reverse-cont";
1843 /* Indicate that TP should be resumed according to FLAG. */
1846 record_btrace_resume_thread (struct thread_info *tp,
1847 enum btrace_thread_flag flag)
1849 struct btrace_thread_info *btinfo;
1851 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1852 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1854 btinfo = &tp->btrace;
1856 /* Fetch the latest branch trace. */
1859 /* A resume request overwrites a preceding resume or stop request. */
1860 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1861 btinfo->flags |= flag;
1864 /* Get the current frame for TP. */
1866 static struct frame_info *
1867 get_thread_current_frame (struct thread_info *tp)
1869 struct frame_info *frame;
1870 ptid_t old_inferior_ptid;
1873 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1874 old_inferior_ptid = inferior_ptid;
1875 inferior_ptid = tp->ptid;
1877 /* Clear the executing flag to allow changes to the current frame.
1878 We are not actually running, yet. We just started a reverse execution
1879 command or a record goto command.
1880 For the latter, EXECUTING is false and this has no effect.
1881 For the former, EXECUTING is true and we're in to_wait, about to
1882 move the thread. Since we need to recompute the stack, we temporarily
1883 set EXECUTING to false. */
1884 executing = is_executing (inferior_ptid);
1885 set_executing (inferior_ptid, 0);
/* Compute the frame under an exception handler so that both the
   executing flag and inferior_ptid are restored on every path.  */
1890 frame = get_current_frame ();
1892 CATCH (except, RETURN_MASK_ALL)
1894 /* Restore the previous execution state. */
1895 set_executing (inferior_ptid, executing);
1897 /* Restore the previous inferior_ptid. */
1898 inferior_ptid = old_inferior_ptid;
1900 throw_exception (except);
1904 /* Restore the previous execution state. */
1905 set_executing (inferior_ptid, executing);
1907 /* Restore the previous inferior_ptid. */
1908 inferior_ptid = old_inferior_ptid;
1913 /* Start replaying a thread. */
1915 static struct btrace_insn_iterator *
1916 record_btrace_start_replaying (struct thread_info *tp)
1918 struct btrace_insn_iterator *replay;
1919 struct btrace_thread_info *btinfo;
1921 btinfo = &tp->btrace;
1924 /* We can't start replaying without trace. */
1925 if (btinfo->functions.empty ())
1928 /* GDB stores the current frame_id when stepping in order to detect steps
1930 Since frames are computed differently when we're replaying, we need to
1931 recompute those stored frames and fix them up so we can still detect
1932 subroutines after we started replaying. */
1935 struct frame_info *frame;
1936 struct frame_id frame_id;
1937 int upd_step_frame_id, upd_step_stack_frame_id;
1939 /* The current frame without replaying - computed via normal unwind. */
1940 frame = get_thread_current_frame (tp);
1941 frame_id = get_frame_id (frame);
1943 /* Check if we need to update any stepping-related frame id's. */
1944 upd_step_frame_id = frame_id_eq (frame_id,
1945 tp->control.step_frame_id);
1946 upd_step_stack_frame_id = frame_id_eq (frame_id,
1947 tp->control.step_stack_frame_id);
1949 /* We start replaying at the end of the branch trace. This corresponds
1950 to the current instruction. */
1951 replay = XNEW (struct btrace_insn_iterator);
1952 btrace_insn_end (replay, btinfo);
1954 /* Skip gaps at the end of the trace. */
1955 while (btrace_insn_get (replay) == NULL)
1959 steps = btrace_insn_prev (replay, 1);
1961 error (_("No trace."));
1964 /* We're not replaying, yet. */
1965 gdb_assert (btinfo->replay == NULL);
1966 btinfo->replay = replay;
1968 /* Make sure we're not using any stale registers. */
1969 registers_changed_ptid (tp->ptid);
1971 /* The current frame with replaying - computed via btrace unwind. */
1972 frame = get_thread_current_frame (tp);
1973 frame_id = get_frame_id (frame);
1975 /* Replace stepping related frames where necessary. */
1976 if (upd_step_frame_id)
1977 tp->control.step_frame_id = frame_id;
1978 if (upd_step_stack_frame_id)
1979 tp->control.step_stack_frame_id = frame_id;
/* On error, undo the partial replay setup so the thread is left in a
   consistent, non-replaying state before re-throwing.  */
1981 CATCH (except, RETURN_MASK_ALL)
1983 xfree (btinfo->replay);
1984 btinfo->replay = NULL;
1986 registers_changed_ptid (tp->ptid);
1988 throw_exception (except);
1995 /* Stop replaying a thread. */
1998 record_btrace_stop_replaying (struct thread_info *tp)
2000 struct btrace_thread_info *btinfo;
2002 btinfo = &tp->btrace;
2004 xfree (btinfo->replay);
2005 btinfo->replay = NULL;
2007 /* Make sure we're not leaving any stale registers. */
2008 registers_changed_ptid (tp->ptid);
2011 /* Stop replaying TP if it is at the end of its execution history. */
2014 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2016 struct btrace_insn_iterator *replay, end;
2017 struct btrace_thread_info *btinfo;
2019 btinfo = &tp->btrace;
2020 replay = btinfo->replay;
2025 btrace_insn_end (&end, btinfo);
/* Replaying at the very end of the trace is equivalent to live
   execution; drop the replay state in that case.  */
2027 if (btrace_insn_cmp (replay, &end) == 0)
2028 record_btrace_stop_replaying (tp);
2031 /* The to_resume method of target record-btrace. */
2034 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2035 enum gdb_signal signal)
2037 struct thread_info *tp;
2038 enum btrace_thread_flag flag, cflag;
2040 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2041 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2042 step ? "step" : "cont");
2044 /* Store the execution direction of the last resume.
2046 If there is more than one to_resume call, we have to rely on infrun
2047 to not change the execution direction in-between. */
2048 record_btrace_resume_exec_dir = execution_direction;
2050 /* As long as we're not replaying, just forward the request.
2052 For non-stop targets this means that no thread is replaying. In order to
2053 make progress, we may need to explicitly move replaying threads to the end
2054 of their execution history. */
2055 if ((execution_direction != EXEC_REVERSE)
2056 && !record_btrace_is_replaying (ops, minus_one_ptid))
2059 ops->to_resume (ops, ptid, step, signal);
2063 /* Compute the btrace thread flag for the requested move. */
2064 if (execution_direction == EXEC_REVERSE)
2066 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2071 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2075 /* We just indicate the resume intent here. The actual stepping happens in
2076 record_btrace_wait below.
2078 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2079 if (!target_is_non_stop_p ())
2081 gdb_assert (ptid_match (inferior_ptid, ptid));
2083 ALL_NON_EXITED_THREADS (tp)
2084 if (ptid_match (tp->ptid, ptid))
2086 if (ptid_match (tp->ptid, inferior_ptid))
2087 record_btrace_resume_thread (tp, flag);
2089 record_btrace_resume_thread (tp, cflag);
2094 ALL_NON_EXITED_THREADS (tp)
2095 if (ptid_match (tp->ptid, ptid))
2096 record_btrace_resume_thread (tp, flag);
2099 /* Async support. */
2100 if (target_can_async_p ())
2103 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2107 /* The to_commit_resume method of target record-btrace. */
2110 record_btrace_commit_resume (struct target_ops *ops)
/* Only forward when not replaying; replay resumption is handled
   entirely within record_btrace_wait.  */
2112 if ((execution_direction != EXEC_REVERSE)
2113 && !record_btrace_is_replaying (ops, minus_one_ptid))
2114 ops->beneath->to_commit_resume (ops->beneath);
2117 /* Cancel resuming TP. */
2120 record_btrace_cancel_resume (struct thread_info *tp)
2122 enum btrace_thread_flag flags;
2124 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2128 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2129 print_thread_id (tp),
2130 target_pid_to_str (tp->ptid), flags,
2131 btrace_thread_flag_to_str (flags));
/* Drop the pending request and stop replaying if the thread already
   sits at the end of its history.  */
2133 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2134 record_btrace_stop_replaying_at_end (tp);
2137 /* Return a target_waitstatus indicating that we ran out of history. */
2139 static struct target_waitstatus
2140 btrace_step_no_history (void)
2142 struct target_waitstatus status;
2144 status.kind = TARGET_WAITKIND_NO_HISTORY;
2149 /* Return a target_waitstatus indicating that a step finished. */
2151 static struct target_waitstatus
2152 btrace_step_stopped (void)
2154 struct target_waitstatus status;
2156 status.kind = TARGET_WAITKIND_STOPPED;
2157 status.value.sig = GDB_SIGNAL_TRAP;
2162 /* Return a target_waitstatus indicating that a thread was stopped as
2165 static struct target_waitstatus
2166 btrace_step_stopped_on_request (void)
2168 struct target_waitstatus status;
/* GDB_SIGNAL_0 distinguishes an explicit stop request from a finished
   step, which reports GDB_SIGNAL_TRAP above.  */
2170 status.kind = TARGET_WAITKIND_STOPPED;
2171 status.value.sig = GDB_SIGNAL_0;
2176 /* Return a target_waitstatus indicating a spurious stop. */
2178 static struct target_waitstatus
2179 btrace_step_spurious (void)
2181 struct target_waitstatus status;
2183 status.kind = TARGET_WAITKIND_SPURIOUS;
2188 /* Return a target_waitstatus indicating that the thread was not resumed. */
2190 static struct target_waitstatus
2191 btrace_step_no_resumed (void)
2193 struct target_waitstatus status;
2195 status.kind = TARGET_WAITKIND_NO_RESUMED;
2200 /* Return a target_waitstatus indicating that we should wait again. */
2202 static struct target_waitstatus
2203 btrace_step_again (void)
2205 struct target_waitstatus status;
2207 status.kind = TARGET_WAITKIND_IGNORE;
2212 /* Clear the record histories. */
2215 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
/* Discard the cached insn/call history so it is rebuilt from the new
   replay position on next use.  */
2217 xfree (btinfo->insn_history);
2218 xfree (btinfo->call_history);
2220 btinfo->insn_history = NULL;
2221 btinfo->call_history = NULL;
2224 /* Check whether TP's current replay position is at a breakpoint. */
2227 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2229 struct btrace_insn_iterator *replay;
2230 struct btrace_thread_info *btinfo;
2231 const struct btrace_insn *insn;
2232 struct inferior *inf;
2234 btinfo = &tp->btrace;
2235 replay = btinfo->replay;
2240 insn = btrace_insn_get (replay);
2244 inf = find_inferior_ptid (tp->ptid);
/* Also records the stop reason (sw/hw breakpoint) in BTINFO, which the
   to_stopped_by_*_breakpoint methods later consult.  */
2248 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2249 &btinfo->stop_reason);
2252 /* Step one instruction in forward direction. */
2254 static struct target_waitstatus
2255 record_btrace_single_step_forward (struct thread_info *tp)
2257 struct btrace_insn_iterator *replay, end, start;
2258 struct btrace_thread_info *btinfo;
2260 btinfo = &tp->btrace;
2261 replay = btinfo->replay;
2263 /* We're done if we're not replaying. */
2265 return btrace_step_no_history ();
2267 /* Check if we're stepping a breakpoint. */
2268 if (record_btrace_replay_at_breakpoint (tp))
2269 return btrace_step_stopped ();
2271 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2272 jump back to the instruction at which we started. */
2278 /* We will bail out here if we continue stepping after reaching the end
2279 of the execution history. */
2280 steps = btrace_insn_next (replay, 1);
2284 return btrace_step_no_history ();
2287 while (btrace_insn_get (replay) == NULL);
2289 /* Determine the end of the instruction trace. */
2290 btrace_insn_end (&end, btinfo);
2292 /* The execution trace contains (and ends with) the current instruction.
2293 This instruction has not been executed, yet, so the trace really ends
2294 one instruction earlier. */
2295 if (btrace_insn_cmp (replay, &end) == 0)
2296 return btrace_step_no_history ();
/* SPURIOUS tells the caller that the step succeeded with no event.  */
2298 return btrace_step_spurious ();
2301 /* Step one instruction in backward direction. */
2303 static struct target_waitstatus
2304 record_btrace_single_step_backward (struct thread_info *tp)
2306 struct btrace_insn_iterator *replay, start;
2307 struct btrace_thread_info *btinfo;
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2312 /* Start replaying if we're not already doing so. */
2314 replay = record_btrace_start_replaying (tp);
2316 /* If we can't step any further, we reached the end of the history.
2317 Skip gaps during replay. If we end up at a gap (at the beginning of
2318 the trace), jump back to the instruction at which we started. */
2324 steps = btrace_insn_prev (replay, 1);
2328 return btrace_step_no_history ();
2331 while (btrace_insn_get (replay) == NULL);
2333 /* Check if we're stepping a breakpoint.
2335 For reverse-stepping, this check is after the step. There is logic in
2336 infrun.c that handles reverse-stepping separately. See, for example,
2337 proceed and adjust_pc_after_break.
2339 This code assumes that for reverse-stepping, PC points to the last
2340 de-executed instruction, whereas for forward-stepping PC points to the
2341 next to-be-executed instruction. */
2342 if (record_btrace_replay_at_breakpoint (tp))
2343 return btrace_step_stopped ();
2345 return btrace_step_spurious ();
2348 /* Step a single thread. */
2350 static struct target_waitstatus
2351 record_btrace_step_thread (struct thread_info *tp)
2353 struct btrace_thread_info *btinfo;
2354 struct target_waitstatus status;
2355 enum btrace_thread_flag flags;
2357 btinfo = &tp->btrace;
/* Consume the pending move/stop request; it may be re-armed below for
   continue-style requests.  */
2359 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2360 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2362 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2363 target_pid_to_str (tp->ptid), flags,
2364 btrace_thread_flag_to_str (flags));
2366 /* We can't step without an execution history. */
2367 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2368 return btrace_step_no_history ();
2373 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2376 return btrace_step_stopped_on_request ();
2379 status = record_btrace_single_step_forward (tp);
2380 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2383 return btrace_step_stopped ();
2386 status = record_btrace_single_step_backward (tp);
2387 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2390 return btrace_step_stopped ();
/* Continue: keep single-stepping until an event occurs, re-arming the
   flags and asking record_btrace_wait to call us again.  */
2393 status = record_btrace_single_step_forward (tp);
2394 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2397 btinfo->flags |= flags;
2398 return btrace_step_again ();
2401 status = record_btrace_single_step_backward (tp);
2402 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2405 btinfo->flags |= flags;
2406 return btrace_step_again ();
2409 /* We keep threads moving at the end of their execution history. The to_wait
2410 method will stop the thread for whom the event is reported. */
2411 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2412 btinfo->flags |= flags;
2417 /* A vector of threads. */
2419 typedef struct thread_info * tp_t;
2422 /* Announce further events if necessary. */
2425 record_btrace_maybe_mark_async_event
2426 (const std::vector<thread_info *> &moving,
2427 (const std::vector<thread_info *> &no_history)
2429 bool more_moving = !moving.empty ();
/* NOTE(review): stray second semicolon on the next line -- harmless,
   but should be removed.  */
2430 bool more_no_history = !no_history.empty ();;
2432 if (!more_moving && !more_no_history)
2436 DEBUG ("movers pending");
2438 if (more_no_history)
2439 DEBUG ("no-history pending");
/* Re-arm the async handler so record_btrace_wait gets called again for
   the remaining threads.  */
2441 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2444 /* The to_wait method of target record-btrace. */
2447 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2448 struct target_waitstatus *status, int options)
2450 std::vector<thread_info *> moving;
2451 std::vector<thread_info *> no_history;
2453 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2455 /* As long as we're not replaying, just forward the request. */
2456 if ((execution_direction != EXEC_REVERSE)
2457 && !record_btrace_is_replaying (ops, minus_one_ptid))
2460 return ops->to_wait (ops, ptid, status, options);
2463 /* Keep a work list of moving threads. */
2467 ALL_NON_EXITED_THREADS (tp)
2469 if (ptid_match (tp->ptid, ptid)
2470 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2471 moving.push_back (tp);
2475 if (moving.empty ())
2477 *status = btrace_step_no_resumed ();
2479 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2480 target_waitstatus_to_string (status).c_str ());
2485 /* Step moving threads one by one, one step each, until either one thread
2486 reports an event or we run out of threads to step.
2488 When stepping more than one thread, chances are that some threads reach
2489 the end of their execution history earlier than others. If we reported
2490 this immediately, all-stop on top of non-stop would stop all threads and
2491 resume the same threads next time. And we would report the same thread
2492 having reached the end of its execution history again.
2494 In the worst case, this would starve the other threads. But even if other
2495 threads would be allowed to make progress, this would result in far too
2496 many intermediate stops.
2498 We therefore delay the reporting of "no execution history" until we have
2499 nothing else to report. By this time, all threads should have moved to
2500 either the beginning or the end of their execution history. There will
2501 be a single user-visible stop. */
2502 struct thread_info *eventing = NULL;
2503 while ((eventing == NULL) && !moving.empty ())
2505 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2507 thread_info *tp = moving[ix];
2509 *status = record_btrace_step_thread (tp);
2511 switch (status->kind)
2513 case TARGET_WAITKIND_IGNORE:
2517 case TARGET_WAITKIND_NO_HISTORY:
2518 no_history.push_back (ordered_remove (moving, ix));
2522 eventing = unordered_remove (moving, ix);
2528 if (eventing == NULL)
2530 /* We started with at least one moving thread. This thread must have
2531 either stopped or reached the end of its execution history.
2533 In the former case, EVENTING must not be NULL.
2534 In the latter case, NO_HISTORY must not be empty. */
2535 gdb_assert (!no_history.empty ());
2537 /* We kept threads moving at the end of their execution history. Stop
2538 EVENTING now that we are going to report its stop. */
2539 eventing = unordered_remove (no_history, 0);
2540 eventing->btrace.flags &= ~BTHR_MOVE;
2542 *status = btrace_step_no_history ();
2545 gdb_assert (eventing != NULL);
2547 /* We kept threads replaying at the end of their execution history. Stop
2548 replaying EVENTING now that we are going to report its stop. */
2549 record_btrace_stop_replaying_at_end (eventing);
2551 /* Stop all other threads. */
2552 if (!target_is_non_stop_p ())
2556 ALL_NON_EXITED_THREADS (tp)
2557 record_btrace_cancel_resume (tp);
2560 /* In async mode, we need to announce further events. */
2561 if (target_is_async_p ())
2562 record_btrace_maybe_mark_async_event (moving, no_history);
2564 /* Start record histories anew from the current position. */
2565 record_btrace_clear_histories (&eventing->btrace);
2567 /* We moved the replay position but did not update registers. */
2568 registers_changed_ptid (eventing->ptid);
2570 DEBUG ("wait ended by thread %s (%s): %s",
2571 print_thread_id (eventing),
2572 target_pid_to_str (eventing->ptid),
2573 target_waitstatus_to_string (status).c_str ());
2575 return eventing->ptid;
2578 /* The to_stop method of target record-btrace. */
2581 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2583 DEBUG ("stop %s", target_pid_to_str (ptid));
2585 /* As long as we're not replaying, just forward the request. */
2586 if ((execution_direction != EXEC_REVERSE)
2587 && !record_btrace_is_replaying (ops, minus_one_ptid))
2590 ops->to_stop (ops, ptid);
2594 struct thread_info *tp;
/* Replace any pending move request by a stop request; the stop itself
   is reported from record_btrace_wait.  */
2596 ALL_NON_EXITED_THREADS (tp)
2597 if (ptid_match (tp->ptid, ptid))
2599 tp->btrace.flags &= ~BTHR_MOVE;
2600 tp->btrace.flags |= BTHR_STOP;
2605 /* The to_can_execute_reverse method of target record-btrace. */
2608 record_btrace_can_execute_reverse (struct target_ops *self)
2613 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2616 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2618 if (record_btrace_is_replaying (ops, minus_one_ptid))
2620 struct thread_info *tp = inferior_thread ();
/* The stop reason was recorded by record_btrace_replay_at_breakpoint.  */
2622 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2625 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2628 /* The to_supports_stopped_by_sw_breakpoint method of target
2632 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2634 if (record_btrace_is_replaying (ops, minus_one_ptid))
2637 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2640 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2643 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2645 if (record_btrace_is_replaying (ops, minus_one_ptid))
2647 struct thread_info *tp = inferior_thread ();
2649 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2652 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2655 /* The to_supports_stopped_by_hw_breakpoint method of target
2659 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2661 if (record_btrace_is_replaying (ops, minus_one_ptid))
2664 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2667 /* The to_update_thread_list method of target record-btrace. */
2670 record_btrace_update_thread_list (struct target_ops *ops)
2672 /* We don't add or remove threads during replay. */
2673 if (record_btrace_is_replaying (ops, minus_one_ptid))
2676 /* Forward the request. */
2678 ops->to_update_thread_list (ops);
2681 /* The to_thread_alive method of target record-btrace. */
2684 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2686 /* We don't add or remove threads during replay. */
2687 if (record_btrace_is_replaying (ops, minus_one_ptid))
2688 return find_thread_ptid (ptid) != NULL;
2690 /* Forward the request. */
2692 return ops->to_thread_alive (ops, ptid);
2695 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2699 record_btrace_set_replay (struct thread_info *tp,
2700 const struct btrace_insn_iterator *it)
2702 struct btrace_thread_info *btinfo;
2704 btinfo = &tp->btrace;
/* A NULL iterator means: stop replaying altogether.  */
2707 record_btrace_stop_replaying (tp);
2710 if (btinfo->replay == NULL)
2711 record_btrace_start_replaying (tp);
/* If the replay position did not change, there is nothing to do.  */
2712 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2715 *btinfo->replay = *it;
2716 registers_changed_ptid (tp->ptid);
2719 /* Start anew from the new replay position. */
2720 record_btrace_clear_histories (btinfo);
2722 stop_pc = regcache_read_pc (get_current_regcache ());
2723 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2726 /* The to_goto_record_begin method of target record-btrace. */
2729 record_btrace_goto_begin (struct target_ops *self)
2731 struct thread_info *tp;
2732 struct btrace_insn_iterator begin;
2734 tp = require_btrace_thread ();
2736 btrace_insn_begin (&begin, &tp->btrace);
2738 /* Skip gaps at the beginning of the trace. */
2739 while (btrace_insn_get (&begin) == NULL)
2743 steps = btrace_insn_next (&begin, 1);
2745 error (_("No trace."));
2748 record_btrace_set_replay (tp, &begin);
2751 /* The to_goto_record_end method of target record-btrace. */
2754 record_btrace_goto_end (struct target_ops *ops)
2756 struct thread_info *tp;
2758 tp = require_btrace_thread ();
/* NULL means: move to the end of the trace, i.e. stop replaying.  */
2760 record_btrace_set_replay (tp, NULL);
2763 /* The to_goto_record method of target record-btrace. */
2766 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2768 struct thread_info *tp;
2769 struct btrace_insn_iterator it;
2770 unsigned int number;
2775 /* Check for wrap-arounds. */
2777 error (_("Instruction number out of range."));
2779 tp = require_btrace_thread ();
2781 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2783 /* Check if the instruction could not be found or is a gap. */
2784 if (found == 0 || btrace_insn_get (&it) == NULL)
2785 error (_("No such instruction."));
2787 record_btrace_set_replay (tp, &it);
2790 /* The to_record_stop_replaying method of target record-btrace. */
2793 record_btrace_stop_replaying_all (struct target_ops *self)
2795 struct thread_info *tp;
2797 ALL_NON_EXITED_THREADS (tp)
2798 record_btrace_stop_replaying (tp);
2801 /* The to_execution_direction target method. */
2803 static enum exec_direction_kind
2804 record_btrace_execution_direction (struct target_ops *self)
2806 return record_btrace_resume_exec_dir;
2809 /* The to_prepare_to_generate_core target method. */
2812 record_btrace_prepare_to_generate_core (struct target_ops *self)
/* While this flag is set, register and memory access bypass the replay
   restrictions; see record_btrace_fetch_registers.  */
2814 record_btrace_generating_corefile = 1;
2817 /* The to_done_generating_core target method. */
2820 record_btrace_done_generating_core (struct target_ops *self)
2822 record_btrace_generating_corefile = 0;
2825 /* Initialize the record-btrace target ops. */
2828 init_record_btrace_ops (void)
2830 struct target_ops *ops;
2832 ops = &record_btrace_ops;
2833 ops->to_shortname = "record-btrace";
2834 ops->to_longname = "Branch tracing target";
2835 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2836 ops->to_open = record_btrace_open;
2837 ops->to_close = record_btrace_close;
2838 ops->to_async = record_btrace_async;
2839 ops->to_detach = record_detach;
2840 ops->to_disconnect = record_btrace_disconnect;
2841 ops->to_mourn_inferior = record_mourn_inferior;
2842 ops->to_kill = record_kill;
2843 ops->to_stop_recording = record_btrace_stop_recording;
2844 ops->to_info_record = record_btrace_info;
2845 ops->to_insn_history = record_btrace_insn_history;
2846 ops->to_insn_history_from = record_btrace_insn_history_from;
2847 ops->to_insn_history_range = record_btrace_insn_history_range;
2848 ops->to_call_history = record_btrace_call_history;
2849 ops->to_call_history_from = record_btrace_call_history_from;
2850 ops->to_call_history_range = record_btrace_call_history_range;
2851 ops->to_record_method = record_btrace_record_method;
2852 ops->to_record_is_replaying = record_btrace_is_replaying;
2853 ops->to_record_will_replay = record_btrace_will_replay;
2854 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2855 ops->to_xfer_partial = record_btrace_xfer_partial;
2856 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2857 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2858 ops->to_fetch_registers = record_btrace_fetch_registers;
2859 ops->to_store_registers = record_btrace_store_registers;
2860 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2861 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2862 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2863 ops->to_resume = record_btrace_resume;
2864 ops->to_commit_resume = record_btrace_commit_resume;
2865 ops->to_wait = record_btrace_wait;
2866 ops->to_stop = record_btrace_stop;
2867 ops->to_update_thread_list = record_btrace_update_thread_list;
2868 ops->to_thread_alive = record_btrace_thread_alive;
2869 ops->to_goto_record_begin = record_btrace_goto_begin;
2870 ops->to_goto_record_end = record_btrace_goto_end;
2871 ops->to_goto_record = record_btrace_goto;
2872 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2873 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2874 ops->to_supports_stopped_by_sw_breakpoint
2875 = record_btrace_supports_stopped_by_sw_breakpoint;
2876 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2877 ops->to_supports_stopped_by_hw_breakpoint
2878 = record_btrace_supports_stopped_by_hw_breakpoint;
2879 ops->to_execution_direction = record_btrace_execution_direction;
2880 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2881 ops->to_done_generating_core = record_btrace_done_generating_core;
2882 ops->to_stratum = record_stratum;
2883 ops->to_magic = OPS_MAGIC;
2886 /* Start recording in BTS format. */
2889 cmd_record_btrace_bts_start (const char *args, int from_tty)
2891 if (args != NULL && *args != 0)
2892 error (_("Invalid argument."));
2894 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2898 execute_command ("target record-btrace", from_tty);
2900 CATCH (exception, RETURN_MASK_ALL)
2902 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2903 throw_exception (exception);
2908 /* Start recording in Intel Processor Trace format. */
2911 cmd_record_btrace_pt_start (const char *args, int from_tty)
2913 if (args != NULL && *args != 0)
2914 error (_("Invalid argument."));
2916 record_btrace_conf.format = BTRACE_FORMAT_PT;
2920 execute_command ("target record-btrace", from_tty);
2922 CATCH (exception, RETURN_MASK_ALL)
2924 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2925 throw_exception (exception);
2930 /* Alias for "target record". */
2933 cmd_record_btrace_start (const char *args, int from_tty)
2935 if (args != NULL && *args != 0)
2936 error (_("Invalid argument."));
2938 record_btrace_conf.format = BTRACE_FORMAT_PT;
2942 execute_command ("target record-btrace", from_tty);
2944 CATCH (exception, RETURN_MASK_ALL)
2946 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2950 execute_command ("target record-btrace", from_tty);
2952 CATCH (exception, RETURN_MASK_ALL)
2954 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2955 throw_exception (exception);
2962 /* The "set record btrace" command. */
2965 cmd_set_record_btrace (const char *args, int from_tty)
2967 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2970 /* The "show record btrace" command. */
2973 cmd_show_record_btrace (const char *args, int from_tty)
2975 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2978 /* The "show record btrace replay-memory-access" command. */
2981 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2982 struct cmd_list_element *c, const char *value)
2984 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2985 replay_memory_access);
2988 /* The "set record btrace bts" command. */
2991 cmd_set_record_btrace_bts (const char *args, int from_tty)
2993 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2994 "by an appropriate subcommand.\n"));
2995 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2996 all_commands, gdb_stdout);
2999 /* The "show record btrace bts" command. */
3002 cmd_show_record_btrace_bts (const char *args, int from_tty)
3004 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3007 /* The "set record btrace pt" command. */
3010 cmd_set_record_btrace_pt (const char *args, int from_tty)
3012 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3013 "by an appropriate subcommand.\n"));
3014 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3015 all_commands, gdb_stdout);
3018 /* The "show record btrace pt" command. */
3021 cmd_show_record_btrace_pt (const char *args, int from_tty)
3023 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3048 /* Initialize btrace commands. */
3051 _initialize_record_btrace (void)
3053 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3054 _("Start branch trace recording."), &record_btrace_cmdlist,
3055 "record btrace ", 0, &record_cmdlist);
3056 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3058 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3060 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3061 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3062 This format may not be available on all processors."),
3063 &record_btrace_cmdlist);
3064 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3066 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3068 Start branch trace recording in Intel Processor Trace format.\n\n\
3069 This format may not be available on all processors."),
3070 &record_btrace_cmdlist);
3071 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3073 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3074 _("Set record options"), &set_record_btrace_cmdlist,
3075 "set record btrace ", 0, &set_record_cmdlist);
3077 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3078 _("Show record options"), &show_record_btrace_cmdlist,
3079 "show record btrace ", 0, &show_record_cmdlist);
3081 add_setshow_enum_cmd ("replay-memory-access", no_class,
3082 replay_memory_access_types, &replay_memory_access, _("\
3083 Set what memory accesses are allowed during replay."), _("\
3084 Show what memory accesses are allowed during replay."),
3085 _("Default is READ-ONLY.\n\n\
3086 The btrace record target does not trace data.\n\
3087 The memory therefore corresponds to the live target and not \
3088 to the current replay position.\n\n\
3089 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3090 When READ-WRITE, allow accesses to read-only and read-write memory during \
3092 NULL, cmd_show_replay_memory_access,
3093 &set_record_btrace_cmdlist,
3094 &show_record_btrace_cmdlist);
3096 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3097 _("Set record btrace bts options"),
3098 &set_record_btrace_bts_cmdlist,
3099 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3101 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3102 _("Show record btrace bts options"),
3103 &show_record_btrace_bts_cmdlist,
3104 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3106 add_setshow_uinteger_cmd ("buffer-size", no_class,
3107 &record_btrace_conf.bts.size,
3108 _("Set the record/replay bts buffer size."),
3109 _("Show the record/replay bts buffer size."), _("\
3110 When starting recording request a trace buffer of this size. \
3111 The actual buffer size may differ from the requested size. \
3112 Use \"info record\" to see the actual buffer size.\n\n\
3113 Bigger buffers allow longer recording but also take more time to process \
3114 the recorded execution trace.\n\n\
3115 The trace buffer size may not be changed while recording."), NULL,
3116 show_record_bts_buffer_size_value,
3117 &set_record_btrace_bts_cmdlist,
3118 &show_record_btrace_bts_cmdlist);
3120 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3121 _("Set record btrace pt options"),
3122 &set_record_btrace_pt_cmdlist,
3123 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3125 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3126 _("Show record btrace pt options"),
3127 &show_record_btrace_pt_cmdlist,
3128 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3130 add_setshow_uinteger_cmd ("buffer-size", no_class,
3131 &record_btrace_conf.pt.size,
3132 _("Set the record/replay pt buffer size."),
3133 _("Show the record/replay pt buffer size."), _("\
3134 Bigger buffers allow longer recording but also take more time to process \
3135 the recorded execution.\n\
3136 The actual buffer size may differ from the requested size. Use \"info record\" \
3137 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3138 &set_record_btrace_pt_cmdlist,
3139 &show_record_btrace_pt_cmdlist);
3141 init_record_btrace_ops ();
3142 add_target (&record_btrace_ops);
3144 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3147 record_btrace_conf.bts.size = 64 * 1024;
3148 record_btrace_conf.pt.size = 16 * 1024;