1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
41 #include "gdbsupport/vec.h"
46 static const target_info record_btrace_target_info = {
48 N_("Branch tracing target"),
49 N_("Collect control-flow trace and provide the execution history.")
52 /* The target_ops of record-btrace. */
54 class record_btrace_target final : public target_ops
57 const target_info &info () const override
58 { return record_btrace_target_info; }
60 strata stratum () const override { return record_stratum; }
62 void close () override;
63 void async (int) override;
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
68 void disconnect (const char *, int) override;
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
74 { record_kill (this); }
76 enum record_method record_method (ptid_t ptid) override;
78 void stop_recording () override;
79 void info_record () override;
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
94 void record_stop_replaying () override;
96 enum target_xfer_status xfer_partial (enum target_object object,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
108 void fetch_registers (struct regcache *, int) override;
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
113 const struct frame_unwind *get_unwinder () override;
115 const struct frame_unwind *get_tailcall_unwinder () override;
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
121 void stop (ptid_t) override;
122 void update_thread_list () override;
123 bool thread_alive (ptid_t ptid) override;
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
128 bool can_execute_reverse () override;
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
141 static record_btrace_target record_btrace_ops;
143 /* Initialize the record-btrace target ops. */
145 /* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147 static const gdb::observers::token record_btrace_thread_observer_token {};
149 /* Memory access types used in set/show record btrace replay-memory-access. */
150 static const char replay_memory_access_read_only[] = "read-only";
151 static const char replay_memory_access_read_write[] = "read-write";
152 static const char *const replay_memory_access_types[] =
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
159 /* The currently allowed replay memory access type. */
160 static const char *replay_memory_access = replay_memory_access_read_only;
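/* Example (illustrative): the policy is selected via the CLI, e.g.

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access

   "read-only" is the default and rejects memory writes while replaying.  */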
162 /* The cpu state kinds. */
163 enum record_btrace_cpu_state_kind
170 /* The current cpu state. */
171 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
173 /* The current cpu for trace decode. */
174 static struct btrace_cpu record_btrace_cpu;
176 /* Command lists for "set/show record btrace". */
177 static struct cmd_list_element *set_record_btrace_cmdlist;
178 static struct cmd_list_element *show_record_btrace_cmdlist;
180 /* The execution direction of the last resume we got. See record-full.c. */
181 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
183 /* The async event handler for reverse/replay execution. */
184 static struct async_event_handler *record_btrace_async_inferior_event_handler;
186 /* A flag indicating that we are currently generating a core file. */
187 static int record_btrace_generating_corefile;
189 /* The current branch trace configuration. */
190 static struct btrace_config record_btrace_conf;
192 /* Command list for "record btrace". */
193 static struct cmd_list_element *record_btrace_cmdlist;
195 /* Command lists for "set/show record btrace bts". */
196 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
199 /* Command lists for "set/show record btrace pt". */
200 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
203 /* Command list for "set record btrace cpu". */
204 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
206 /* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
209 #define DEBUG(msg, args...) \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
219 /* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221 const struct btrace_cpu *
222 record_btrace_get_cpu (void)
224 switch (record_btrace_cpu_state)
230 record_btrace_cpu.vendor = CV_UNKNOWN;
233 return &record_btrace_cpu;
236 error (_("Internal error: bad record btrace cpu state."));
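/* Example (illustrative): "set record btrace cpu auto" keeps the state at
   CS_AUTO, for which this function presumably returns NULL in the elided
   case above; "set record btrace cpu none" yields the CV_UNKNOWN cpu seen
   above, which asks the trace decoder to skip cpu-specific errata
   workarounds.  */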
239 /* Update the branch trace for the current thread and return a pointer to its
242 Throws an error if there is no thread or no trace. This function never
245 static struct thread_info *
246 require_btrace_thread (void)
250 if (inferior_ptid == null_ptid)
251 error (_("No thread."));
253 thread_info *tp = inferior_thread ();
255 validate_registers_access ();
257 btrace_fetch (tp, record_btrace_get_cpu ());
259 if (btrace_is_empty (tp))
260 error (_("No trace."));
265 /* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
268 Throws an error if there is no thread or no trace. This function never
271 static struct btrace_thread_info *
272 require_btrace (void)
274 struct thread_info *tp;
276 tp = require_btrace_thread ();
281 /* Enable branch tracing for one thread. Warn on errors. */
284 record_btrace_enable_warn (struct thread_info *tp)
288 btrace_enable (tp, &record_btrace_conf);
290 catch (const gdb_exception_error &error)
292 warning ("%s", error.what ());
296 /* Enable automatic tracing of new threads. */
299 record_btrace_auto_enable (void)
301 DEBUG ("attach thread observer");
303 gdb::observers::new_thread.attach (record_btrace_enable_warn,
304 record_btrace_thread_observer_token);
307 /* Disable automatic tracing of new threads. */
310 record_btrace_auto_disable (void)
312 DEBUG ("detach thread observer");
314 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
317 /* The record-btrace async event handler function. */
320 record_btrace_handle_async_inferior_event (gdb_client_data data)
322 inferior_event_handler (INF_REG_EVENT, NULL);
325 /* See record-btrace.h. */
328 record_btrace_push_target (void)
332 record_btrace_auto_enable ();
334 push_target (&record_btrace_ops);
336 record_btrace_async_inferior_event_handler
337 = create_async_event_handler (record_btrace_handle_async_inferior_event,
339 record_btrace_generating_corefile = 0;
341 format = btrace_format_short_string (record_btrace_conf.format);
342 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
345 /* Disable btrace on a set of threads on scope exit. */
347 struct scoped_btrace_disable
349 scoped_btrace_disable () = default;
351 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
353 ~scoped_btrace_disable ()
355 for (thread_info *tp : m_threads)
359 void add_thread (thread_info *thread)
361 m_threads.push_front (thread);
370 std::forward_list<thread_info *> m_threads;
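/* Usage sketch (illustrative): enable tracing thread by thread, recording
   each success via add_thread; once every thread succeeded, call the
   (elided) discard () member so the destructor does not disable tracing
   again.  See record_btrace_target_open below.  */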
373 /* Open target record-btrace. */
376 record_btrace_target_open (const char *args, int from_tty)
378 /* If we fail to enable btrace for one thread, disable it for the threads for
379 which it was successfully enabled. */
380 scoped_btrace_disable btrace_disable;
386 if (!target_has_execution)
387 error (_("The program is not being run."));
389 for (thread_info *tp : all_non_exited_threads ())
390 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
392 btrace_enable (tp, &record_btrace_conf);
394 btrace_disable.add_thread (tp);
397 record_btrace_push_target ();
399 btrace_disable.discard ();
402 /* The stop_recording method of target record-btrace. */
405 record_btrace_target::stop_recording ()
407 DEBUG ("stop recording");
409 record_btrace_auto_disable ();
411 for (thread_info *tp : all_non_exited_threads ())
412 if (tp->btrace.target != NULL)
416 /* The disconnect method of target record-btrace. */
419 record_btrace_target::disconnect (const char *args,
422 struct target_ops *beneath = this->beneath ();
424 /* Do not stop recording, just clean up the GDB side. */
425 unpush_target (this);
427 /* Forward disconnect. */
428 beneath->disconnect (args, from_tty);
431 /* The close method of target record-btrace. */
434 record_btrace_target::close ()
436 if (record_btrace_async_inferior_event_handler != NULL)
437 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
439 /* Make sure automatic recording gets disabled even if we did not stop
440 recording before closing the record-btrace target. */
441 record_btrace_auto_disable ();
443 /* We should have already stopped recording.
444 Tear down btrace in case we have not. */
445 for (thread_info *tp : all_non_exited_threads ())
446 btrace_teardown (tp);
449 /* The async method of target record-btrace. */
452 record_btrace_target::async (int enable)
455 mark_async_event_handler (record_btrace_async_inferior_event_handler);
457 clear_async_event_handler (record_btrace_async_inferior_event_handler);
459 this->beneath ()->async (enable);
462 /* Adjusts the size and returns a human-readable size suffix. */
465 record_btrace_adjust_size (unsigned int *size)
471 if ((sz & ((1u << 30) - 1)) == 0)
476 else if ((sz & ((1u << 20) - 1)) == 0)
481 else if ((sz & ((1u << 10) - 1)) == 0)
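/* Example (illustrative, assuming the elided branches shift SZ by 30, 20
   or 10 bits and return "GB", "MB" or "KB" respectively):

     unsigned int size = 64 * 1024;
     const char *suffix = record_btrace_adjust_size (&size);

   leaves size == 64 and suffix == "KB", so the callers below print
   "Buffer size: 64KB.".  */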
490 /* Print a BTS configuration. */
493 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
501 suffix = record_btrace_adjust_size (&size);
502 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
506 /* Print an Intel Processor Trace configuration. */
509 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
517 suffix = record_btrace_adjust_size (&size);
518 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
522 /* Print a branch tracing configuration. */
525 record_btrace_print_conf (const struct btrace_config *conf)
527 printf_unfiltered (_("Recording format: %s.\n"),
528 btrace_format_string (conf->format));
530 switch (conf->format)
532 case BTRACE_FORMAT_NONE:
535 case BTRACE_FORMAT_BTS:
536 record_btrace_print_bts_conf (&conf->bts);
539 case BTRACE_FORMAT_PT:
540 record_btrace_print_pt_conf (&conf->pt);
544 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
547 /* The info_record method of target record-btrace. */
550 record_btrace_target::info_record ()
552 struct btrace_thread_info *btinfo;
553 const struct btrace_config *conf;
554 struct thread_info *tp;
555 unsigned int insns, calls, gaps;
559 tp = find_thread_ptid (inferior_ptid);
561 error (_("No thread."));
563 validate_registers_access ();
565 btinfo = &tp->btrace;
567 conf = ::btrace_conf (btinfo);
569 record_btrace_print_conf (conf);
571 btrace_fetch (tp, record_btrace_get_cpu ());
577 if (!btrace_is_empty (tp))
579 struct btrace_call_iterator call;
580 struct btrace_insn_iterator insn;
582 btrace_call_end (&call, btinfo);
583 btrace_call_prev (&call, 1);
584 calls = btrace_call_number (&call);
586 btrace_insn_end (&insn, btinfo);
587 insns = btrace_insn_number (&insn);
589 /* If the last instruction is not a gap, it is the current instruction
590 that is not actually part of the record. */
591 if (btrace_insn_get (&insn) != NULL)
594 gaps = btinfo->ngaps;
597 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
598 "for thread %s (%s).\n"), insns, calls, gaps,
599 print_thread_id (tp),
600 target_pid_to_str (tp->ptid).c_str ());
602 if (btrace_is_replaying (tp))
603 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
604 btrace_insn_number (btinfo->replay));
607 /* Print a decode error. */
610 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
611 enum btrace_format format)
613 const char *errstr = btrace_decode_error (format, errcode);
615 uiout->text (_("["));
616 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
617 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
619 uiout->text (_("decode error ("));
620 uiout->field_signed ("errcode", errcode);
621 uiout->text (_("): "));
623 uiout->text (errstr);
624 uiout->text (_("]\n"));
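/* Example (illustrative): for a BTS trace with ERRCODE == -1 this prints
   something like "[decode error (-1): <error string>]", whereas a positive
   Intel PT errcode is a notification and prints only "[<error string>]".  */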
627 /* A range of source lines. */
629 struct btrace_line_range
631 /* The symtab this line is from. */
632 struct symtab *symtab;
634 /* The first line (inclusive). */
637 /* The last line (exclusive). */
641 /* Construct a line range. */
643 static struct btrace_line_range
644 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
646 struct btrace_line_range range;
648 range.symtab = symtab;
655 /* Add a line to a line range. */
657 static struct btrace_line_range
658 btrace_line_range_add (struct btrace_line_range range, int line)
660 if (range.end <= range.begin)
662 /* This is the first entry. */
664 range.end = line + 1;
666 else if (line < range.begin)
668 else if (range.end < line)
674 /* Return non-zero if RANGE is empty, zero otherwise. */
677 btrace_line_range_is_empty (struct btrace_line_range range)
679 return range.end <= range.begin;
682 /* Return non-zero if LHS contains RHS, zero otherwise. */
685 btrace_line_range_contains_range (struct btrace_line_range lhs,
686 struct btrace_line_range rhs)
688 return ((lhs.symtab == rhs.symtab)
689 && (lhs.begin <= rhs.begin)
690 && (rhs.end <= lhs.end));
693 /* Find the line range associated with PC. */
695 static struct btrace_line_range
696 btrace_find_line_range (CORE_ADDR pc)
698 struct btrace_line_range range;
699 struct linetable_entry *lines;
700 struct linetable *ltable;
701 struct symtab *symtab;
704 symtab = find_pc_line_symtab (pc);
706 return btrace_mk_line_range (NULL, 0, 0);
708 ltable = SYMTAB_LINETABLE (symtab);
710 return btrace_mk_line_range (symtab, 0, 0);
712 nlines = ltable->nitems;
713 lines = ltable->item;
715 return btrace_mk_line_range (symtab, 0, 0);
717 range = btrace_mk_line_range (symtab, 0, 0);
718 for (i = 0; i < nlines - 1; i++)
720 if ((lines[i].pc == pc) && (lines[i].line != 0))
721 range = btrace_line_range_add (range, lines[i].line);
727 /* Print source lines in LINES to UIOUT.
729 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
730 instructions corresponding to that source line. When printing a new source
731 line, we do the cleanups for the open chain and open a new cleanup chain for
732 the new source line. If the source line range in LINES is not empty, this
733 function will leave the cleanup chain for the last printed source line open
734 so instructions can be added to it. */
737 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
738 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
739 gdb::optional<ui_out_emit_list> *asm_list,
740 gdb_disassembly_flags flags)
742 print_source_lines_flags psl_flags;
744 if (flags & DISASSEMBLY_FILENAME)
745 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
747 for (int line = lines.begin; line < lines.end; ++line)
751 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
753 print_source_lines (lines.symtab, line, line + 1, psl_flags);
755 asm_list->emplace (uiout, "line_asm_insn");
759 /* Disassemble a section of the recorded instruction trace. */
762 btrace_insn_history (struct ui_out *uiout,
763 const struct btrace_thread_info *btinfo,
764 const struct btrace_insn_iterator *begin,
765 const struct btrace_insn_iterator *end,
766 gdb_disassembly_flags flags)
768 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
769 btrace_insn_number (begin), btrace_insn_number (end));
771 flags |= DISASSEMBLY_SPECULATIVE;
773 struct gdbarch *gdbarch = target_gdbarch ();
774 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
776 ui_out_emit_list list_emitter (uiout, "asm_insns");
778 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
779 gdb::optional<ui_out_emit_list> asm_list;
781 gdb_pretty_print_disassembler disasm (gdbarch, uiout);
783 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
784 btrace_insn_next (&it, 1))
786 const struct btrace_insn *insn;
788 insn = btrace_insn_get (&it);
790 /* A NULL instruction indicates a gap in the trace. */
793 const struct btrace_config *conf;
795 conf = btrace_conf (btinfo);
797 /* We have trace so we must have a configuration. */
798 gdb_assert (conf != NULL);
800 uiout->field_fmt ("insn-number", "%u",
801 btrace_insn_number (&it));
804 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
809 struct disasm_insn dinsn;
811 if ((flags & DISASSEMBLY_SOURCE) != 0)
813 struct btrace_line_range lines;
815 lines = btrace_find_line_range (insn->pc);
816 if (!btrace_line_range_is_empty (lines)
817 && !btrace_line_range_contains_range (last_lines, lines))
819 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
823 else if (!src_and_asm_tuple.has_value ())
825 gdb_assert (!asm_list.has_value ());
827 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
829 /* No source information. */
830 asm_list.emplace (uiout, "line_asm_insn");
833 gdb_assert (src_and_asm_tuple.has_value ());
834 gdb_assert (asm_list.has_value ());
837 memset (&dinsn, 0, sizeof (dinsn));
838 dinsn.number = btrace_insn_number (&it);
839 dinsn.addr = insn->pc;
841 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
842 dinsn.is_speculative = 1;
844 disasm.pretty_print_insn (&dinsn, flags);
849 /* The insn_history method of target record-btrace. */
852 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
854 struct btrace_thread_info *btinfo;
855 struct btrace_insn_history *history;
856 struct btrace_insn_iterator begin, end;
857 struct ui_out *uiout;
858 unsigned int context, covered;
860 uiout = current_uiout;
861 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
862 context = abs (size);
864 error (_("Bad record instruction-history-size."));
866 btinfo = require_btrace ();
867 history = btinfo->insn_history;
870 struct btrace_insn_iterator *replay;
872 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
874 /* If we're replaying, we start at the replay position. Otherwise, we
875 start at the tail of the trace. */
876 replay = btinfo->replay;
880 btrace_insn_end (&begin, btinfo);
882 /* We start from here and expand in the requested direction. Then we
883 expand in the other direction, as well, to fill up any remaining
888 /* We want the current position covered, as well. */
889 covered = btrace_insn_next (&end, 1);
890 covered += btrace_insn_prev (&begin, context - covered);
891 covered += btrace_insn_next (&end, context - covered);
895 covered = btrace_insn_next (&end, context);
896 covered += btrace_insn_prev (&begin, context - covered);
901 begin = history->begin;
904 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
905 btrace_insn_number (&begin), btrace_insn_number (&end));
910 covered = btrace_insn_prev (&begin, context);
915 covered = btrace_insn_next (&end, context);
920 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
924 printf_unfiltered (_("At the start of the branch trace record.\n"));
926 printf_unfiltered (_("At the end of the branch trace record.\n"));
929 btrace_set_insn_history (btinfo, &begin, &end);
932 /* The insn_history_range method of target record-btrace. */
935 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
936 gdb_disassembly_flags flags)
938 struct btrace_thread_info *btinfo;
939 struct btrace_insn_iterator begin, end;
940 struct ui_out *uiout;
941 unsigned int low, high;
944 uiout = current_uiout;
945 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
949 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
951 /* Check for wrap-arounds. */
952 if (low != from || high != to)
953 error (_("Bad range."));
956 error (_("Bad range."));
958 btinfo = require_btrace ();
960 found = btrace_find_insn_by_number (&begin, btinfo, low);
962 error (_("Range out of bounds."));
964 found = btrace_find_insn_by_number (&end, btinfo, high);
967 /* Silently truncate the range. */
968 btrace_insn_end (&end, btinfo);
972 /* We want both begin and end to be inclusive. */
973 btrace_insn_next (&end, 1);
976 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
977 btrace_set_insn_history (btinfo, &begin, &end);
980 /* The insn_history_from method of target record-btrace. */
983 record_btrace_target::insn_history_from (ULONGEST from, int size,
984 gdb_disassembly_flags flags)
986 ULONGEST begin, end, context;
988 context = abs (size);
990 error (_("Bad record instruction-history-size."));
999 begin = from - context + 1;
1004 end = from + context - 1;
1006 /* Check for wrap-around. */
1011 insn_history_range (begin, end, flags);
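/* Example (illustrative): with FROM == 100 and SIZE == -10, the code
   above computes begin = 100 - 10 + 1 = 91 and end = 100, i.e. the ten
   instructions 91..100 preceding and including FROM.  With SIZE == 10 it
   computes the range 100..109 instead.  */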
1014 /* Print the instruction number range for a function call history line. */
1017 btrace_call_history_insn_range (struct ui_out *uiout,
1018 const struct btrace_function *bfun)
1020 unsigned int begin, end, size;
1022 size = bfun->insn.size ();
1023 gdb_assert (size > 0);
1025 begin = bfun->insn_offset;
1026 end = begin + size - 1;
1028 uiout->field_unsigned ("insn begin", begin);
1030 uiout->field_unsigned ("insn end", end);
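/* Example (illustrative): a function segment with insn_offset == 10 and
   three instructions yields the inclusive range 10..12; the separator
   printed between the two fields is elided above.  */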
1033 /* Compute the lowest and highest source line for the instructions in BFUN
1034 and return them in PBEGIN and PEND.
1035 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1036 result from inlining or macro expansion. */
1039 btrace_compute_src_line_range (const struct btrace_function *bfun,
1040 int *pbegin, int *pend)
1042 struct symtab *symtab;
1053 symtab = symbol_symtab (sym);
1055 for (const btrace_insn &insn : bfun->insn)
1057 struct symtab_and_line sal;
1059 sal = find_pc_line (insn.pc, 0);
1060 if (sal.symtab != symtab || sal.line == 0)
1063 begin = std::min (begin, sal.line);
1064 end = std::max (end, sal.line);
1072 /* Print the source line information for a function call history line. */
1075 btrace_call_history_src_line (struct ui_out *uiout,
1076 const struct btrace_function *bfun)
1085 uiout->field_string ("file",
1086 symtab_to_filename_for_display (symbol_symtab (sym)),
1087 ui_out_style_kind::FILE);
1089 btrace_compute_src_line_range (bfun, &begin, &end);
1094 uiout->field_signed ("min line", begin);
1100 uiout->field_signed ("max line", end);
1103 /* Get the name of a branch trace function. */
1106 btrace_get_bfun_name (const struct btrace_function *bfun)
1108 struct minimal_symbol *msym;
1118 return SYMBOL_PRINT_NAME (sym);
1119 else if (msym != NULL)
1120 return MSYMBOL_PRINT_NAME (msym);
1125 /* Disassemble a section of the recorded function trace. */
1128 btrace_call_history (struct ui_out *uiout,
1129 const struct btrace_thread_info *btinfo,
1130 const struct btrace_call_iterator *begin,
1131 const struct btrace_call_iterator *end,
1134 struct btrace_call_iterator it;
1135 record_print_flags flags = (enum record_print_flag) int_flags;
1137 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1138 btrace_call_number (end));
1140 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1142 const struct btrace_function *bfun;
1143 struct minimal_symbol *msym;
1146 bfun = btrace_call_get (&it);
1150 /* Print the function index. */
1151 uiout->field_unsigned ("index", bfun->number);
1154 /* Indicate gaps in the trace. */
1155 if (bfun->errcode != 0)
1157 const struct btrace_config *conf;
1159 conf = btrace_conf (btinfo);
1161 /* We have trace so we must have a configuration. */
1162 gdb_assert (conf != NULL);
1164 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1169 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1171 int level = bfun->level + btinfo->level, i;
1173 for (i = 0; i < level; ++i)
1178 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
1179 ui_out_style_kind::FUNCTION);
1180 else if (msym != NULL)
1181 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
1182 ui_out_style_kind::FUNCTION);
1183 else if (!uiout->is_mi_like_p ())
1184 uiout->field_string ("function", "??",
1185 ui_out_style_kind::FUNCTION);
1187 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1189 uiout->text (_("\tinst "));
1190 btrace_call_history_insn_range (uiout, bfun);
1193 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1195 uiout->text (_("\tat "));
1196 btrace_call_history_src_line (uiout, bfun);
1203 /* The call_history method of target record-btrace. */
1206 record_btrace_target::call_history (int size, record_print_flags flags)
1208 struct btrace_thread_info *btinfo;
1209 struct btrace_call_history *history;
1210 struct btrace_call_iterator begin, end;
1211 struct ui_out *uiout;
1212 unsigned int context, covered;
1214 uiout = current_uiout;
1215 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1216 context = abs (size);
1218 error (_("Bad record function-call-history-size."));
1220 btinfo = require_btrace ();
1221 history = btinfo->call_history;
1222 if (history == NULL)
1224 struct btrace_insn_iterator *replay;
1226 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1228 /* If we're replaying, we start at the replay position. Otherwise, we
1229 start at the tail of the trace. */
1230 replay = btinfo->replay;
1233 begin.btinfo = btinfo;
1234 begin.index = replay->call_index;
1237 btrace_call_end (&begin, btinfo);
1239 /* We start from here and expand in the requested direction. Then we
1240 expand in the other direction, as well, to fill up any remaining
1245 /* We want the current position covered, as well. */
1246 covered = btrace_call_next (&end, 1);
1247 covered += btrace_call_prev (&begin, context - covered);
1248 covered += btrace_call_next (&end, context - covered);
1252 covered = btrace_call_next (&end, context);
1253 covered += btrace_call_prev (&begin, context - covered);
1258 begin = history->begin;
1261 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1262 btrace_call_number (&begin), btrace_call_number (&end));
1267 covered = btrace_call_prev (&begin, context);
1272 covered = btrace_call_next (&end, context);
1277 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1281 printf_unfiltered (_("At the start of the branch trace record.\n"));
1283 printf_unfiltered (_("At the end of the branch trace record.\n"));
1286 btrace_set_call_history (btinfo, &begin, &end);
1289 /* The call_history_range method of target record-btrace. */
1292 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1293 record_print_flags flags)
1295 struct btrace_thread_info *btinfo;
1296 struct btrace_call_iterator begin, end;
1297 struct ui_out *uiout;
1298 unsigned int low, high;
1301 uiout = current_uiout;
1302 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1306 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1308 /* Check for wrap-arounds. */
1309 if (low != from || high != to)
1310 error (_("Bad range."));
1313 error (_("Bad range."));
1315 btinfo = require_btrace ();
1317 found = btrace_find_call_by_number (&begin, btinfo, low);
1319 error (_("Range out of bounds."));
1321 found = btrace_find_call_by_number (&end, btinfo, high);
1324 /* Silently truncate the range. */
1325 btrace_call_end (&end, btinfo);
1329 /* We want both begin and end to be inclusive. */
1330 btrace_call_next (&end, 1);
1333 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1334 btrace_set_call_history (btinfo, &begin, &end);
1337 /* The call_history_from method of target record-btrace. */
1340 record_btrace_target::call_history_from (ULONGEST from, int size,
1341 record_print_flags flags)
1343 ULONGEST begin, end, context;
1345 context = abs (size);
1347 error (_("Bad record function-call-history-size."));
1356 begin = from - context + 1;
1361 end = from + context - 1;
1363 /* Check for wrap-around. */
1368 call_history_range (begin, end, flags);
1371 /* The record_method method of target record-btrace. */
1374 record_btrace_target::record_method (ptid_t ptid)
1376 struct thread_info * const tp = find_thread_ptid (ptid);
1379 error (_("No thread."));
1381 if (tp->btrace.target == NULL)
1382 return RECORD_METHOD_NONE;
1384 return RECORD_METHOD_BTRACE;
1387 /* The record_is_replaying method of target record-btrace. */
1390 record_btrace_target::record_is_replaying (ptid_t ptid)
1392 for (thread_info *tp : all_non_exited_threads (ptid))
1393 if (btrace_is_replaying (tp))
1399 /* The record_will_replay method of target record-btrace. */
1402 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1404 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1407 /* The xfer_partial method of target record-btrace. */
1409 enum target_xfer_status
1410 record_btrace_target::xfer_partial (enum target_object object,
1411 const char *annex, gdb_byte *readbuf,
1412 const gdb_byte *writebuf, ULONGEST offset,
1413 ULONGEST len, ULONGEST *xfered_len)
1415 /* Filter out requests that don't make sense during replay. */
1416 if (replay_memory_access == replay_memory_access_read_only
1417 && !record_btrace_generating_corefile
1418 && record_is_replaying (inferior_ptid))
1422 case TARGET_OBJECT_MEMORY:
1424 struct target_section *section;
1426 /* We do not allow writing memory in general. */
1427 if (writebuf != NULL)
1430 return TARGET_XFER_UNAVAILABLE;
1433 /* We allow reading readonly memory. */
1434 section = target_section_by_addr (this, offset);
1435 if (section != NULL)
1437 /* Check if the section we found is readonly. */
1438 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1439 section->the_bfd_section)
1440 & SEC_READONLY) != 0)
1442 /* Truncate the request to fit into this section. */
1443 len = std::min (len, section->endaddr - offset);
1449 return TARGET_XFER_UNAVAILABLE;
1454 /* Forward the request. */
1455 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1456 offset, len, xfered_len);
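/* Example (illustrative): while replaying under the default "read-only"
   policy, a read from a SEC_READONLY section (e.g. .rodata or .text) is
   truncated to the section and forwarded, while write requests and reads
   of stack or heap memory report TARGET_XFER_UNAVAILABLE.  */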
1459 /* The insert_breakpoint method of target record-btrace. */
1462 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1463 struct bp_target_info *bp_tgt)
1468 /* Inserting breakpoints requires accessing memory. Allow it for the
1469 duration of this function. */
1470 old = replay_memory_access;
1471 replay_memory_access = replay_memory_access_read_write;
1476 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1478 catch (const gdb_exception &except)
1480 replay_memory_access = old;
1483 replay_memory_access = old;
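/* The save/restore pattern above could also be written with gdbsupport's
   scoped_restore; an equivalent sketch (illustrative, not the actual
   code):

     scoped_restore restore_access
       = make_scoped_restore (&replay_memory_access,
			      replay_memory_access_read_write);
     return this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);

   the restore then happens on both the normal and the exception path.  */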
1488 /* The remove_breakpoint method of target record-btrace. */
1491 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1492 struct bp_target_info *bp_tgt,
1493 enum remove_bp_reason reason)
1498 /* Removing breakpoints requires accessing memory. Allow it for the
1499 duration of this function. */
1500 old = replay_memory_access;
1501 replay_memory_access = replay_memory_access_read_write;
1506 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1508 catch (const gdb_exception &except)
1510 replay_memory_access = old;
1513 replay_memory_access = old;
1518 /* The fetch_registers method of target record-btrace. */
1521 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1523 struct btrace_insn_iterator *replay;
1524 struct thread_info *tp;
1526 tp = find_thread_ptid (regcache->ptid ());
1527 gdb_assert (tp != NULL);
1529 replay = tp->btrace.replay;
1530 if (replay != NULL && !record_btrace_generating_corefile)
1532 const struct btrace_insn *insn;
1533 struct gdbarch *gdbarch;
1536 gdbarch = regcache->arch ();
1537 pcreg = gdbarch_pc_regnum (gdbarch);
1541 /* We can only provide the PC register. */
1542 if (regno >= 0 && regno != pcreg)
1545 insn = btrace_insn_get (replay);
1546 gdb_assert (insn != NULL);
1548 regcache->raw_supply (regno, &insn->pc);
1551 this->beneath ()->fetch_registers (regcache, regno);
1554 /* The store_registers method of target record-btrace. */
1557 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1559 if (!record_btrace_generating_corefile
1560 && record_is_replaying (regcache->ptid ()))
1561 error (_("Cannot write registers while replaying."));
1563 gdb_assert (may_write_registers != 0);
1565 this->beneath ()->store_registers (regcache, regno);
1568 /* The prepare_to_store method of target record-btrace. */
1571 record_btrace_target::prepare_to_store (struct regcache *regcache)
1573 if (!record_btrace_generating_corefile
1574 && record_is_replaying (regcache->ptid ()))
1577 this->beneath ()->prepare_to_store (regcache);
1580 /* The branch trace frame cache. */
1582 struct btrace_frame_cache
1585 struct thread_info *tp;
1587 /* The frame info. */
1588 struct frame_info *frame;
1590 /* The branch trace function segment. */
1591 const struct btrace_function *bfun;
1594 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1596 static htab_t bfcache;
1598 /* hash_f for htab_create_alloc of bfcache. */
1601 bfcache_hash (const void *arg)
1603 const struct btrace_frame_cache *cache
1604 = (const struct btrace_frame_cache *) arg;
1606 return htab_hash_pointer (cache->frame);
1609 /* eq_f for htab_create_alloc of bfcache. */
1612 bfcache_eq (const void *arg1, const void *arg2)
1614 const struct btrace_frame_cache *cache1
1615 = (const struct btrace_frame_cache *) arg1;
1616 const struct btrace_frame_cache *cache2
1617 = (const struct btrace_frame_cache *) arg2;
1619 return cache1->frame == cache2->frame;
1622 /* Create a new btrace frame cache. */
1624 static struct btrace_frame_cache *
1625 bfcache_new (struct frame_info *frame)
1627 struct btrace_frame_cache *cache;
1630 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1631 cache->frame = frame;
1633 slot = htab_find_slot (bfcache, cache, INSERT);
1634 gdb_assert (*slot == NULL);
1640 /* Extract the branch trace function from a branch trace frame. */
1642 static const struct btrace_function *
1643 btrace_get_frame_function (struct frame_info *frame)
1645 const struct btrace_frame_cache *cache;
1646 struct btrace_frame_cache pattern;
1649 pattern.frame = frame;
1651 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1655 cache = (const struct btrace_frame_cache *) *slot;
1659 /* Implement stop_reason method for record_btrace_frame_unwind. */
1661 static enum unwind_stop_reason
1662 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1665 const struct btrace_frame_cache *cache;
1666 const struct btrace_function *bfun;
1668 cache = (const struct btrace_frame_cache *) *this_cache;
1670 gdb_assert (bfun != NULL);
1673 return UNWIND_UNAVAILABLE;
1675 return UNWIND_NO_REASON;
1678 /* Implement this_id method for record_btrace_frame_unwind. */
1681 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1682 struct frame_id *this_id)
1684 const struct btrace_frame_cache *cache;
1685 const struct btrace_function *bfun;
1686 struct btrace_call_iterator it;
1687 CORE_ADDR code, special;
1689 cache = (const struct btrace_frame_cache *) *this_cache;
1692 gdb_assert (bfun != NULL);
1694 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1695 bfun = btrace_call_get (&it);
1697 code = get_frame_func (this_frame);
1698 special = bfun->number;
1700 *this_id = frame_id_build_unavailable_stack_special (code, special);
1702 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1703 btrace_get_bfun_name (cache->bfun),
1704 core_addr_to_string_nz (this_id->code_addr),
1705 core_addr_to_string_nz (this_id->special_addr));
1708 /* Implement prev_register method for record_btrace_frame_unwind. */
1710 static struct value *
1711 record_btrace_frame_prev_register (struct frame_info *this_frame,
1715 const struct btrace_frame_cache *cache;
1716 const struct btrace_function *bfun, *caller;
1717 struct btrace_call_iterator it;
1718 struct gdbarch *gdbarch;
1722 gdbarch = get_frame_arch (this_frame);
1723 pcreg = gdbarch_pc_regnum (gdbarch);
1724 if (pcreg < 0 || regnum != pcreg)
1725 throw_error (NOT_AVAILABLE_ERROR,
1726 _("Registers are not available in btrace record history"));
1728 cache = (const struct btrace_frame_cache *) *this_cache;
1730 gdb_assert (bfun != NULL);
1732 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1733 throw_error (NOT_AVAILABLE_ERROR,
1734 _("No caller in btrace record history"));
1736 caller = btrace_call_get (&it);
1738 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1739 pc = caller->insn.front ().pc;
1742 pc = caller->insn.back ().pc;
1743 pc += gdb_insn_length (gdbarch, pc);
1746 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1747 btrace_get_bfun_name (bfun), bfun->level,
1748 core_addr_to_string_nz (pc));
1750 return frame_unwind_got_address (this_frame, regnum, pc);
1753 /* Implement sniffer method for record_btrace_frame_unwind. */
1756 record_btrace_frame_sniffer (const struct frame_unwind *self,
1757 struct frame_info *this_frame,
1760 const struct btrace_function *bfun;
1761 struct btrace_frame_cache *cache;
1762 struct thread_info *tp;
1763 struct frame_info *next;
1765 /* THIS_FRAME does not contain a reference to its thread. */
1766 tp = inferior_thread ();
1769 next = get_next_frame (this_frame);
1772 const struct btrace_insn_iterator *replay;
1774 replay = tp->btrace.replay;
1776 bfun = &replay->btinfo->functions[replay->call_index];
1780 const struct btrace_function *callee;
1781 struct btrace_call_iterator it;
1783 callee = btrace_get_frame_function (next);
1784 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1787 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1790 bfun = btrace_call_get (&it);
1796 DEBUG ("[frame] sniffed frame for %s on level %d",
1797 btrace_get_bfun_name (bfun), bfun->level);
1799 /* This is our frame. Initialize the frame cache. */
1800 cache = bfcache_new (this_frame);
1804 *this_cache = cache;
1808 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1811 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1812 struct frame_info *this_frame,
1815 const struct btrace_function *bfun, *callee;
1816 struct btrace_frame_cache *cache;
1817 struct btrace_call_iterator it;
1818 struct frame_info *next;
1819 struct thread_info *tinfo;
1821 next = get_next_frame (this_frame);
1825 callee = btrace_get_frame_function (next);
1829 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1832 tinfo = inferior_thread ();
1833 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1836 bfun = btrace_call_get (&it);
1838 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1839 btrace_get_bfun_name (bfun), bfun->level);
1841 /* This is our frame. Initialize the frame cache. */
1842 cache = bfcache_new (this_frame);
1846 *this_cache = cache;
1851 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1853 struct btrace_frame_cache *cache;
1856 cache = (struct btrace_frame_cache *) this_cache;
1858 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1859 gdb_assert (slot != NULL);
1861 htab_remove_elt (bfcache, cache);
1864 /* btrace recording does not store previous memory content, nor the stack
1865 frames' content. Any unwinding would return erroneous results as the stack
1866 contents no longer match the changed PC value restored from history.
1867 Therefore this unwinder reports any possibly unwound registers as
1870 const struct frame_unwind record_btrace_frame_unwind =
1873 record_btrace_frame_unwind_stop_reason,
1874 record_btrace_frame_this_id,
1875 record_btrace_frame_prev_register,
1877 record_btrace_frame_sniffer,
1878 record_btrace_frame_dealloc_cache
1881 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1884 record_btrace_frame_unwind_stop_reason,
1885 record_btrace_frame_this_id,
1886 record_btrace_frame_prev_register,
1888 record_btrace_tailcall_frame_sniffer,
1889 record_btrace_frame_dealloc_cache
1892 /* Implement the get_unwinder method. */
1894 const struct frame_unwind *
1895 record_btrace_target::get_unwinder ()
1897 return &record_btrace_frame_unwind;
1900 /* Implement the get_tailcall_unwinder method. */
1902 const struct frame_unwind *
1903 record_btrace_target::get_tailcall_unwinder ()
1905 return &record_btrace_tailcall_frame_unwind;
1908 /* Return a human-readable string for FLAG. */
1911 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1919 return "reverse-step";
1925 return "reverse-cont";
1934 /* Indicate that TP should be resumed according to FLAG. */
1937 record_btrace_resume_thread (struct thread_info *tp,
1938 enum btrace_thread_flag flag)
1940 struct btrace_thread_info *btinfo;
1942 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1943 target_pid_to_str (tp->ptid).c_str (), flag,
1944 btrace_thread_flag_to_str (flag));
1946 btinfo = &tp->btrace;
1948 /* Fetch the latest branch trace. */
1949 btrace_fetch (tp, record_btrace_get_cpu ());
1951 /* A resume request overwrites a preceding resume or stop request. */
1952 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1953 btinfo->flags |= flag;
1956 /* Get the current frame for TP. */
1958 static struct frame_id
1959 get_thread_current_frame_id (struct thread_info *tp)
1964 /* Set current thread, which is implicitly used by
1965 get_current_frame. */
1966 scoped_restore_current_thread restore_thread;
1968 switch_to_thread (tp);
1970 /* Clear the executing flag to allow changes to the current frame.
1971 We are not actually running, yet. We just started a reverse execution
1972 command or a record goto command.
1973 For the latter, EXECUTING is false and this has no effect.
1974 For the former, EXECUTING is true and we're in wait, about to
1975 move the thread. Since we need to recompute the stack, we temporarily
1976 set EXECUTING to false. */
1977 executing = tp->executing;
1978 set_executing (inferior_ptid, false);
1983 id = get_frame_id (get_current_frame ());
1985 catch (const gdb_exception &except)
1987 /* Restore the previous execution state. */
1988 set_executing (inferior_ptid, executing);
1993 /* Restore the previous execution state. */
1994 set_executing (inferior_ptid, executing);
1999 /* Start replaying a thread. */
2001 static struct btrace_insn_iterator *
2002 record_btrace_start_replaying (struct thread_info *tp)
2004 struct btrace_insn_iterator *replay;
2005 struct btrace_thread_info *btinfo;
2007 btinfo = &tp->btrace;
2010 /* We can't start replaying without trace. */
2011 if (btinfo->functions.empty ())
2014 /* GDB stores the current frame_id when stepping in order to detect steps
2016 Since frames are computed differently when we're replaying, we need to
2017 recompute those stored frames and fix them up so we can still detect
2018 subroutines after we started replaying. */
2021 struct frame_id frame_id;
2022 int upd_step_frame_id, upd_step_stack_frame_id;
2024 /* The current frame without replaying - computed via normal unwind. */
2025 frame_id = get_thread_current_frame_id (tp);
2027 /* Check if we need to update any stepping-related frame id's. */
2028 upd_step_frame_id = frame_id_eq (frame_id,
2029 tp->control.step_frame_id);
2030 upd_step_stack_frame_id = frame_id_eq (frame_id,
2031 tp->control.step_stack_frame_id);
2033 /* We start replaying at the end of the branch trace. This corresponds
2034 to the current instruction. */
2035 replay = XNEW (struct btrace_insn_iterator);
2036 btrace_insn_end (replay, btinfo);
2038 /* Skip gaps at the end of the trace. */
2039 while (btrace_insn_get (replay) == NULL)
2043 steps = btrace_insn_prev (replay, 1);
2045 error (_("No trace."));
2048 /* We're not replaying, yet. */
2049 gdb_assert (btinfo->replay == NULL);
2050 btinfo->replay = replay;
2052 /* Make sure we're not using any stale registers. */
2053 registers_changed_thread (tp);
2055 /* The current frame with replaying - computed via btrace unwind. */
2056 frame_id = get_thread_current_frame_id (tp);
2058 /* Replace stepping related frames where necessary. */
2059 if (upd_step_frame_id)
2060 tp->control.step_frame_id = frame_id;
2061 if (upd_step_stack_frame_id)
2062 tp->control.step_stack_frame_id = frame_id;
2064 catch (const gdb_exception &except)
2066 xfree (btinfo->replay);
2067 btinfo->replay = NULL;
2069 registers_changed_thread (tp);
2077 /* Stop replaying a thread. */
2080 record_btrace_stop_replaying (struct thread_info *tp)
2082 struct btrace_thread_info *btinfo;
2084 btinfo = &tp->btrace;
2086 xfree (btinfo->replay);
2087 btinfo->replay = NULL;
2089 /* Make sure we're not leaving any stale registers. */
2090 registers_changed_thread (tp);
2093 /* Stop replaying TP if it is at the end of its execution history. */
2096 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2098 struct btrace_insn_iterator *replay, end;
2099 struct btrace_thread_info *btinfo;
2101 btinfo = &tp->btrace;
2102 replay = btinfo->replay;
2107 btrace_insn_end (&end, btinfo);
2109 if (btrace_insn_cmp (replay, &end) == 0)
2110 record_btrace_stop_replaying (tp);
2113 /* The resume method of target record-btrace. */
2116 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2118 enum btrace_thread_flag flag, cflag;
2120 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2121 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2122 step ? "step" : "cont");
2124 /* Store the execution direction of the last resume.
2126 If there is more than one resume call, we have to rely on infrun
2127 to not change the execution direction in-between. */
2128 record_btrace_resume_exec_dir = ::execution_direction;
2130 /* As long as we're not replaying, just forward the request.
2132 For non-stop targets this means that no thread is replaying. In order to
2133 make progress, we may need to explicitly move replaying threads to the end
2134 of their execution history. */
2135 if ((::execution_direction != EXEC_REVERSE)
2136 && !record_is_replaying (minus_one_ptid))
2138 this->beneath ()->resume (ptid, step, signal);
2142 /* Compute the btrace thread flag for the requested move. */
2143 if (::execution_direction == EXEC_REVERSE)
2145 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2150 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2154 /* We just indicate the resume intent here. The actual stepping happens in
2155 record_btrace_wait below.
2157 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2158 if (!target_is_non_stop_p ())
2160 gdb_assert (inferior_ptid.matches (ptid));
2162 for (thread_info *tp : all_non_exited_threads (ptid))
2164 if (tp->ptid.matches (inferior_ptid))
2165 record_btrace_resume_thread (tp, flag);
2167 record_btrace_resume_thread (tp, cflag);
2172 for (thread_info *tp : all_non_exited_threads (ptid))
2173 record_btrace_resume_thread (tp, flag);
2176 /* Async support. */
2177 if (target_can_async_p ())
2180 mark_async_event_handler (record_btrace_async_inferior_event_handler);
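/* Example (illustrative): the requested move maps to the btrace thread
   flags as follows:

     forward continue -> BTHR_CONT    forward step -> BTHR_STEP
     reverse continue -> BTHR_RCONT   reverse step -> BTHR_RSTEP

   CFLAG, assigned in the elided lines, is presumably the matching
   continue flag used for the non-focus threads on all-stop targets.  */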
2184 /* The commit_resume method of target record-btrace. */
2187 record_btrace_target::commit_resume ()
2189 if ((::execution_direction != EXEC_REVERSE)
2190 && !record_is_replaying (minus_one_ptid))
2191 beneath ()->commit_resume ();
2194 /* Cancel resuming TP. */
2197 record_btrace_cancel_resume (struct thread_info *tp)
2199 enum btrace_thread_flag flags;
2201 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2205 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2206 print_thread_id (tp),
2207 target_pid_to_str (tp->ptid).c_str (), flags,
2208 btrace_thread_flag_to_str (flags));
2210 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2211 record_btrace_stop_replaying_at_end (tp);
2214 /* Return a target_waitstatus indicating that we ran out of history. */
2216 static struct target_waitstatus
2217 btrace_step_no_history (void)
2219 struct target_waitstatus status;
2221 status.kind = TARGET_WAITKIND_NO_HISTORY;
2226 /* Return a target_waitstatus indicating that a step finished. */
2228 static struct target_waitstatus
2229 btrace_step_stopped (void)
2231 struct target_waitstatus status;
2233 status.kind = TARGET_WAITKIND_STOPPED;
2234 status.value.sig = GDB_SIGNAL_TRAP;
2239 /* Return a target_waitstatus indicating that a thread was stopped as
2242 static struct target_waitstatus
2243 btrace_step_stopped_on_request (void)
2245 struct target_waitstatus status;
2247 status.kind = TARGET_WAITKIND_STOPPED;
2248 status.value.sig = GDB_SIGNAL_0;
2253 /* Return a target_waitstatus indicating a spurious stop. */
2255 static struct target_waitstatus
2256 btrace_step_spurious (void)
2258 struct target_waitstatus status;
2260 status.kind = TARGET_WAITKIND_SPURIOUS;
2265 /* Return a target_waitstatus indicating that the thread was not resumed. */
2267 static struct target_waitstatus
2268 btrace_step_no_resumed (void)
2270 struct target_waitstatus status;
2272 status.kind = TARGET_WAITKIND_NO_RESUMED;
2277 /* Return a target_waitstatus indicating that we should wait again. */
2279 static struct target_waitstatus
2280 btrace_step_again (void)
2282 struct target_waitstatus status;
2284 status.kind = TARGET_WAITKIND_IGNORE;
2289 /* Clear the record histories. */
2292 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2294 xfree (btinfo->insn_history);
2295 xfree (btinfo->call_history);
2297 btinfo->insn_history = NULL;
2298 btinfo->call_history = NULL;
2301 /* Check whether TP's current replay position is at a breakpoint. */
2304 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2306 struct btrace_insn_iterator *replay;
2307 struct btrace_thread_info *btinfo;
2308 const struct btrace_insn *insn;
2310 btinfo = &tp->btrace;
2311 replay = btinfo->replay;
2316 insn = btrace_insn_get (replay);
2320 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2321 &btinfo->stop_reason);
2324 /* Step one instruction in forward direction. */
2326 static struct target_waitstatus
2327 record_btrace_single_step_forward (struct thread_info *tp)
2329 struct btrace_insn_iterator *replay, end, start;
2330 struct btrace_thread_info *btinfo;
2332 btinfo = &tp->btrace;
2333 replay = btinfo->replay;
2335 /* We're done if we're not replaying. */
2337 return btrace_step_no_history ();
2339 /* Check if we're stepping a breakpoint. */
2340 if (record_btrace_replay_at_breakpoint (tp))
2341 return btrace_step_stopped ();
2343 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2344 jump back to the instruction at which we started. */
2350 /* We will bail out here if we continue stepping after reaching the end
2351 of the execution history. */
2352 steps = btrace_insn_next (replay, 1);
2356 return btrace_step_no_history ();
2359 while (btrace_insn_get (replay) == NULL);
2361 /* Determine the end of the instruction trace. */
2362 btrace_insn_end (&end, btinfo);
2364 /* The execution trace contains (and ends with) the current instruction.
2365 This instruction has not been executed, yet, so the trace really ends
2366 one instruction earlier. */
2367 if (btrace_insn_cmp (replay, &end) == 0)
2368 return btrace_step_no_history ();
2370 return btrace_step_spurious ();
2373 /* Step one instruction in backward direction. */
2375 static struct target_waitstatus
2376 record_btrace_single_step_backward (struct thread_info *tp)
2378 struct btrace_insn_iterator *replay, start;
2379 struct btrace_thread_info *btinfo;
2381 btinfo = &tp->btrace;
2382 replay = btinfo->replay;
2384 /* Start replaying if we're not already doing so. */
2386 replay = record_btrace_start_replaying (tp);
2388 /* If we can't step any further, we reached the end of the history.
2389 Skip gaps during replay. If we end up at a gap (at the beginning of
2390 the trace), jump back to the instruction at which we started. */
2396 steps = btrace_insn_prev (replay, 1);
2400 return btrace_step_no_history ();
2403 while (btrace_insn_get (replay) == NULL);
2405 /* Check if we're stepping a breakpoint.
2407 For reverse-stepping, this check is after the step. There is logic in
2408 infrun.c that handles reverse-stepping separately. See, for example,
2409 proceed and adjust_pc_after_break.
2411 This code assumes that for reverse-stepping, PC points to the last
2412 de-executed instruction, whereas for forward-stepping PC points to the
2413 next to-be-executed instruction. */
2414 if (record_btrace_replay_at_breakpoint (tp))
2415 return btrace_step_stopped ();
2417 return btrace_step_spurious ();
2420 /* Step a single thread. */
2422 static struct target_waitstatus
2423 record_btrace_step_thread (struct thread_info *tp)
2425 struct btrace_thread_info *btinfo;
2426 struct target_waitstatus status;
2427 enum btrace_thread_flag flags;
2429 btinfo = &tp->btrace;
2431 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2432 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2434 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2435 target_pid_to_str (tp->ptid).c_str (), flags,
2436 btrace_thread_flag_to_str (flags));
2438 /* We can't step without an execution history. */
2439 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2440 return btrace_step_no_history ();
2445 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2448 return btrace_step_stopped_on_request ();
2451 status = record_btrace_single_step_forward (tp);
2452 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2455 return btrace_step_stopped ();
2458 status = record_btrace_single_step_backward (tp);
2459 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2462 return btrace_step_stopped ();
2465 status = record_btrace_single_step_forward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2469 btinfo->flags |= flags;
2470 return btrace_step_again ();
2473 status = record_btrace_single_step_backward (tp);
2474 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2477 btinfo->flags |= flags;
2478 return btrace_step_again ();
2481 /* We keep threads moving at the end of their execution history. The wait
2482 method will stop the thread for which the event is reported. */
2483 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2484 btinfo->flags |= flags;
2489 /* Announce further events if necessary. */
2492 record_btrace_maybe_mark_async_event
2493 (const std::vector<thread_info *> &moving,
2494 const std::vector<thread_info *> &no_history)
2496 bool more_moving = !moving.empty ();
2497 bool more_no_history = !no_history.empty ();
2499 if (!more_moving && !more_no_history)
2503 DEBUG ("movers pending");
2505 if (more_no_history)
2506 DEBUG ("no-history pending");
2508 mark_async_event_handler (record_btrace_async_inferior_event_handler);
/* The wait method of target record-btrace.  */

ptid_t
record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
                            int options)
{
  std::vector<thread_info *> moving;
  std::vector<thread_info *> no_history;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      return this->beneath ()->wait (ptid, status, options);
    }

  /* Keep a work list of moving threads.  */
  for (thread_info *tp : all_non_exited_threads (ptid))
    if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
      moving.push_back (tp);

  if (moving.empty ())
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
             target_waitstatus_to_string (status).c_str ());

      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if the
     other threads were allowed to make progress, this would result in far
     too many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  struct thread_info *eventing = NULL;
  while ((eventing == NULL) && !moving.empty ())
    {
      for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
        {
          thread_info *tp = moving[ix];

          *status = record_btrace_step_thread (tp);

          switch (status->kind)
            {
            case TARGET_WAITKIND_IGNORE:
              ix++;
              break;

            case TARGET_WAITKIND_NO_HISTORY:
              no_history.push_back (ordered_remove (moving, ix));
              break;

            default:
              eventing = unordered_remove (moving, ix);
              break;
            }
        }
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
         either stopped or reached the end of its execution history.

         In the former case, EVENTING must not be NULL.
         In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!no_history.empty ());

      /* We kept threads moving at the end of their execution history.  Stop
         EVENTING now that we are going to report its stop.  */
      eventing = unordered_remove (no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    {
      for (thread_info *tp : all_non_exited_threads ())
        record_btrace_cancel_resume (tp);
    }

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_thread (eventing);

  DEBUG ("wait ended by thread %s (%s): %s",
         print_thread_id (eventing),
         target_pid_to_str (eventing->ptid).c_str (),
         target_waitstatus_to_string (status).c_str ());

  return eventing->ptid;
}
/* The stop method of target record-btrace.  */

void
record_btrace_target::stop (ptid_t ptid)
{
  DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      this->beneath ()->stop (ptid);
    }
  else
    {
      for (thread_info *tp : all_non_exited_threads (ptid))
        {
          tp->btrace.flags &= ~BTHR_MOVE;
          tp->btrace.flags |= BTHR_STOP;
        }
    }
}

/* The can_execute_reverse method of target record-btrace.  */

bool
record_btrace_target::can_execute_reverse ()
{
  return true;
}

/* The stopped_by_sw_breakpoint method of target record-btrace.  */

bool
record_btrace_target::stopped_by_sw_breakpoint ()
{
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  return this->beneath ()->stopped_by_sw_breakpoint ();
}

/* The supports_stopped_by_sw_breakpoint method of target
   record-btrace.  */

bool
record_btrace_target::supports_stopped_by_sw_breakpoint ()
{
  if (record_is_replaying (minus_one_ptid))
    return true;

  return this->beneath ()->supports_stopped_by_sw_breakpoint ();
}

/* The stopped_by_hw_breakpoint method of target record-btrace.  */

bool
record_btrace_target::stopped_by_hw_breakpoint ()
{
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return this->beneath ()->stopped_by_hw_breakpoint ();
}

/* The supports_stopped_by_hw_breakpoint method of target
   record-btrace.  */

bool
record_btrace_target::supports_stopped_by_hw_breakpoint ()
{
  if (record_is_replaying (minus_one_ptid))
    return true;

  return this->beneath ()->supports_stopped_by_hw_breakpoint ();
}

/* The update_thread_list method of target record-btrace.  */

void
record_btrace_target::update_thread_list ()
{
  /* We don't add or remove threads during replay.  */
  if (record_is_replaying (minus_one_ptid))
    return;

  /* Forward the request.  */
  this->beneath ()->update_thread_list ();
}

/* The thread_alive method of target record-btrace.  */

bool
record_btrace_target::thread_alive (ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_is_replaying (minus_one_ptid))
    return true;

  /* Forward the request.  */
  return this->beneath ()->thread_alive (ptid);
}
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
                          const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
        record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
        return;

      *btinfo->replay = *it;
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  inferior_thread ()->suspend.stop_pc
    = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
/* The goto_record_begin method of target record-btrace.  */

void
record_btrace_target::goto_record_begin ()
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
        error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}

/* The goto_record_end method of target record-btrace.  */

void
record_btrace_target::goto_record_end ()
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);
}

/* The goto_record method of target record-btrace.  */

void
record_btrace_target::goto_record (ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
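/* For illustration, these methods back the "record goto" CLI commands
   (the instruction number below is made up):

     (gdb) record goto begin    # goto_record_begin: oldest instruction
     (gdb) record goto end      # goto_record_end: stop replaying
     (gdb) record goto 42       # goto_record (42)

   "record goto 42" errors with "No such instruction." if instruction 42 is
   not in the trace or falls into a gap.  */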
/* The record_stop_replaying method of target record-btrace.  */

void
record_btrace_target::record_stop_replaying ()
{
  for (thread_info *tp : all_non_exited_threads ())
    record_btrace_stop_replaying (tp);
}

/* The execution_direction target method.  */

enum exec_direction_kind
record_btrace_target::execution_direction ()
{
  return record_btrace_resume_exec_dir;
}

/* The prepare_to_generate_core target method.  */

void
record_btrace_target::prepare_to_generate_core ()
{
  record_btrace_generating_corefile = 1;
}

/* The done_generating_core target method.  */

void
record_btrace_target::done_generating_core ()
{
  record_btrace_generating_corefile = 0;
}
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}

/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}

/* Alias for "target record".  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      try
        {
          execute_command ("target record-btrace", from_tty);
        }
      catch (const gdb_exception &ex)
        {
          record_btrace_conf.format = BTRACE_FORMAT_NONE;
          throw;
        }
    }
}
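/* A sketch of what the fallback above looks like to the user, assuming a
   processor that supports BTS but not Intel PT:

     (gdb) record btrace
     # BTRACE_FORMAT_PT is tried first; "target record-btrace" throws, so
     # the format falls back to BTRACE_FORMAT_BTS and the command is
     # retried.  Only if that also fails is the format reset to
     # BTRACE_FORMAT_NONE and the exception re-thrown to the user.  */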
2947 /* The "set record btrace" command. */
2950 cmd_set_record_btrace (const char *args, int from_tty)
2952 printf_unfiltered (_("\"set record btrace\" must be followed "
2953 "by an appropriate subcommand.\n"));
2954 help_list (set_record_btrace_cmdlist, "set record btrace ",
2955 all_commands, gdb_stdout);
2958 /* The "show record btrace" command. */
2961 cmd_show_record_btrace (const char *args, int from_tty)
2963 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2966 /* The "show record btrace replay-memory-access" command. */
2969 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2970 struct cmd_list_element *c, const char *value)
2972 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2973 replay_memory_access);
2976 /* The "set record btrace cpu none" command. */
2979 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2981 if (args != nullptr && *args != 0)
2982 error (_("Trailing junk: '%s'."), args);
2984 record_btrace_cpu_state = CS_NONE;
2987 /* The "set record btrace cpu auto" command. */
2990 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2992 if (args != nullptr && *args != 0)
2993 error (_("Trailing junk: '%s'."), args);
2995 record_btrace_cpu_state = CS_AUTO;
2998 /* The "set record btrace cpu" command. */
3001 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3003 if (args == nullptr)
3006 /* We use a hard-coded vendor string for now. */
3007 unsigned int family, model, stepping;
3008 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3009 &model, &l1, &stepping, &l2);
3012 if (strlen (args) != l2)
3013 error (_("Trailing junk: '%s'."), args + l2);
3015 else if (matches == 2)
3017 if (strlen (args) != l1)
3018 error (_("Trailing junk: '%s'."), args + l1);
3023 error (_("Bad format. See \"help set record btrace cpu\"."));
3025 if (USHRT_MAX < family)
3026 error (_("Cpu family too big."));
3028 if (UCHAR_MAX < model)
3029 error (_("Cpu model too big."));
3031 if (UCHAR_MAX < stepping)
3032 error (_("Cpu stepping too big."));
3034 record_btrace_cpu.vendor = CV_INTEL;
3035 record_btrace_cpu.family = family;
3036 record_btrace_cpu.model = model;
3037 record_btrace_cpu.stepping = stepping;
3039 record_btrace_cpu_state = CS_CPU;
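/* Examples for the sscanf format above (values are made up):

     "intel: 6/158"    -> matches == 2: family 6, model 158, stepping 0
     "intel: 6/158/9"  -> matches == 3: family 6, model 158, stepping 9
     "intel: 6/158 x"  -> error: trailing junk
     "amd: 23/1"       -> error: only the "intel" vendor string is parsed  */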
3042 /* The "show record btrace cpu" command. */
3045 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3047 if (args != nullptr && *args != 0)
3048 error (_("Trailing junk: '%s'."), args);
3050 switch (record_btrace_cpu_state)
3053 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3057 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3061 switch (record_btrace_cpu.vendor)
3064 if (record_btrace_cpu.stepping == 0)
3065 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3066 record_btrace_cpu.family,
3067 record_btrace_cpu.model);
3069 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3070 record_btrace_cpu.family,
3071 record_btrace_cpu.model,
3072 record_btrace_cpu.stepping);
3077 error (_("Internal error: bad cpu state."));
3080 /* The "s record btrace bts" command. */
3083 cmd_set_record_btrace_bts (const char *args, int from_tty)
3085 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3086 "by an appropriate subcommand.\n"));
3087 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3088 all_commands, gdb_stdout);
3091 /* The "show record btrace bts" command. */
3094 cmd_show_record_btrace_bts (const char *args, int from_tty)
3096 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3099 /* The "set record btrace pt" command. */
3102 cmd_set_record_btrace_pt (const char *args, int from_tty)
3104 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3105 "by an appropriate subcommand.\n"));
3106 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3107 all_commands, gdb_stdout);
3110 /* The "show record btrace pt" command. */
3113 cmd_show_record_btrace_pt (const char *args, int from_tty)
3115 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3118 /* The "record bts buffer-size" show value function. */
3121 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3122 struct cmd_list_element *c,
3125 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3129 /* The "record pt buffer-size" show value function. */
3132 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3133 struct cmd_list_element *c,
3136 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
                  _("Start branch trace recording."), &record_btrace_cmdlist,
                  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
           _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
           &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
           _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
           &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
                  _("Set record options."), &set_record_btrace_cmdlist,
                  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
                  _("Show record options."), &show_record_btrace_cmdlist,
                  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
                        replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
                        _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
                        NULL, cmd_show_replay_memory_access,
                        &set_record_btrace_cmdlist,
                        &show_record_btrace_cmdlist);
3188 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3190 Set the cpu to be used for trace decode.\n\n\
3191 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3192 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3193 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3194 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3195 When GDB does not support that cpu, this option can be used to enable\n\
3196 workarounds for a similar cpu that GDB supports.\n\n\
3197 When set to \"none\", errata workarounds are disabled."),
3198 &set_record_btrace_cpu_cmdlist,
3199 "set record btrace cpu ", 1,
3200 &set_record_btrace_cmdlist);
3202 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3203 Automatically determine the cpu to be used for trace decode."),
3204 &set_record_btrace_cpu_cmdlist);
3206 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3207 Do not enable errata workarounds for trace decode."),
3208 &set_record_btrace_cpu_cmdlist);
3210 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3211 Show the cpu to be used for trace decode."),
3212 &show_record_btrace_cmdlist);
3214 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3215 _("Set record btrace bts options."),
3216 &set_record_btrace_bts_cmdlist,
3217 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3219 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3220 _("Show record btrace bts options."),
3221 &show_record_btrace_bts_cmdlist,
3222 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
  add_setshow_uinteger_cmd ("buffer-size", no_class,
                            &record_btrace_conf.bts.size,
                            _("Set the record/replay bts buffer size."),
                            _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
                            show_record_bts_buffer_size_value,
                            &set_record_btrace_bts_cmdlist,
                            &show_record_btrace_bts_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
                  _("Set record btrace pt options."),
                  &set_record_btrace_pt_cmdlist,
                  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
                  _("Show record btrace pt options."),
                  &show_record_btrace_pt_cmdlist,
                  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
                            &record_btrace_conf.pt.size,
                            _("Set the record/replay pt buffer size."),
                            _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
                            &set_record_btrace_pt_cmdlist,
                            &show_record_btrace_pt_cmdlist);
  add_target (record_btrace_target_info, record_btrace_target_open);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                               xcalloc, xfree);

  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}