1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
41 #include "gdbsupport/vec.h"
/* Descriptive strings for the "record btrace" target, as shown by
   "help target" and in the target list.  */
static const target_info record_btrace_target_info = {
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
/* The target_ops of record-btrace.  Sits on the record stratum and answers
   record/replay requests using the branch trace collected via btrace.  */

class record_btrace_target final : public target_ops
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  /* Target life-cycle methods.  */
  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  /* Forward kill requests to the generic record layer.  */
  { record_kill (this); }

  /* Recording control and status.  */
  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  /* Instruction and function-call history browsing.  */
  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)

  /* Replay state queries and control.  */
  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  /* Memory and breakpoint access; filtered while replaying.  */
  enum target_xfer_status xfer_partial (enum target_object object,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  /* Register access; only the PC is available while replaying.  */
  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  /* Frame unwinding from recorded trace.  */
  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  /* Execution control.  */
  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;

  /* Navigation within the recorded execution history.  */
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
/* The single instance of the record-btrace target.  */
static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
  replay_memory_access_read_only,
  replay_memory_access_read_write,

/* The currently allowed replay memory access type.  Defaults to read-only,
   i.e. memory writes are refused while replaying (see xfer_partial).  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  NOTE(review): presumably only
   meaningful for an explicitly configured cpu state; see
   record_btrace_get_cpu.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  While
   set, the replay memory/register restrictions are lifted.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  Only prints when the user
   enabled record debugging ("set debug record").  */

#define DEBUG(msg, args...)						\
    if (record_debug != 0)						\
      fprintf_unfiltered (gdb_stdlog,					\
			  "[record-btrace] " msg "\n", ##args);		\
/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
  switch (record_btrace_cpu_state)
      /* Report an explicitly unknown vendor for this state.  */
      record_btrace_cpu.vendor = CV_UNKNOWN;
      return &record_btrace_cpu;
  /* All cpu states should have been handled above.  */
  error (_("Internal error: bad record btrace cpu state."));
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  /* Registers must be accessible, i.e. the thread must be stopped.  */
  validate_registers_access ();

  /* Fetch new trace before checking whether any trace exists.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));
/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
  struct thread_info *tp;

  /* All validation and fetching is delegated; we only pick out the
     btrace info of the validated thread.  */
  tp = require_btrace_thread ();
/* Enable branch tracing for one thread.  Warn on errors.  */

record_btrace_enable_warn (struct thread_info *tp)
    btrace_enable (tp, &record_btrace_conf);

  /* A tracing failure for a new thread must not take down GDB;
     just report it.  */
  catch (const gdb_exception_error &error)
      warning ("%s", error.what ());
/* Enable automatic tracing of new threads.  */

record_btrace_auto_enable (void)
  DEBUG ("attach thread observer");

  /* The token allows detaching exactly this observer again later;
     see record_btrace_auto_disable.  */
  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
/* Disable automatic tracing of new threads.  */

record_btrace_auto_disable (void)
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
/* The record-btrace async event handler function.  */

record_btrace_handle_async_inferior_event (gdb_client_data data)
  /* Process the pending event like a regular inferior event.  */
  inferior_event_handler (INF_REG_EVENT, NULL);
/* See record-btrace.h.  */

record_btrace_push_target (void)
  /* New threads should be traced automatically from now on.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,

  record_btrace_generating_corefile = 0;

  /* Notify observers (e.g. the interpreters) that recording started.  */
  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
/* Disable btrace on a set of threads on scope exit.  Used to roll back
   partially enabled tracing when enabling fails for a later thread.  */

struct scoped_btrace_disable
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  /* Disable tracing for every thread registered via add_thread that was
     not discarded before destruction.  */
  ~scoped_btrace_disable ()
    for (thread_info *tp : m_threads)

  /* Register THREAD for btrace disabling on scope exit.  */
  void add_thread (thread_info *thread)
    m_threads.push_front (thread);

  /* The threads whose tracing this object manages.  */
  std::forward_list<thread_info *> m_threads;
/* Open target record-btrace.  ARGS optionally lists the threads to trace;
   when empty, all non-exited threads are traced.  */

record_btrace_target_open (const char *args, int from_tty)
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  if (!target_has_execution)
    error (_("The program is not being run."));

  /* Enable tracing for the requested (or all) threads.  */
  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);

  record_btrace_push_target ();

  /* Everything succeeded; keep tracing enabled.  */
  btrace_disable.discard ();
/* The stop_recording method of target record-btrace.  */

record_btrace_target::stop_recording ()
  DEBUG ("stop recording");

  /* Stop tracing new threads, then disable tracing for the threads
     currently being traced.  */
  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
/* The disconnect method of target record-btrace.  */

record_btrace_target::disconnect (const char *args,
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
/* The close method of target record-btrace.  */

record_btrace_target::close ()
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
/* The async method of target record-btrace.  ENABLE non-zero turns async
   mode on; zero turns it off.  */

record_btrace_target::async (int enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  /* Forward the request to the target beneath.  */
  this->beneath ()->async (enable);
/* Adjusts the size and returns a human readable size suffix.
   NOTE(review): divides *SIZE down when it is an exact multiple of
   1 GiB / 1 MiB / 1 KiB, respectively — confirm against callers.  */

record_btrace_adjust_size (unsigned int *size)
  if ((sz & ((1u << 30) - 1)) == 0)
  else if ((sz & ((1u << 20) - 1)) == 0)
  else if ((sz & ((1u << 10) - 1)) == 0)
/* Print a BTS configuration.  */

record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
  /* Print the buffer size with a human-readable unit suffix.  */
  suffix = record_btrace_adjust_size (&size);
  printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
/* Print an Intel Processor Trace configuration.  */

record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
  /* Print the buffer size with a human-readable unit suffix.  */
  suffix = record_btrace_adjust_size (&size);
  printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
522 /* Print a branch tracing configuration. */
525 record_btrace_print_conf (const struct btrace_config *conf)
527 printf_unfiltered (_("Recording format: %s.\n"),
528 btrace_format_string (conf->format));
530 switch (conf->format)
532 case BTRACE_FORMAT_NONE:
535 case BTRACE_FORMAT_BTS:
536 record_btrace_print_bts_conf (&conf->bts);
539 case BTRACE_FORMAT_PT:
540 record_btrace_print_pt_conf (&conf->pt);
544 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* The info_record method of target record-btrace.  Prints the recording
   configuration and statistics about the recorded trace.  */

record_btrace_target::info_record ()
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  tp = find_thread_ptid (inferior_ptid);
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  /* Print the recording format even if there is no trace yet.  */
  conf = ::btrace_conf (btinfo);
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (!btrace_is_empty (tp))
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Determine the number of recorded calls from the last call...  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      /* ...and the number of instructions from the last instruction.  */
      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)

      gaps = btinfo->ngaps;

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp),
		     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
/* Print a decode error ERRCODE for trace format FORMAT to UIOUT.  */

btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
  uiout->text (errstr);
  uiout->text (_("]\n"));
/* Print an unsigned int VAL to UIOUT under the field name FLD.  */

ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
  uiout->field_fmt (fld, "%u", val);
/* A range of source lines within a single symtab.  An empty range is
   represented by END <= BEGIN; see btrace_line_range_is_empty.  */

struct btrace_line_range
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */

  /* The last line (exclusive).  */
/* Construct a line range covering [BEGIN, END) in SYMTAB.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
  struct btrace_line_range range;

  range.symtab = symtab;
/* Add a line to a line range, widening the range as necessary so LINE
   is covered.  Returns the updated range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
  if (range.end <= range.begin)
      /* This is the first entry.  */
      range.end = line + 1;
  /* Otherwise extend whichever bound excludes LINE.  */
  else if (line < range.begin)
  else if (range.end < line)
682 /* Return non-zero if RANGE is empty, zero otherwise. */
685 btrace_line_range_is_empty (struct btrace_line_range range)
687 return range.end <= range.begin;
690 /* Return non-zero if LHS contains RHS, zero otherwise. */
693 btrace_line_range_contains_range (struct btrace_line_range lhs,
694 struct btrace_line_range rhs)
696 return ((lhs.symtab == rhs.symtab)
697 && (lhs.begin <= rhs.begin)
698 && (rhs.end <= lhs.end));
/* Find the line range associated with PC.  Returns an empty range when no
   symtab or line table information is available for PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;

  symtab = find_pc_line_symtab (pc);
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
    return btrace_mk_line_range (symtab, 0, 0);

  /* Start with an empty range and accumulate every line entry that
     exactly matches PC.  */
  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
      /* emplace closes the previous tuple, if any, and opens a new one
	 for this source line.  */
      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* The list of instructions belonging to this source line.  */
      asm_list->emplace (uiout, "line_asm_insn");
/* Disassemble a section of the recorded instruction trace [BEGIN, END)
   to UIOUT, interleaving source lines when DISASSEMBLY_SOURCE is set
   in FLAGS.  Gaps in the trace are printed as decode errors.  */

btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  /* Always mark speculatively executed instructions.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  /* Emitters for the currently open source-line tuple and its
     instruction list; (re)opened lazily as lines change.  */
  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),

	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      /* Only print lines not already covered by the previously
		 printed range.  */
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
	      else if (!src_and_asm_tuple.has_value ())
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");

	  gdb_assert (src_and_asm_tuple.has_value ());
	  gdb_assert (asm_list.has_value ());

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
/* The insn_history method of target record-btrace.  SIZE gives the number
   of instructions to print; its sign gives the direction (negative means
   backwards).  */

record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();

  /* HISTORY remembers the range shown by a previous "record
     instruction-history" command, for continuation.  */
  history = btinfo->insn_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	  btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */

	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);

	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);

      /* Continue from the previously shown range.  */
      begin = history->begin;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

	  covered = btrace_insn_prev (&begin, context);

	  covered = btrace_insn_next (&end, context);

      btrace_insn_history (uiout, btinfo, &begin, &end, flags);
      /* Nothing (more) to print in the requested direction.  */
	printf_unfiltered (_("At the start of the branch trace record.\n"));
	printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the printed range for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
/* The insn_history_range method of target record-btrace.  Prints the
   recorded instructions with numbers in [FROM, TO], both inclusive.  */

record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  /* Remember the printed range for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
/* The insn_history_from method of target record-btrace.  Prints CONTEXT
   instructions ending (SIZE < 0) or starting (SIZE > 0) at FROM.  */

record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
  ULONGEST begin, end, context;

  context = abs (size);
    error (_("Bad record instruction-history-size."));

      /* Count backwards from FROM.  */
      begin = from - context + 1;

      /* Count forwards from FROM.  */
      end = from + context - 1;

      /* Check for wrap-around.  */

  insn_history_range (begin, end, flags);
/* Print the instruction number range for a function call history line.  */

btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  /* Both bounds are printed inclusive.  */
  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_field_uint (uiout, "insn end", end);
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
  struct symtab *symtab;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      /* Skip instructions that map to a different symtab or have no
	 line information.  */
      if (sal.symtab != symtab || sal.line == 0)

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
/* Print the source line information for a function call history line.  */

btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)),
		       ui_out_style_kind::FILE);

  /* Print the line range covered by BFUN's instructions.  */
  btrace_compute_src_line_range (bfun, &begin, &end);

  uiout->field_int ("min line", begin);

  uiout->field_int ("max line", end);
/* Get the name of a branch trace function.  Prefers the full symbol name,
   falls back to the minimal symbol name.  */

btrace_get_bfun_name (const struct btrace_function *bfun)
  struct minimal_symbol *msym;

    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
/* Disassemble a section of the recorded function trace [BEGIN, END)
   to UIOUT, one line per function call, honoring the RECORD_PRINT_*
   bits in INT_FLAGS.  */

btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;

      bfun = btrace_call_get (&it);

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

      /* Indent by call depth when requested.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)

	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
			     ui_out_style_kind::FUNCTION);
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
			     ui_out_style_kind::FUNCTION);
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     ui_out_style_kind::FUNCTION);

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
/* The call_history method of target record-btrace.  SIZE gives the number
   of function calls to print; its sign gives the direction (negative
   means backwards).  */

record_btrace_target::call_history (int size, record_print_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();

  /* HISTORY remembers the range shown by a previous "record
     function-call-history" command, for continuation.  */
  history = btinfo->call_history;
  if (history == NULL)
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;

	  btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */

	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);

	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);

      /* Continue from the previously shown range.  */
      begin = history->begin;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

	  covered = btrace_call_prev (&begin, context);

	  covered = btrace_call_next (&end, context);

      btrace_call_history (uiout, btinfo, &begin, &end, flags);
      /* Nothing (more) to print in the requested direction.  */
	printf_unfiltered (_("At the start of the branch trace record.\n"));
	printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the printed range for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
/* The call_history_range method of target record-btrace.  Prints the
   recorded function calls with numbers in [FROM, TO], both inclusive.  */

record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  /* Remember the printed range for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
/* The call_history_from method of target record-btrace.  Prints CONTEXT
   function calls ending (SIZE < 0) or starting (SIZE > 0) at FROM.  */

record_btrace_target::call_history_from (ULONGEST from, int size,
					 record_print_flags flags)
  ULONGEST begin, end, context;

  context = abs (size);
    error (_("Bad record function-call-history-size."));

      /* Count backwards from FROM.  */
      begin = from - context + 1;

      /* Count forwards from FROM.  */
      end = from + context - 1;

      /* Check for wrap-around.  */

  call_history_range ( begin, end, flags);
1379 /* The record_method method of target record-btrace. */
1382 record_btrace_target::record_method (ptid_t ptid)
1384 struct thread_info * const tp = find_thread_ptid (ptid);
1387 error (_("No thread."));
1389 if (tp->btrace.target == NULL)
1390 return RECORD_METHOD_NONE;
1392 return RECORD_METHOD_BTRACE;
1395 /* The record_is_replaying method of target record-btrace. */
1398 record_btrace_target::record_is_replaying (ptid_t ptid)
1400 for (thread_info *tp : all_non_exited_threads (ptid))
1401 if (btrace_is_replaying (tp))
1407 /* The record_will_replay method of target record-btrace. */
1410 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1412 return dir == EXEC_REVERSE || record_is_replaying (ptid);
/* The xfer_partial method of target record-btrace.  While replaying with
   read-only replay memory access, memory writes are refused and reads are
   restricted to read-only sections; otherwise requests are forwarded to
   the target beneath.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
	case TARGET_OBJECT_MEMORY:
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
		return TARGET_XFER_UNAVAILABLE;

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);

	    return TARGET_XFER_UNAVAILABLE;

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
1467 /* The insert_breakpoint method of target record-btrace.  Temporarily
   switches replay memory access to read-write so the breakpoint can be
   written, then restores the previous setting on both the success and
   the exception path.  */
1470 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1471 struct bp_target_info *bp_tgt)
1476 /* Inserting breakpoints requires accessing memory. Allow it for the
1477 duration of this function. */
1478 old = replay_memory_access;
1479 replay_memory_access = replay_memory_access_read_write;
1484 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1486 catch (const gdb_exception &except)
   /* Restore the access mode before re-throwing.  */
1488 replay_memory_access = old;
   /* Normal path: restore the access mode as well.  */
1491 replay_memory_access = old;
1496 /* The remove_breakpoint method of target record-btrace.  Mirrors
   insert_breakpoint: memory access is widened to read-write for the
   duration of the call and restored afterwards on both paths.  */
1499 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1500 struct bp_target_info *bp_tgt,
1501 enum remove_bp_reason reason)
1506 /* Removing breakpoints requires accessing memory. Allow it for the
1507 duration of this function. */
1508 old = replay_memory_access;
1509 replay_memory_access = replay_memory_access_read_write;
1514 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1516 catch (const gdb_exception &except)
   /* Restore the access mode before re-throwing.  */
1518 replay_memory_access = old;
   /* Normal path: restore the access mode as well.  */
1521 replay_memory_access = old;
1526 /* The fetch_registers method of target record-btrace.  While replaying,
   only the PC register can be supplied, taken from the current replay
   instruction; all other register requests fail.  When not replaying
   (or when generating a core file), forward to the target beneath.  */
1529 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1531 struct btrace_insn_iterator *replay;
1532 struct thread_info *tp;
1534 tp = find_thread_ptid (regcache->ptid ());
1535 gdb_assert (tp != NULL);
1537 replay = tp->btrace.replay;
1538 if (replay != NULL && !record_btrace_generating_corefile)
1540 const struct btrace_insn *insn;
1541 struct gdbarch *gdbarch;
1544 gdbarch = regcache->arch ();
1545 pcreg = gdbarch_pc_regnum (gdbarch);
1549 /* We can only provide the PC register. */
1550 if (regno >= 0 && regno != pcreg)
1553 insn = btrace_insn_get (replay);
1554 gdb_assert (insn != NULL);
1556 regcache->raw_supply (regno, &insn->pc);
1559 this->beneath ()->fetch_registers (regcache, regno);
1562 /* The store_registers method of target record-btrace.  Writing registers
   is refused while replaying (it would diverge from recorded history);
   otherwise the request is forwarded to the target beneath.  */
1565 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1567 if (!record_btrace_generating_corefile
1568 && record_is_replaying (regcache->ptid ()))
1569 error (_("Cannot write registers while replaying."));
1571 gdb_assert (may_write_registers != 0);
1573 this->beneath ()->store_registers (regcache, regno);
1576 /* The prepare_to_store method of target record-btrace.  A no-op while
   replaying (stores are refused anyway); otherwise forwarded beneath.  */
1579 record_btrace_target::prepare_to_store (struct regcache *regcache)
1581 if (!record_btrace_generating_corefile
1582 && record_is_replaying (regcache->ptid ()))
1585 this->beneath ()->prepare_to_store (regcache);
1588 /* The branch trace frame cache. */
1590 struct btrace_frame_cache
   /* The thread this frame belongs to.  */
1593 struct thread_info *tp;
1595 /* The frame info. */
1596 struct frame_info *frame;
1598 /* The branch trace function segment. */
1599 const struct btrace_function *bfun;
1602 /* A struct btrace_frame_cache hash table indexed by NEXT.
   NOTE(review): the hash/equality functions below key on the cache's
   FRAME pointer; confirm whether "NEXT" in this comment is stale.  */
1604 static htab_t bfcache;
1606 /* hash_f for htab_create_alloc of bfcache.  Hashes the frame pointer
   stored in the cache entry.  */
1609 bfcache_hash (const void *arg)
1611 const struct btrace_frame_cache *cache
1612 = (const struct btrace_frame_cache *) arg;
1614 return htab_hash_pointer (cache->frame);
1617 /* eq_f for htab_create_alloc of bfcache.  Two entries are equal iff they
   refer to the same frame_info pointer.  */
1620 bfcache_eq (const void *arg1, const void *arg2)
1622 const struct btrace_frame_cache *cache1
1623 = (const struct btrace_frame_cache *) arg1;
1624 const struct btrace_frame_cache *cache2
1625 = (const struct btrace_frame_cache *) arg2;
1627 return cache1->frame == cache2->frame;
1630 /* Create a new btrace frame cache. */
1632 static struct btrace_frame_cache *
1633 bfcache_new (struct frame_info *frame)
1635 struct btrace_frame_cache *cache;
1638 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1639 cache->frame = frame;
1641 slot = htab_find_slot (bfcache, cache, INSERT);
1642 gdb_assert (*slot == NULL);
1648 /* Extract the branch trace function from a branch trace frame.  Looks
   FRAME up in BFCACHE; returns the cached function segment, or (for a
   frame not created by this unwinder) a null result when the lookup
   fails — the guard for a missing slot is elided in this extract.  */
1650 static const struct btrace_function *
1651 btrace_get_frame_function (struct frame_info *frame)
1653 const struct btrace_frame_cache *cache;
1654 struct btrace_frame_cache pattern;
1657 pattern.frame = frame;
1659 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1663 cache = (const struct btrace_frame_cache *) *slot;
1667 /* Implement stop_reason method for record_btrace_frame_unwind. */
1669 static enum unwind_stop_reason
1670 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1673 const struct btrace_frame_cache *cache;
1674 const struct btrace_function *bfun;
1676 cache = (const struct btrace_frame_cache *) *this_cache;
1678 gdb_assert (bfun != NULL);
1681 return UNWIND_UNAVAILABLE;
1683 return UNWIND_NO_REASON;
1686 /* Implement this_id method for record_btrace_frame_unwind. */
1689 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1690 struct frame_id *this_id)
1692 const struct btrace_frame_cache *cache;
1693 const struct btrace_function *bfun;
1694 struct btrace_call_iterator it;
1695 CORE_ADDR code, special;
1697 cache = (const struct btrace_frame_cache *) *this_cache;
1700 gdb_assert (bfun != NULL);
1702 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1703 bfun = btrace_call_get (&it);
1705 code = get_frame_func (this_frame);
1706 special = bfun->number;
1708 *this_id = frame_id_build_unavailable_stack_special (code, special);
1710 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1711 btrace_get_bfun_name (cache->bfun),
1712 core_addr_to_string_nz (this_id->code_addr),
1713 core_addr_to_string_nz (this_id->special_addr));
1716 /* Implement prev_register method for record_btrace_frame_unwind. */
1718 static struct value *
1719 record_btrace_frame_prev_register (struct frame_info *this_frame,
1723 const struct btrace_frame_cache *cache;
1724 const struct btrace_function *bfun, *caller;
1725 struct btrace_call_iterator it;
1726 struct gdbarch *gdbarch;
1730 gdbarch = get_frame_arch (this_frame);
1731 pcreg = gdbarch_pc_regnum (gdbarch);
1732 if (pcreg < 0 || regnum != pcreg)
1733 throw_error (NOT_AVAILABLE_ERROR,
1734 _("Registers are not available in btrace record history"));
1736 cache = (const struct btrace_frame_cache *) *this_cache;
1738 gdb_assert (bfun != NULL);
1740 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1741 throw_error (NOT_AVAILABLE_ERROR,
1742 _("No caller in btrace record history"));
1744 caller = btrace_call_get (&it);
1746 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1747 pc = caller->insn.front ().pc;
1750 pc = caller->insn.back ().pc;
1751 pc += gdb_insn_length (gdbarch, pc);
1754 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1755 btrace_get_bfun_name (bfun), bfun->level,
1756 core_addr_to_string_nz (pc));
1758 return frame_unwind_got_address (this_frame, regnum, pc);
1761 /* Implement sniffer method for record_btrace_frame_unwind. */
1764 record_btrace_frame_sniffer (const struct frame_unwind *self,
1765 struct frame_info *this_frame,
1768 const struct btrace_function *bfun;
1769 struct btrace_frame_cache *cache;
1770 struct thread_info *tp;
1771 struct frame_info *next;
1773 /* THIS_FRAME does not contain a reference to its thread. */
1774 tp = inferior_thread ();
1777 next = get_next_frame (this_frame);
1780 const struct btrace_insn_iterator *replay;
1782 replay = tp->btrace.replay;
1784 bfun = &replay->btinfo->functions[replay->call_index];
1788 const struct btrace_function *callee;
1789 struct btrace_call_iterator it;
1791 callee = btrace_get_frame_function (next);
1792 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1795 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1798 bfun = btrace_call_get (&it);
1804 DEBUG ("[frame] sniffed frame for %s on level %d",
1805 btrace_get_bfun_name (bfun), bfun->level);
1807 /* This is our frame. Initialize the frame cache. */
1808 cache = bfcache_new (this_frame);
1812 *this_cache = cache;
1816 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1819 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1820 struct frame_info *this_frame,
1823 const struct btrace_function *bfun, *callee;
1824 struct btrace_frame_cache *cache;
1825 struct btrace_call_iterator it;
1826 struct frame_info *next;
1827 struct thread_info *tinfo;
1829 next = get_next_frame (this_frame);
1833 callee = btrace_get_frame_function (next);
1837 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1840 tinfo = inferior_thread ();
1841 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1844 bfun = btrace_call_get (&it);
1846 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1847 btrace_get_bfun_name (bfun), bfun->level);
1849 /* This is our frame. Initialize the frame cache. */
1850 cache = bfcache_new (this_frame);
1854 *this_cache = cache;
   /* Implement dealloc_cache for the btrace unwinders: remove the cache
   entry from BFCACHE when the frame is destroyed.  The entry itself
   lives on the frame obstack, so there is nothing to free here.  */
1859 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1861 struct btrace_frame_cache *cache;
1864 cache = (struct btrace_frame_cache *) this_cache;
1866 slot = htab_find_slot (bfcache, cache, NO_INSERT)
1867 gdb_assert (slot != NULL);
1869 htab_remove_elt (bfcache, cache);
1872 /* btrace recording does not store previous memory content, neither the stack
1873 frames content. Any unwinding would return erroneous results as the stack
1874 contents no longer matches the changed PC value restored from history.
1875 Therefore this unwinder reports any possibly unwound registers as
   unavailable (only the PC is provided, see prev_register above).  */
1878 const struct frame_unwind record_btrace_frame_unwind =
1881 record_btrace_frame_unwind_stop_reason,
1882 record_btrace_frame_this_id,
1883 record_btrace_frame_prev_register,
1885 record_btrace_frame_sniffer,
1886 record_btrace_frame_dealloc_cache
   /* Same unwinder callbacks as record_btrace_frame_unwind, but with the
   tail-call sniffer so tail-call frames get their own unwinder.  */
1889 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1892 record_btrace_frame_unwind_stop_reason,
1893 record_btrace_frame_this_id,
1894 record_btrace_frame_prev_register,
1896 record_btrace_tailcall_frame_sniffer,
1897 record_btrace_frame_dealloc_cache
1900 /* Implement the get_unwinder method. */
1902 const struct frame_unwind *
1903 record_btrace_target::get_unwinder ()
1905 return &record_btrace_frame_unwind;
1908 /* Implement the get_tailcall_unwinder method. */
1910 const struct frame_unwind *
1911 record_btrace_target::get_tailcall_unwinder ()
1913 return &record_btrace_tailcall_frame_unwind;
1916 /* Return a human-readable string for FLAG. */
1919 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1927 return "reverse-step";
1933 return "reverse-cont";
1942 /* Indicate that TP should be resumed according to FLAG. */
1945 record_btrace_resume_thread (struct thread_info *tp,
1946 enum btrace_thread_flag flag)
1948 struct btrace_thread_info *btinfo;
1950 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1951 target_pid_to_str (tp->ptid).c_str (), flag,
1952 btrace_thread_flag_to_str (flag));
1954 btinfo = &tp->btrace;
1956 /* Fetch the latest branch trace. */
1957 btrace_fetch (tp, record_btrace_get_cpu ());
1959 /* A resume request overwrites a preceding resume or stop request. */
1960 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1961 btinfo->flags |= flag;
1964 /* Get the current frame for TP. */
1966 static struct frame_id
1967 get_thread_current_frame_id (struct thread_info *tp)
1972 /* Set current thread, which is implicitly used by
1973 get_current_frame. */
1974 scoped_restore_current_thread restore_thread;
1976 switch_to_thread (tp);
1978 /* Clear the executing flag to allow changes to the current frame.
1979 We are not actually running, yet. We just started a reverse execution
1980 command or a record goto command.
1981 For the latter, EXECUTING is false and this has no effect.
1982 For the former, EXECUTING is true and we're in wait, about to
1983 move the thread. Since we need to recompute the stack, we temporarily
1984 set EXECUTING to flase. */
1985 executing = tp->executing;
1986 set_executing (inferior_ptid, false);
1991 id = get_frame_id (get_current_frame ());
1993 catch (const gdb_exception &except)
1995 /* Restore the previous execution state. */
1996 set_executing (inferior_ptid, executing);
2001 /* Restore the previous execution state. */
2002 set_executing (inferior_ptid, executing);
2007 /* Start replaying a thread. */
2009 static struct btrace_insn_iterator *
2010 record_btrace_start_replaying (struct thread_info *tp)
2012 struct btrace_insn_iterator *replay;
2013 struct btrace_thread_info *btinfo;
2015 btinfo = &tp->btrace;
2018 /* We can't start replaying without trace. */
2019 if (btinfo->functions.empty ())
2022 /* GDB stores the current frame_id when stepping in order to detects steps
2024 Since frames are computed differently when we're replaying, we need to
2025 recompute those stored frames and fix them up so we can still detect
2026 subroutines after we started replaying. */
2029 struct frame_id frame_id;
2030 int upd_step_frame_id, upd_step_stack_frame_id;
2032 /* The current frame without replaying - computed via normal unwind. */
2033 frame_id = get_thread_current_frame_id (tp);
2035 /* Check if we need to update any stepping-related frame id's. */
2036 upd_step_frame_id = frame_id_eq (frame_id,
2037 tp->control.step_frame_id);
2038 upd_step_stack_frame_id = frame_id_eq (frame_id,
2039 tp->control.step_stack_frame_id);
2041 /* We start replaying at the end of the branch trace. This corresponds
2042 to the current instruction. */
2043 replay = XNEW (struct btrace_insn_iterator);
2044 btrace_insn_end (replay, btinfo);
2046 /* Skip gaps at the end of the trace. */
2047 while (btrace_insn_get (replay) == NULL)
2051 steps = btrace_insn_prev (replay, 1);
2053 error (_("No trace."));
2056 /* We're not replaying, yet. */
2057 gdb_assert (btinfo->replay == NULL);
2058 btinfo->replay = replay;
2060 /* Make sure we're not using any stale registers. */
2061 registers_changed_thread (tp);
2063 /* The current frame with replaying - computed via btrace unwind. */
2064 frame_id = get_thread_current_frame_id (tp);
2066 /* Replace stepping related frames where necessary. */
2067 if (upd_step_frame_id)
2068 tp->control.step_frame_id = frame_id;
2069 if (upd_step_stack_frame_id)
2070 tp->control.step_stack_frame_id = frame_id;
2072 catch (const gdb_exception &except)
2074 xfree (btinfo->replay);
2075 btinfo->replay = NULL;
2077 registers_changed_thread (tp);
2085 /* Stop replaying a thread.  Frees TP's replay iterator and invalidates
   its register cache so live registers get re-fetched.  */
2088 record_btrace_stop_replaying (struct thread_info *tp)
2090 struct btrace_thread_info *btinfo;
2092 btinfo = &tp->btrace;
2094 xfree (btinfo->replay);
2095 btinfo->replay = NULL;
2097 /* Make sure we're not leaving any stale registers. */
2098 registers_changed_thread (tp);
2101 /* Stop replaying TP if it is at the end of its execution history.
   A thread whose replay position equals the trace end is effectively
   caught up with "now" and can go back to live execution.  */
2104 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2106 struct btrace_insn_iterator *replay, end;
2107 struct btrace_thread_info *btinfo;
2109 btinfo = &tp->btrace;
2110 replay = btinfo->replay;
2115 btrace_insn_end (&end, btinfo);
2117 if (btrace_insn_cmp (replay, &end) == 0)
2118 record_btrace_stop_replaying (tp);
2121 /* The resume method of target record-btrace. */
2124 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2126 enum btrace_thread_flag flag, cflag;
2128 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2129 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2130 step ? "step" : "cont");
2132 /* Store the execution direction of the last resume.
2134 If there is more than one resume call, we have to rely on infrun
2135 to not change the execution direction in-between. */
2136 record_btrace_resume_exec_dir = ::execution_direction;
2138 /* As long as we're not replaying, just forward the request.
2140 For non-stop targets this means that no thread is replaying. In order to
2141 make progress, we may need to explicitly move replaying threads to the end
2142 of their execution history. */
2143 if ((::execution_direction != EXEC_REVERSE)
2144 && !record_is_replaying (minus_one_ptid))
2146 this->beneath ()->resume (ptid, step, signal);
2150 /* Compute the btrace thread flag for the requested move. */
2151 if (::execution_direction == EXEC_REVERSE)
2153 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2158 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2162 /* We just indicate the resume intent here. The actual stepping happens in
2163 record_btrace_wait below.
2165 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2166 if (!target_is_non_stop_p ())
2168 gdb_assert (inferior_ptid.matches (ptid));
2170 for (thread_info *tp : all_non_exited_threads (ptid))
2172 if (tp->ptid.matches (inferior_ptid))
2173 record_btrace_resume_thread (tp, flag);
2175 record_btrace_resume_thread (tp, cflag);
2180 for (thread_info *tp : all_non_exited_threads (ptid))
2181 record_btrace_resume_thread (tp, flag);
2184 /* Async support. */
2185 if (target_can_async_p ())
2188 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2192 /* The commit_resume method of target record-btrace. */
2195 record_btrace_target::commit_resume ()
2197 if ((::execution_direction != EXEC_REVERSE)
2198 && !record_is_replaying (minus_one_ptid))
2199 beneath ()->commit_resume ();
2202 /* Cancel resuming TP. */
2205 record_btrace_cancel_resume (struct thread_info *tp)
2207 enum btrace_thread_flag flags;
2209 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2213 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2214 print_thread_id (tp),
2215 target_pid_to_str (tp->ptid).c_str (), flags,
2216 btrace_thread_flag_to_str (flags));
2218 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2219 record_btrace_stop_replaying_at_end (tp);
2222 /* Return a target_waitstatus indicating that we ran out of history. */
2224 static struct target_waitstatus
2225 btrace_step_no_history (void)
2227 struct target_waitstatus status;
2229 status.kind = TARGET_WAITKIND_NO_HISTORY;
2234 /* Return a target_waitstatus indicating that a step finished. */
2236 static struct target_waitstatus
2237 btrace_step_stopped (void)
2239 struct target_waitstatus status;
2241 status.kind = TARGET_WAITKIND_STOPPED;
2242 status.value.sig = GDB_SIGNAL_TRAP;
2247 /* Return a target_waitstatus indicating that a thread was stopped as
2250 static struct target_waitstatus
2251 btrace_step_stopped_on_request (void)
2253 struct target_waitstatus status;
2255 status.kind = TARGET_WAITKIND_STOPPED;
2256 status.value.sig = GDB_SIGNAL_0;
2261 /* Return a target_waitstatus indicating a spurious stop. */
2263 static struct target_waitstatus
2264 btrace_step_spurious (void)
2266 struct target_waitstatus status;
2268 status.kind = TARGET_WAITKIND_SPURIOUS;
2273 /* Return a target_waitstatus indicating that the thread was not resumed. */
2275 static struct target_waitstatus
2276 btrace_step_no_resumed (void)
2278 struct target_waitstatus status;
2280 status.kind = TARGET_WAITKIND_NO_RESUMED;
2285 /* Return a target_waitstatus indicating that we should wait again. */
2287 static struct target_waitstatus
2288 btrace_step_again (void)
2290 struct target_waitstatus status;
2292 status.kind = TARGET_WAITKIND_IGNORE;
2297 /* Clear the record histories. */
2300 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2302 xfree (btinfo->insn_history);
2303 xfree (btinfo->call_history);
2305 btinfo->insn_history = NULL;
2306 btinfo->call_history = NULL;
2309 /* Check whether TP's current replay position is at a breakpoint. */
2312 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2314 struct btrace_insn_iterator *replay;
2315 struct btrace_thread_info *btinfo;
2316 const struct btrace_insn *insn;
2318 btinfo = &tp->btrace;
2319 replay = btinfo->replay;
2324 insn = btrace_insn_get (replay);
2328 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2329 &btinfo->stop_reason);
2332 /* Step one instruction in forward direction. */
2334 static struct target_waitstatus
2335 record_btrace_single_step_forward (struct thread_info *tp)
2337 struct btrace_insn_iterator *replay, end, start;
2338 struct btrace_thread_info *btinfo;
2340 btinfo = &tp->btrace;
2341 replay = btinfo->replay;
2343 /* We're done if we're not replaying. */
2345 return btrace_step_no_history ();
2347 /* Check if we're stepping a breakpoint. */
2348 if (record_btrace_replay_at_breakpoint (tp))
2349 return btrace_step_stopped ();
2351 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2352 jump back to the instruction at which we started. */
2358 /* We will bail out here if we continue stepping after reaching the end
2359 of the execution history. */
2360 steps = btrace_insn_next (replay, 1);
2364 return btrace_step_no_history ();
2367 while (btrace_insn_get (replay) == NULL);
2369 /* Determine the end of the instruction trace. */
2370 btrace_insn_end (&end, btinfo);
2372 /* The execution trace contains (and ends with) the current instruction.
2373 This instruction has not been executed, yet, so the trace really ends
2374 one instruction earlier. */
2375 if (btrace_insn_cmp (replay, &end) == 0)
2376 return btrace_step_no_history ();
2378 return btrace_step_spurious ();
2381 /* Step one instruction in backward direction. */
2383 static struct target_waitstatus
2384 record_btrace_single_step_backward (struct thread_info *tp)
2386 struct btrace_insn_iterator *replay, start;
2387 struct btrace_thread_info *btinfo;
2389 btinfo = &tp->btrace;
2390 replay = btinfo->replay;
2392 /* Start replaying if we're not already doing so. */
2394 replay = record_btrace_start_replaying (tp);
2396 /* If we can't step any further, we reached the end of the history.
2397 Skip gaps during replay. If we end up at a gap (at the beginning of
2398 the trace), jump back to the instruction at which we started. */
2404 steps = btrace_insn_prev (replay, 1);
2408 return btrace_step_no_history ();
2411 while (btrace_insn_get (replay) == NULL);
2413 /* Check if we're stepping a breakpoint.
2415 For reverse-stepping, this check is after the step. There is logic in
2416 infrun.c that handles reverse-stepping separately. See, for example,
2417 proceed and adjust_pc_after_break.
2419 This code assumes that for reverse-stepping, PC points to the last
2420 de-executed instruction, whereas for forward-stepping PC points to the
2421 next to-be-executed instruction. */
2422 if (record_btrace_replay_at_breakpoint (tp))
2423 return btrace_step_stopped ();
2425 return btrace_step_spurious ();
2428 /* Step a single thread. */
2430 static struct target_waitstatus
2431 record_btrace_step_thread (struct thread_info *tp)
2433 struct btrace_thread_info *btinfo;
2434 struct target_waitstatus status;
2435 enum btrace_thread_flag flags;
2437 btinfo = &tp->btrace;
2439 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2440 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2442 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2443 target_pid_to_str (tp->ptid).c_str (), flags,
2444 btrace_thread_flag_to_str (flags));
2446 /* We can't step without an execution history. */
2447 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2448 return btrace_step_no_history ();
2453 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2456 return btrace_step_stopped_on_request ();
2459 status = record_btrace_single_step_forward (tp);
2460 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2463 return btrace_step_stopped ();
2466 status = record_btrace_single_step_backward (tp);
2467 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2470 return btrace_step_stopped ();
2473 status = record_btrace_single_step_forward (tp);
2474 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2477 btinfo->flags |= flags;
2478 return btrace_step_again ();
2481 status = record_btrace_single_step_backward (tp);
2482 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2485 btinfo->flags |= flags;
2486 return btrace_step_again ();
2489 /* We keep threads moving at the end of their execution history. The wait
2490 method will stop the thread for whom the event is reported. */
2491 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2492 btinfo->flags |= flags;
2497 /* Announce further events if necessary. */
2500 record_btrace_maybe_mark_async_event
2501 (const std::vector<thread_info *> &moving,
2502 const std::vector<thread_info *> &no_history)
2504 bool more_moving = !moving.empty ();
2505 bool more_no_history = !no_history.empty ();;
2507 if (!more_moving && !more_no_history)
2511 DEBUG ("movers pending");
2513 if (more_no_history)
2514 DEBUG ("no-history pending");
2516 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2519 /* The wait method of target record-btrace. */
2522 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2525 std::vector<thread_info *> moving;
2526 std::vector<thread_info *> no_history;
2528 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2530 /* As long as we're not replaying, just forward the request. */
2531 if ((::execution_direction != EXEC_REVERSE)
2532 && !record_is_replaying (minus_one_ptid))
2534 return this->beneath ()->wait (ptid, status, options);
2537 /* Keep a work list of moving threads. */
2538 for (thread_info *tp : all_non_exited_threads (ptid))
2539 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2540 moving.push_back (tp);
2542 if (moving.empty ())
2544 *status = btrace_step_no_resumed ();
2546 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2547 target_waitstatus_to_string (status).c_str ());
2552 /* Step moving threads one by one, one step each, until either one thread
2553 reports an event or we run out of threads to step.
2555 When stepping more than one thread, chances are that some threads reach
2556 the end of their execution history earlier than others. If we reported
2557 this immediately, all-stop on top of non-stop would stop all threads and
2558 resume the same threads next time. And we would report the same thread
2559 having reached the end of its execution history again.
2561 In the worst case, this would starve the other threads. But even if other
2562 threads would be allowed to make progress, this would result in far too
2563 many intermediate stops.
2565 We therefore delay the reporting of "no execution history" until we have
2566 nothing else to report. By this time, all threads should have moved to
2567 either the beginning or the end of their execution history. There will
2568 be a single user-visible stop. */
2569 struct thread_info *eventing = NULL;
2570 while ((eventing == NULL) && !moving.empty ())
2572 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2574 thread_info *tp = moving[ix];
2576 *status = record_btrace_step_thread (tp);
2578 switch (status->kind)
2580 case TARGET_WAITKIND_IGNORE:
2584 case TARGET_WAITKIND_NO_HISTORY:
2585 no_history.push_back (ordered_remove (moving, ix));
2589 eventing = unordered_remove (moving, ix);
2595 if (eventing == NULL)
2597 /* We started with at least one moving thread. This thread must have
2598 either stopped or reached the end of its execution history.
2600 In the former case, EVENTING must not be NULL.
2601 In the latter case, NO_HISTORY must not be empty. */
2602 gdb_assert (!no_history.empty ());
2604 /* We kept threads moving at the end of their execution history. Stop
2605 EVENTING now that we are going to report its stop. */
2606 eventing = unordered_remove (no_history, 0);
2607 eventing->btrace.flags &= ~BTHR_MOVE;
2609 *status = btrace_step_no_history ();
2612 gdb_assert (eventing != NULL);
2614 /* We kept threads replaying at the end of their execution history. Stop
2615 replaying EVENTING now that we are going to report its stop. */
2616 record_btrace_stop_replaying_at_end (eventing);
2618 /* Stop all other threads. */
2619 if (!target_is_non_stop_p ())
2621 for (thread_info *tp : all_non_exited_threads ())
2622 record_btrace_cancel_resume (tp);
2625 /* In async mode, we need to announce further events. */
2626 if (target_is_async_p ())
2627 record_btrace_maybe_mark_async_event (moving, no_history);
2629 /* Start record histories anew from the current position. */
2630 record_btrace_clear_histories (&eventing->btrace);
2632 /* We moved the replay position but did not update registers. */
2633 registers_changed_thread (eventing);
2635 DEBUG ("wait ended by thread %s (%s): %s",
2636 print_thread_id (eventing),
2637 target_pid_to_str (eventing->ptid).c_str (),
2638 target_waitstatus_to_string (status).c_str ());
2640 return eventing->ptid;
2643 /* The stop method of target record-btrace. */
2646 record_btrace_target::stop (ptid_t ptid)
2648 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2650 /* As long as we're not replaying, just forward the request. */
2651 if ((::execution_direction != EXEC_REVERSE)
2652 && !record_is_replaying (minus_one_ptid))
2654 this->beneath ()->stop (ptid);
2658 for (thread_info *tp : all_non_exited_threads (ptid))
2660 tp->btrace.flags &= ~BTHR_MOVE;
2661 tp->btrace.flags |= BTHR_STOP;
2666 /* The can_execute_reverse method of target record-btrace.  Branch trace
   recording always supports reverse execution (replaying).  */
2669 record_btrace_target::can_execute_reverse ()
2674 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2677 record_btrace_target::stopped_by_sw_breakpoint ()
2679 if (record_is_replaying (minus_one_ptid))
2681 struct thread_info *tp = inferior_thread ();
2683 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2686 return this->beneath ()->stopped_by_sw_breakpoint ();
2689 /* The supports_stopped_by_sw_breakpoint method of target
   record-btrace.  Always supported while replaying; otherwise ask the
   target beneath.  */
2693 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2695 if (record_is_replaying (minus_one_ptid))
2698 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2701 /* The stopped_by_hw_breakpoint method of target record-btrace.  (The
   original comment said "sw" — copy-paste slip.)  While replaying,
   answered from the recorded stop reason; otherwise forwarded
   beneath.  */
2704 record_btrace_target::stopped_by_hw_breakpoint ()
2706 if (record_is_replaying (minus_one_ptid))
2708 struct thread_info *tp = inferior_thread ();
2710 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2713 return this->beneath ()->stopped_by_hw_breakpoint ();
2716 /* The supports_stopped_by_hw_breakpoint method of target
   record-btrace.  Always supported while replaying; otherwise ask the
   target beneath.  */
2720 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2722 if (record_is_replaying (minus_one_ptid))
2725 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2728 /* The update_thread_list method of target record-btrace.  The thread
   set is frozen during replay; otherwise forwarded beneath.  */
2731 record_btrace_target::update_thread_list ()
2733 /* We don't add or remove threads during replay. */
2734 if (record_is_replaying (minus_one_ptid))
2737 /* Forward the request. */
2738 this->beneath ()->update_thread_list ();
2741 /* The thread_alive method of target record-btrace.  During replay all
   known threads are considered alive; otherwise forwarded beneath.  */
2744 record_btrace_target::thread_alive (ptid_t ptid)
2746 /* We don't add or remove threads during replay. */
2747 if (record_is_replaying (minus_one_ptid))
2750 /* Forward the request. */
2751 return this->beneath ()->thread_alive (ptid);
2754 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
   is stopped instead.  After moving the position, register and history
   caches are invalidated, the cached stop PC is refreshed, and the new
   location is printed.  */
2758 record_btrace_set_replay (struct thread_info *tp,
2759 const struct btrace_insn_iterator *it)
2761 struct btrace_thread_info *btinfo;
2763 btinfo = &tp->btrace;
   /* IT == NULL (guard elided in this extract): stop replaying.  */
2766 record_btrace_stop_replaying (tp);
2769 if (btinfo->replay == NULL)
2770 record_btrace_start_replaying (tp);
   /* Already at the requested position: nothing to do.  */
2771 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2774 *btinfo->replay = *it;
2775 registers_changed_thread (tp);
2778 /* Start anew from the new replay position. */
2779 record_btrace_clear_histories (btinfo);
2781 inferior_thread ()->suspend.stop_pc
2782 = regcache_read_pc (get_current_regcache ());
2783 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2786 /* The goto_record_begin method of target record-btrace.  Moves the
   replay position to the first real (non-gap) instruction of the
   trace.  */
2789 record_btrace_target::goto_record_begin ()
2791 struct thread_info *tp;
2792 struct btrace_insn_iterator begin;
2794 tp = require_btrace_thread ();
2796 btrace_insn_begin (&begin, &tp->btrace);
2798 /* Skip gaps at the beginning of the trace. */
2799 while (btrace_insn_get (&begin) == NULL)
2803 steps = btrace_insn_next (&begin, 1);
   /* Trace consists only of gaps — nothing to go to.  */
2805 error (_("No trace."));
2808 record_btrace_set_replay (tp, &begin);
2811 /* The goto_record_end method of target record-btrace. */
2814 record_btrace_target::goto_record_end ()
2816 struct thread_info *tp;
2818 tp = require_btrace_thread ();
2820 record_btrace_set_replay (tp, NULL);
2823 /* The goto_record method of target record-btrace. */
2826 record_btrace_target::goto_record (ULONGEST insn)
2828 struct thread_info *tp;
2829 struct btrace_insn_iterator it;
2830 unsigned int number;
2835 /* Check for wrap-arounds. */
2837 error (_("Instruction number out of range."));
2839 tp = require_btrace_thread ();
2841 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2843 /* Check if the instruction could not be found or is a gap. */
2844 if (found == 0 || btrace_insn_get (&it) == NULL)
2845 error (_("No such instruction."));
2847 record_btrace_set_replay (tp, &it);
2850 /* The record_stop_replaying method of target record-btrace. */
2853 record_btrace_target::record_stop_replaying ()
2855 for (thread_info *tp : all_non_exited_threads ())
2856 record_btrace_stop_replaying (tp);
2859 /* The execution_direction target method. */
2861 enum exec_direction_kind
2862 record_btrace_target::execution_direction ()
2864 return record_btrace_resume_exec_dir;
2867 /* The prepare_to_generate_core target method. */
2870 record_btrace_target::prepare_to_generate_core ()
2872 record_btrace_generating_corefile = 1;
2875 /* The done_generating_core target method. */
2878 record_btrace_target::done_generating_core ()
2880 record_btrace_generating_corefile = 0;
2883 /* Start recording in BTS format. */
2886 cmd_record_btrace_bts_start (const char *args, int from_tty)
2888 if (args != NULL && *args != 0)
2889 error (_("Invalid argument."));
2891 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2895 execute_command ("target record-btrace", from_tty);
2897 catch (const gdb_exception &exception)
2899 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2904 /* Start recording in Intel Processor Trace format. */
2907 cmd_record_btrace_pt_start (const char *args, int from_tty)
2909 if (args != NULL && *args != 0)
2910 error (_("Invalid argument."));
2912 record_btrace_conf.format = BTRACE_FORMAT_PT;
2916 execute_command ("target record-btrace", from_tty);
2918 catch (const gdb_exception &exception)
2920 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2925 /* Alias for "target record". */
2928 cmd_record_btrace_start (const char *args, int from_tty)
2930 if (args != NULL && *args != 0)
2931 error (_("Invalid argument."));
2933 record_btrace_conf.format = BTRACE_FORMAT_PT;
2937 execute_command ("target record-btrace", from_tty);
2939 catch (const gdb_exception &exception)
2941 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2945 execute_command ("target record-btrace", from_tty);
2947 catch (const gdb_exception &ex)
2949 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2955 /* The "set record btrace" command. */
2958 cmd_set_record_btrace (const char *args, int from_tty)
2960 printf_unfiltered (_("\"set record btrace\" must be followed "
2961 "by an appropriate subcommand.\n"));
2962 help_list (set_record_btrace_cmdlist, "set record btrace ",
2963 all_commands, gdb_stdout);
2966 /* The "show record btrace" command. */
2969 cmd_show_record_btrace (const char *args, int from_tty)
2971 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2974 /* The "show record btrace replay-memory-access" command. */
2977 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2978 struct cmd_list_element *c, const char *value)
2980 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2981 replay_memory_access);
2984 /* The "set record btrace cpu none" command. */
2987 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2989 if (args != nullptr && *args != 0)
2990 error (_("Trailing junk: '%s'."), args);
2992 record_btrace_cpu_state = CS_NONE;
2995 /* The "set record btrace cpu auto" command. */
2998 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3000 if (args != nullptr && *args != 0)
3001 error (_("Trailing junk: '%s'."), args);
3003 record_btrace_cpu_state = CS_AUTO;
3006 /* The "set record btrace cpu" command. */
3009 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3011 if (args == nullptr)
3014 /* We use a hard-coded vendor string for now. */
3015 unsigned int family, model, stepping;
3016 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3017 &model, &l1, &stepping, &l2);
3020 if (strlen (args) != l2)
3021 error (_("Trailing junk: '%s'."), args + l2);
3023 else if (matches == 2)
3025 if (strlen (args) != l1)
3026 error (_("Trailing junk: '%s'."), args + l1);
3031 error (_("Bad format. See \"help set record btrace cpu\"."));
3033 if (USHRT_MAX < family)
3034 error (_("Cpu family too big."));
3036 if (UCHAR_MAX < model)
3037 error (_("Cpu model too big."));
3039 if (UCHAR_MAX < stepping)
3040 error (_("Cpu stepping too big."));
3042 record_btrace_cpu.vendor = CV_INTEL;
3043 record_btrace_cpu.family = family;
3044 record_btrace_cpu.model = model;
3045 record_btrace_cpu.stepping = stepping;
3047 record_btrace_cpu_state = CS_CPU;
3050 /* The "show record btrace cpu" command. */
3053 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3055 if (args != nullptr && *args != 0)
3056 error (_("Trailing junk: '%s'."), args);
3058 switch (record_btrace_cpu_state)
3061 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3065 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3069 switch (record_btrace_cpu.vendor)
3072 if (record_btrace_cpu.stepping == 0)
3073 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3074 record_btrace_cpu.family,
3075 record_btrace_cpu.model);
3077 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3078 record_btrace_cpu.family,
3079 record_btrace_cpu.model,
3080 record_btrace_cpu.stepping);
3085 error (_("Internal error: bad cpu state."));
3088 /* The "s record btrace bts" command. */
3091 cmd_set_record_btrace_bts (const char *args, int from_tty)
3093 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3094 "by an appropriate subcommand.\n"));
3095 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3096 all_commands, gdb_stdout);
3099 /* The "show record btrace bts" command. */
3102 cmd_show_record_btrace_bts (const char *args, int from_tty)
3104 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3107 /* The "set record btrace pt" command. */
3110 cmd_set_record_btrace_pt (const char *args, int from_tty)
3112 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3113 "by an appropriate subcommand.\n"));
3114 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3115 all_commands, gdb_stdout);
3118 /* The "show record btrace pt" command. */
3121 cmd_show_record_btrace_pt (const char *args, int from_tty)
3123 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3126 /* The "record bts buffer-size" show value function. */
3129 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3130 struct cmd_list_element *c,
3133 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3137 /* The "record pt buffer-size" show value function. */
3140 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3141 struct cmd_list_element *c,
3144 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3148 /* Initialize btrace commands. */
3151 _initialize_record_btrace (void)
3153 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3154 _("Start branch trace recording."), &record_btrace_cmdlist,
3155 "record btrace ", 0, &record_cmdlist);
3156 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3158 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3160 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3161 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3162 This format may not be available on all processors."),
3163 &record_btrace_cmdlist);
3164 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3166 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3168 Start branch trace recording in Intel Processor Trace format.\n\n\
3169 This format may not be available on all processors."),
3170 &record_btrace_cmdlist);
3171 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3173 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3174 _("Set record options"), &set_record_btrace_cmdlist,
3175 "set record btrace ", 0, &set_record_cmdlist);
3177 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3178 _("Show record options"), &show_record_btrace_cmdlist,
3179 "show record btrace ", 0, &show_record_cmdlist);
3181 add_setshow_enum_cmd ("replay-memory-access", no_class,
3182 replay_memory_access_types, &replay_memory_access, _("\
3183 Set what memory accesses are allowed during replay."), _("\
3184 Show what memory accesses are allowed during replay."),
3185 _("Default is READ-ONLY.\n\n\
3186 The btrace record target does not trace data.\n\
3187 The memory therefore corresponds to the live target and not \
3188 to the current replay position.\n\n\
3189 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3190 When READ-WRITE, allow accesses to read-only and read-write memory during \
3192 NULL, cmd_show_replay_memory_access,
3193 &set_record_btrace_cmdlist,
3194 &show_record_btrace_cmdlist);
3196 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3198 Set the cpu to be used for trace decode.\n\n\
3199 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3200 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3201 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3202 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3203 When GDB does not support that cpu, this option can be used to enable\n\
3204 workarounds for a similar cpu that GDB supports.\n\n\
3205 When set to \"none\", errata workarounds are disabled."),
3206 &set_record_btrace_cpu_cmdlist,
3207 _("set record btrace cpu "), 1,
3208 &set_record_btrace_cmdlist);
3210 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3211 Automatically determine the cpu to be used for trace decode."),
3212 &set_record_btrace_cpu_cmdlist);
3214 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3215 Do not enable errata workarounds for trace decode."),
3216 &set_record_btrace_cpu_cmdlist);
3218 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3219 Show the cpu to be used for trace decode."),
3220 &show_record_btrace_cmdlist);
3222 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3223 _("Set record btrace bts options"),
3224 &set_record_btrace_bts_cmdlist,
3225 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3227 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3228 _("Show record btrace bts options"),
3229 &show_record_btrace_bts_cmdlist,
3230 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3232 add_setshow_uinteger_cmd ("buffer-size", no_class,
3233 &record_btrace_conf.bts.size,
3234 _("Set the record/replay bts buffer size."),
3235 _("Show the record/replay bts buffer size."), _("\
3236 When starting recording request a trace buffer of this size. \
3237 The actual buffer size may differ from the requested size. \
3238 Use \"info record\" to see the actual buffer size.\n\n\
3239 Bigger buffers allow longer recording but also take more time to process \
3240 the recorded execution trace.\n\n\
3241 The trace buffer size may not be changed while recording."), NULL,
3242 show_record_bts_buffer_size_value,
3243 &set_record_btrace_bts_cmdlist,
3244 &show_record_btrace_bts_cmdlist);
3246 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3247 _("Set record btrace pt options"),
3248 &set_record_btrace_pt_cmdlist,
3249 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3251 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3252 _("Show record btrace pt options"),
3253 &show_record_btrace_pt_cmdlist,
3254 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3256 add_setshow_uinteger_cmd ("buffer-size", no_class,
3257 &record_btrace_conf.pt.size,
3258 _("Set the record/replay pt buffer size."),
3259 _("Show the record/replay pt buffer size."), _("\
3260 Bigger buffers allow longer recording but also take more time to process \
3261 the recorded execution.\n\
3262 The actual buffer size may differ from the requested size. Use \"info record\" \
3263 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3264 &set_record_btrace_pt_cmdlist,
3265 &show_record_btrace_pt_cmdlist);
3267 add_target (record_btrace_target_info, record_btrace_target_open);
3269 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3272 record_btrace_conf.bts.size = 64 * 1024;
3273 record_btrace_conf.pt.size = 16 * 1024;