1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
45 static const target_info record_btrace_target_info = {
47 N_("Branch tracing target"),
48 N_("Collect control-flow trace and provide the execution history.")
51 /* The target_ops of record-btrace. */
53 class record_btrace_target final : public target_ops
56 record_btrace_target ()
57 { to_stratum = record_stratum; }
59 const target_info &info () const override
60 { return record_btrace_target_info; }
62 void close () override;
63 void async (int) override;
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
68 void disconnect (const char *, int) override;
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
74 { record_kill (this); }
76 enum record_method record_method (ptid_t ptid) override;
78 void stop_recording () override;
79 void info_record () override;
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
94 void record_stop_replaying () override;
96 enum target_xfer_status xfer_partial (enum target_object object,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
108 void fetch_registers (struct regcache *, int) override;
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
113 const struct frame_unwind *get_unwinder () override;
115 const struct frame_unwind *get_tailcall_unwinder () override;
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
121 void stop (ptid_t) override;
122 void update_thread_list () override;
123 bool thread_alive (ptid_t ptid) override;
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
128 bool can_execute_reverse () override;
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
141 static record_btrace_target record_btrace_ops;
143 /* Initialize the record-btrace target ops. */
145 /* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147 static const gdb::observers::token record_btrace_thread_observer_token;
149 /* Memory access types used in set/show record btrace replay-memory-access. */
150 static const char replay_memory_access_read_only[] = "read-only";
151 static const char replay_memory_access_read_write[] = "read-write";
152 static const char *const replay_memory_access_types[] =
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
159 /* The currently allowed replay memory access type. */
160 static const char *replay_memory_access = replay_memory_access_read_only;
162 /* The cpu state kinds. */
163 enum record_btrace_cpu_state_kind
170 /* The current cpu state. */
171 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
173 /* The current cpu for trace decode. */
174 static struct btrace_cpu record_btrace_cpu;
176 /* Command lists for "set/show record btrace". */
177 static struct cmd_list_element *set_record_btrace_cmdlist;
178 static struct cmd_list_element *show_record_btrace_cmdlist;
180 /* The execution direction of the last resume we got. See record-full.c. */
181 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
183 /* The async event handler for reverse/replay execution. */
184 static struct async_event_handler *record_btrace_async_inferior_event_handler;
186 /* A flag indicating that we are currently generating a core file. */
187 static int record_btrace_generating_corefile;
189 /* The current branch trace configuration. */
190 static struct btrace_config record_btrace_conf;
192 /* Command list for "record btrace". */
193 static struct cmd_list_element *record_btrace_cmdlist;
195 /* Command lists for "set/show record btrace bts". */
196 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
199 /* Command lists for "set/show record btrace pt". */
200 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
203 /* Command list for "set record btrace cpu". */
204 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
206 /* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
209 #define DEBUG(msg, args...) \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
219 /* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221 const struct btrace_cpu *
222 record_btrace_get_cpu (void)
224 switch (record_btrace_cpu_state)
230 record_btrace_cpu.vendor = CV_UNKNOWN;
233 return &record_btrace_cpu;
236 error (_("Internal error: bad record btrace cpu state."));
239 /* Update the branch trace for the current thread and return a pointer to its
242 Throws an error if there is no thread or no trace. This function never
245 static struct thread_info *
246 require_btrace_thread (void)
250 if (inferior_ptid == null_ptid)
251 error (_("No thread."));
253 thread_info *tp = inferior_thread ();
255 validate_registers_access ();
257 btrace_fetch (tp, record_btrace_get_cpu ());
259 if (btrace_is_empty (tp))
260 error (_("No trace."));
265 /* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
268 Throws an error if there is no thread or no trace. This function never
271 static struct btrace_thread_info *
272 require_btrace (void)
274 struct thread_info *tp;
276 tp = require_btrace_thread ();
281 /* Enable branch tracing for one thread. Warn on errors. */
284 record_btrace_enable_warn (struct thread_info *tp)
288 btrace_enable (tp, &record_btrace_conf);
290 CATCH (error, RETURN_MASK_ERROR)
292 warning ("%s", error.message);
297 /* Enable automatic tracing of new threads. */
300 record_btrace_auto_enable (void)
302 DEBUG ("attach thread observer");
304 gdb::observers::new_thread.attach (record_btrace_enable_warn,
305 record_btrace_thread_observer_token);
308 /* Disable automatic tracing of new threads. */
311 record_btrace_auto_disable (void)
313 DEBUG ("detach thread observer");
315 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
318 /* The record-btrace async event handler function. */
321 record_btrace_handle_async_inferior_event (gdb_client_data data)
323 inferior_event_handler (INF_REG_EVENT, NULL);
326 /* See record-btrace.h. */
329 record_btrace_push_target (void)
333 record_btrace_auto_enable ();
335 push_target (&record_btrace_ops);
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event,
340 record_btrace_generating_corefile = 0;
342 format = btrace_format_short_string (record_btrace_conf.format);
343 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
346 /* Disable btrace on a set of threads on scope exit. */
348 struct scoped_btrace_disable
350 scoped_btrace_disable () = default;
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
354 ~scoped_btrace_disable ()
356 for (thread_info *tp : m_threads)
360 void add_thread (thread_info *thread)
362 m_threads.push_front (thread);
371 std::forward_list<thread_info *> m_threads;
374 /* Open target record-btrace. */
377 record_btrace_target_open (const char *args, int from_tty)
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable;
387 if (!target_has_execution)
388 error (_("The program is not being run."));
390 for (thread_info *tp : all_non_exited_threads ())
391 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
393 btrace_enable (tp, &record_btrace_conf);
395 btrace_disable.add_thread (tp);
398 record_btrace_push_target ();
400 btrace_disable.discard ();
403 /* The stop_recording method of target record-btrace. */
406 record_btrace_target::stop_recording ()
408 DEBUG ("stop recording");
410 record_btrace_auto_disable ();
412 for (thread_info *tp : all_non_exited_threads ())
413 if (tp->btrace.target != NULL)
417 /* The disconnect method of target record-btrace. */
420 record_btrace_target::disconnect (const char *args,
423 struct target_ops *beneath = this->beneath ();
425 /* Do not stop recording, just clean up GDB side. */
426 unpush_target (this);
428 /* Forward disconnect. */
429 beneath->disconnect (args, from_tty);
432 /* The close method of target record-btrace. */
435 record_btrace_target::close ()
437 if (record_btrace_async_inferior_event_handler != NULL)
438 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
440 /* Make sure automatic recording gets disabled even if we did not stop
441 recording before closing the record-btrace target. */
442 record_btrace_auto_disable ();
444 /* We should have already stopped recording.
445 Tear down btrace in case we have not. */
446 for (thread_info *tp : all_non_exited_threads ())
447 btrace_teardown (tp);
450 /* The async method of target record-btrace. */
453 record_btrace_target::async (int enable)
456 mark_async_event_handler (record_btrace_async_inferior_event_handler);
458 clear_async_event_handler (record_btrace_async_inferior_event_handler);
460 this->beneath ()->async (enable);
463 /* Adjusts the size and returns a human readable size suffix. */
466 record_btrace_adjust_size (unsigned int *size)
472 if ((sz & ((1u << 30) - 1)) == 0)
477 else if ((sz & ((1u << 20) - 1)) == 0)
482 else if ((sz & ((1u << 10) - 1)) == 0)
491 /* Print a BTS configuration. */
494 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
502 suffix = record_btrace_adjust_size (&size);
503 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
507 /* Print an Intel Processor Trace configuration. */
510 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
518 suffix = record_btrace_adjust_size (&size);
519 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
523 /* Print a branch tracing configuration. */
526 record_btrace_print_conf (const struct btrace_config *conf)
528 printf_unfiltered (_("Recording format: %s.\n"),
529 btrace_format_string (conf->format));
531 switch (conf->format)
533 case BTRACE_FORMAT_NONE:
536 case BTRACE_FORMAT_BTS:
537 record_btrace_print_bts_conf (&conf->bts);
540 case BTRACE_FORMAT_PT:
541 record_btrace_print_pt_conf (&conf->pt);
545 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
548 /* The info_record method of target record-btrace. */
551 record_btrace_target::info_record ()
553 struct btrace_thread_info *btinfo;
554 const struct btrace_config *conf;
555 struct thread_info *tp;
556 unsigned int insns, calls, gaps;
560 tp = find_thread_ptid (inferior_ptid);
562 error (_("No thread."));
564 validate_registers_access ();
566 btinfo = &tp->btrace;
568 conf = ::btrace_conf (btinfo);
570 record_btrace_print_conf (conf);
572 btrace_fetch (tp, record_btrace_get_cpu ());
578 if (!btrace_is_empty (tp))
580 struct btrace_call_iterator call;
581 struct btrace_insn_iterator insn;
583 btrace_call_end (&call, btinfo);
584 btrace_call_prev (&call, 1);
585 calls = btrace_call_number (&call);
587 btrace_insn_end (&insn, btinfo);
588 insns = btrace_insn_number (&insn);
590 /* If the last instruction is not a gap, it is the current instruction
591 that is not actually part of the record. */
592 if (btrace_insn_get (&insn) != NULL)
595 gaps = btinfo->ngaps;
598 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
599 "for thread %s (%s).\n"), insns, calls, gaps,
600 print_thread_id (tp), target_pid_to_str (tp->ptid));
602 if (btrace_is_replaying (tp))
603 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
604 btrace_insn_number (btinfo->replay));
607 /* Print a decode error. */
610 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
611 enum btrace_format format)
613 const char *errstr = btrace_decode_error (format, errcode);
615 uiout->text (_("["));
616 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
617 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
619 uiout->text (_("decode error ("));
620 uiout->field_int ("errcode", errcode);
621 uiout->text (_("): "));
623 uiout->text (errstr);
624 uiout->text (_("]\n"));
627 /* Print an unsigned int. */
630 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
632 uiout->field_fmt (fld, "%u", val);
635 /* A range of source lines. */
637 struct btrace_line_range
639 /* The symtab this line is from. */
640 struct symtab *symtab;
642 /* The first line (inclusive). */
645 /* The last line (exclusive). */
649 /* Construct a line range. */
651 static struct btrace_line_range
652 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
654 struct btrace_line_range range;
656 range.symtab = symtab;
663 /* Add a line to a line range. */
665 static struct btrace_line_range
666 btrace_line_range_add (struct btrace_line_range range, int line)
668 if (range.end <= range.begin)
670 /* This is the first entry. */
672 range.end = line + 1;
674 else if (line < range.begin)
676 else if (range.end < line)
682 /* Return non-zero if RANGE is empty, zero otherwise. */
685 btrace_line_range_is_empty (struct btrace_line_range range)
687 return range.end <= range.begin;
690 /* Return non-zero if LHS contains RHS, zero otherwise. */
693 btrace_line_range_contains_range (struct btrace_line_range lhs,
694 struct btrace_line_range rhs)
696 return ((lhs.symtab == rhs.symtab)
697 && (lhs.begin <= rhs.begin)
698 && (rhs.end <= lhs.end));
701 /* Find the line range associated with PC. */
703 static struct btrace_line_range
704 btrace_find_line_range (CORE_ADDR pc)
706 struct btrace_line_range range;
707 struct linetable_entry *lines;
708 struct linetable *ltable;
709 struct symtab *symtab;
712 symtab = find_pc_line_symtab (pc);
714 return btrace_mk_line_range (NULL, 0, 0);
716 ltable = SYMTAB_LINETABLE (symtab);
718 return btrace_mk_line_range (symtab, 0, 0);
720 nlines = ltable->nitems;
721 lines = ltable->item;
723 return btrace_mk_line_range (symtab, 0, 0);
725 range = btrace_mk_line_range (symtab, 0, 0);
726 for (i = 0; i < nlines - 1; i++)
728 if ((lines[i].pc == pc) && (lines[i].line != 0))
729 range = btrace_line_range_add (range, lines[i].line);
735 /* Print source lines in LINES to UIOUT.
737 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
738 instructions corresponding to that source line. When printing a new source
739 line, we do the cleanups for the open chain and open a new cleanup chain for
740 the new source line. If the source line range in LINES is not empty, this
741 function will leave the cleanup chain for the last printed source line open
742 so instructions can be added to it. */
745 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
746 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
747 gdb::optional<ui_out_emit_list> *asm_list,
748 gdb_disassembly_flags flags)
750 print_source_lines_flags psl_flags;
752 if (flags & DISASSEMBLY_FILENAME)
753 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
755 for (int line = lines.begin; line < lines.end; ++line)
759 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
761 print_source_lines (lines.symtab, line, line + 1, psl_flags);
763 asm_list->emplace (uiout, "line_asm_insn");
767 /* Disassemble a section of the recorded instruction trace. */
770 btrace_insn_history (struct ui_out *uiout,
771 const struct btrace_thread_info *btinfo,
772 const struct btrace_insn_iterator *begin,
773 const struct btrace_insn_iterator *end,
774 gdb_disassembly_flags flags)
776 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
777 btrace_insn_number (begin), btrace_insn_number (end));
779 flags |= DISASSEMBLY_SPECULATIVE;
781 struct gdbarch *gdbarch = target_gdbarch ();
782 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
784 ui_out_emit_list list_emitter (uiout, "asm_insns");
786 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
787 gdb::optional<ui_out_emit_list> asm_list;
789 gdb_pretty_print_disassembler disasm (gdbarch);
791 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
792 btrace_insn_next (&it, 1))
794 const struct btrace_insn *insn;
796 insn = btrace_insn_get (&it);
798 /* A NULL instruction indicates a gap in the trace. */
801 const struct btrace_config *conf;
803 conf = btrace_conf (btinfo);
805 /* We have trace so we must have a configuration. */
806 gdb_assert (conf != NULL);
808 uiout->field_fmt ("insn-number", "%u",
809 btrace_insn_number (&it));
812 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
817 struct disasm_insn dinsn;
819 if ((flags & DISASSEMBLY_SOURCE) != 0)
821 struct btrace_line_range lines;
823 lines = btrace_find_line_range (insn->pc);
824 if (!btrace_line_range_is_empty (lines)
825 && !btrace_line_range_contains_range (last_lines, lines))
827 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
831 else if (!src_and_asm_tuple.has_value ())
833 gdb_assert (!asm_list.has_value ());
835 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
837 /* No source information. */
838 asm_list.emplace (uiout, "line_asm_insn");
841 gdb_assert (src_and_asm_tuple.has_value ());
842 gdb_assert (asm_list.has_value ());
845 memset (&dinsn, 0, sizeof (dinsn));
846 dinsn.number = btrace_insn_number (&it);
847 dinsn.addr = insn->pc;
849 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
850 dinsn.is_speculative = 1;
852 disasm.pretty_print_insn (uiout, &dinsn, flags);
857 /* The insn_history method of target record-btrace. */
860 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
862 struct btrace_thread_info *btinfo;
863 struct btrace_insn_history *history;
864 struct btrace_insn_iterator begin, end;
865 struct ui_out *uiout;
866 unsigned int context, covered;
868 uiout = current_uiout;
869 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
870 context = abs (size);
872 error (_("Bad record instruction-history-size."));
874 btinfo = require_btrace ();
875 history = btinfo->insn_history;
878 struct btrace_insn_iterator *replay;
880 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
882 /* If we're replaying, we start at the replay position. Otherwise, we
883 start at the tail of the trace. */
884 replay = btinfo->replay;
888 btrace_insn_end (&begin, btinfo);
890 /* We start from here and expand in the requested direction. Then we
891 expand in the other direction, as well, to fill up any remaining
896 /* We want the current position covered, as well. */
897 covered = btrace_insn_next (&end, 1);
898 covered += btrace_insn_prev (&begin, context - covered);
899 covered += btrace_insn_next (&end, context - covered);
903 covered = btrace_insn_next (&end, context);
904 covered += btrace_insn_prev (&begin, context - covered);
909 begin = history->begin;
912 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
913 btrace_insn_number (&begin), btrace_insn_number (&end));
918 covered = btrace_insn_prev (&begin, context);
923 covered = btrace_insn_next (&end, context);
928 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
932 printf_unfiltered (_("At the start of the branch trace record.\n"));
934 printf_unfiltered (_("At the end of the branch trace record.\n"));
937 btrace_set_insn_history (btinfo, &begin, &end);
940 /* The insn_history_range method of target record-btrace. */
943 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
944 gdb_disassembly_flags flags)
946 struct btrace_thread_info *btinfo;
947 struct btrace_insn_iterator begin, end;
948 struct ui_out *uiout;
949 unsigned int low, high;
952 uiout = current_uiout;
953 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
957 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
959 /* Check for wrap-arounds. */
960 if (low != from || high != to)
961 error (_("Bad range."));
964 error (_("Bad range."));
966 btinfo = require_btrace ();
968 found = btrace_find_insn_by_number (&begin, btinfo, low);
970 error (_("Range out of bounds."));
972 found = btrace_find_insn_by_number (&end, btinfo, high);
975 /* Silently truncate the range. */
976 btrace_insn_end (&end, btinfo);
980 /* We want both begin and end to be inclusive. */
981 btrace_insn_next (&end, 1);
984 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
985 btrace_set_insn_history (btinfo, &begin, &end);
988 /* The insn_history_from method of target record-btrace. */
991 record_btrace_target::insn_history_from (ULONGEST from, int size,
992 gdb_disassembly_flags flags)
994 ULONGEST begin, end, context;
996 context = abs (size);
998 error (_("Bad record instruction-history-size."));
1007 begin = from - context + 1;
1012 end = from + context - 1;
1014 /* Check for wrap-around. */
1019 insn_history_range (begin, end, flags);
1022 /* Print the instruction number range for a function call history line. */
1025 btrace_call_history_insn_range (struct ui_out *uiout,
1026 const struct btrace_function *bfun)
1028 unsigned int begin, end, size;
1030 size = bfun->insn.size ();
1031 gdb_assert (size > 0);
1033 begin = bfun->insn_offset;
1034 end = begin + size - 1;
1036 ui_out_field_uint (uiout, "insn begin", begin);
1038 ui_out_field_uint (uiout, "insn end", end);
1041 /* Compute the lowest and highest source line for the instructions in BFUN
1042 and return them in PBEGIN and PEND.
1043 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1044 result from inlining or macro expansion. */
1047 btrace_compute_src_line_range (const struct btrace_function *bfun,
1048 int *pbegin, int *pend)
1050 struct symtab *symtab;
1061 symtab = symbol_symtab (sym);
1063 for (const btrace_insn &insn : bfun->insn)
1065 struct symtab_and_line sal;
1067 sal = find_pc_line (insn.pc, 0);
1068 if (sal.symtab != symtab || sal.line == 0)
1071 begin = std::min (begin, sal.line);
1072 end = std::max (end, sal.line);
1080 /* Print the source line information for a function call history line. */
1083 btrace_call_history_src_line (struct ui_out *uiout,
1084 const struct btrace_function *bfun)
1093 uiout->field_string ("file",
1094 symtab_to_filename_for_display (symbol_symtab (sym)));
1096 btrace_compute_src_line_range (bfun, &begin, &end);
1101 uiout->field_int ("min line", begin);
1107 uiout->field_int ("max line", end);
1110 /* Get the name of a branch trace function. */
1113 btrace_get_bfun_name (const struct btrace_function *bfun)
1115 struct minimal_symbol *msym;
1125 return SYMBOL_PRINT_NAME (sym);
1126 else if (msym != NULL)
1127 return MSYMBOL_PRINT_NAME (msym);
1132 /* Disassemble a section of the recorded function trace. */
1135 btrace_call_history (struct ui_out *uiout,
1136 const struct btrace_thread_info *btinfo,
1137 const struct btrace_call_iterator *begin,
1138 const struct btrace_call_iterator *end,
1141 struct btrace_call_iterator it;
1142 record_print_flags flags = (enum record_print_flag) int_flags;
1144 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1145 btrace_call_number (end));
1147 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1149 const struct btrace_function *bfun;
1150 struct minimal_symbol *msym;
1153 bfun = btrace_call_get (&it);
1157 /* Print the function index. */
1158 ui_out_field_uint (uiout, "index", bfun->number);
1161 /* Indicate gaps in the trace. */
1162 if (bfun->errcode != 0)
1164 const struct btrace_config *conf;
1166 conf = btrace_conf (btinfo);
1168 /* We have trace so we must have a configuration. */
1169 gdb_assert (conf != NULL);
1171 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1176 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1178 int level = bfun->level + btinfo->level, i;
1180 for (i = 0; i < level; ++i)
1185 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1186 else if (msym != NULL)
1187 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1188 else if (!uiout->is_mi_like_p ())
1189 uiout->field_string ("function", "??");
1191 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1193 uiout->text (_("\tinst "));
1194 btrace_call_history_insn_range (uiout, bfun);
1197 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1199 uiout->text (_("\tat "));
1200 btrace_call_history_src_line (uiout, bfun);
1207 /* The call_history method of target record-btrace. */
1210 record_btrace_target::call_history (int size, record_print_flags flags)
1212 struct btrace_thread_info *btinfo;
1213 struct btrace_call_history *history;
1214 struct btrace_call_iterator begin, end;
1215 struct ui_out *uiout;
1216 unsigned int context, covered;
1218 uiout = current_uiout;
1219 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1220 context = abs (size);
1222 error (_("Bad record function-call-history-size."));
1224 btinfo = require_btrace ();
1225 history = btinfo->call_history;
1226 if (history == NULL)
1228 struct btrace_insn_iterator *replay;
1230 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1232 /* If we're replaying, we start at the replay position. Otherwise, we
1233 start at the tail of the trace. */
1234 replay = btinfo->replay;
1237 begin.btinfo = btinfo;
1238 begin.index = replay->call_index;
1241 btrace_call_end (&begin, btinfo);
1243 /* We start from here and expand in the requested direction. Then we
1244 expand in the other direction, as well, to fill up any remaining
1249 /* We want the current position covered, as well. */
1250 covered = btrace_call_next (&end, 1);
1251 covered += btrace_call_prev (&begin, context - covered);
1252 covered += btrace_call_next (&end, context - covered);
1256 covered = btrace_call_next (&end, context);
1257 covered += btrace_call_prev (&begin, context- covered);
1262 begin = history->begin;
1265 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1266 btrace_call_number (&begin), btrace_call_number (&end));
1271 covered = btrace_call_prev (&begin, context);
1276 covered = btrace_call_next (&end, context);
1281 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1285 printf_unfiltered (_("At the start of the branch trace record.\n"));
1287 printf_unfiltered (_("At the end of the branch trace record.\n"));
1290 btrace_set_call_history (btinfo, &begin, &end);
1293 /* The call_history_range method of target record-btrace. */
1296 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1297 record_print_flags flags)
1299 struct btrace_thread_info *btinfo;
1300 struct btrace_call_iterator begin, end;
1301 struct ui_out *uiout;
1302 unsigned int low, high;
1305 uiout = current_uiout;
1306 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1310 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1312 /* Check for wrap-arounds. */
1313 if (low != from || high != to)
1314 error (_("Bad range."));
1317 error (_("Bad range."));
1319 btinfo = require_btrace ();
1321 found = btrace_find_call_by_number (&begin, btinfo, low);
1323 error (_("Range out of bounds."));
1325 found = btrace_find_call_by_number (&end, btinfo, high);
1328 /* Silently truncate the range. */
1329 btrace_call_end (&end, btinfo);
1333 /* We want both begin and end to be inclusive. */
1334 btrace_call_next (&end, 1);
1337 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1338 btrace_set_call_history (btinfo, &begin, &end);
1341 /* The call_history_from method of target record-btrace. */
1344 record_btrace_target::call_history_from (ULONGEST from, int size,
1345 record_print_flags flags)
1347 ULONGEST begin, end, context;
1349 context = abs (size);
1351 error (_("Bad record function-call-history-size."));
1360 begin = from - context + 1;
1365 end = from + context - 1;
1367 /* Check for wrap-around. */
1372 call_history_range ( begin, end, flags);
1375 /* The record_method method of target record-btrace. */
1378 record_btrace_target::record_method (ptid_t ptid)
1380 struct thread_info * const tp = find_thread_ptid (ptid);
1383 error (_("No thread."));
1385 if (tp->btrace.target == NULL)
1386 return RECORD_METHOD_NONE;
1388 return RECORD_METHOD_BTRACE;
1391 /* The record_is_replaying method of target record-btrace. */
1394 record_btrace_target::record_is_replaying (ptid_t ptid)
1396 for (thread_info *tp : all_non_exited_threads (ptid))
1397 if (btrace_is_replaying (tp))
1403 /* The record_will_replay method of target record-btrace. */
1406 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1408 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1411 /* The xfer_partial method of target record-btrace. */
1413 enum target_xfer_status
1414 record_btrace_target::xfer_partial (enum target_object object,
1415 const char *annex, gdb_byte *readbuf,
1416 const gdb_byte *writebuf, ULONGEST offset,
1417 ULONGEST len, ULONGEST *xfered_len)
1419 /* Filter out requests that don't make sense during replay. */
1420 if (replay_memory_access == replay_memory_access_read_only
1421 && !record_btrace_generating_corefile
1422 && record_is_replaying (inferior_ptid))
1426 case TARGET_OBJECT_MEMORY:
1428 struct target_section *section;
1430 /* We do not allow writing memory in general. */
1431 if (writebuf != NULL)
1434 return TARGET_XFER_UNAVAILABLE;
1437 /* We allow reading readonly memory. */
1438 section = target_section_by_addr (this, offset);
1439 if (section != NULL)
1441 /* Check if the section we found is readonly. */
1442 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1443 section->the_bfd_section)
1444 & SEC_READONLY) != 0)
1446 /* Truncate the request to fit into this section. */
1447 len = std::min (len, section->endaddr - offset);
1453 return TARGET_XFER_UNAVAILABLE;
1458 /* Forward the request. */
1459 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1460 offset, len, xfered_len);
1463 /* The insert_breakpoint method of target record-btrace. */
1466 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1467 struct bp_target_info *bp_tgt)
1472 /* Inserting breakpoints requires accessing memory. Allow it for the
1473 duration of this function. */
1474 old = replay_memory_access;
1475 replay_memory_access = replay_memory_access_read_write;
1480 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1482 CATCH (except, RETURN_MASK_ALL)
1484 replay_memory_access = old;
1485 throw_exception (except);
1488 replay_memory_access = old;
1493 /* The remove_breakpoint method of target record-btrace. */
1496 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1497 struct bp_target_info *bp_tgt,
1498 enum remove_bp_reason reason)
1503 /* Removing breakpoints requires accessing memory. Allow it for the
1504 duration of this function. */
1505 old = replay_memory_access;
1506 replay_memory_access = replay_memory_access_read_write;
1511 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1513 CATCH (except, RETURN_MASK_ALL)
1515 replay_memory_access = old;
1516 throw_exception (except);
1519 replay_memory_access = old;
1524 /* The fetch_registers method of target record-btrace. */
1527 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1529 struct btrace_insn_iterator *replay;
1530 struct thread_info *tp;
1532 tp = find_thread_ptid (regcache->ptid ());
1533 gdb_assert (tp != NULL);
1535 replay = tp->btrace.replay;
1536 if (replay != NULL && !record_btrace_generating_corefile)
1538 const struct btrace_insn *insn;
1539 struct gdbarch *gdbarch;
1542 gdbarch = regcache->arch ();
1543 pcreg = gdbarch_pc_regnum (gdbarch);
1547 /* We can only provide the PC register. */
1548 if (regno >= 0 && regno != pcreg)
1551 insn = btrace_insn_get (replay);
1552 gdb_assert (insn != NULL);
1554 regcache->raw_supply (regno, &insn->pc);
1557 this->beneath ()->fetch_registers (regcache, regno);
1560 /* The store_registers method of target record-btrace. */
1563 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1565 if (!record_btrace_generating_corefile
1566 && record_is_replaying (regcache->ptid ()))
1567 error (_("Cannot write registers while replaying."));
1569 gdb_assert (may_write_registers != 0);
1571 this->beneath ()->store_registers (regcache, regno);
1574 /* The prepare_to_store method of target record-btrace. */
1577 record_btrace_target::prepare_to_store (struct regcache *regcache)
1579 if (!record_btrace_generating_corefile
1580 && record_is_replaying (regcache->ptid ()))
1583 this->beneath ()->prepare_to_store (regcache);
1586 /* The branch trace frame cache. */
1588 struct btrace_frame_cache
1591 struct thread_info *tp;
1593 /* The frame info. */
1594 struct frame_info *frame;
1596 /* The branch trace function segment. */
1597 const struct btrace_function *bfun;
1600 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1602 static htab_t bfcache;
1604 /* hash_f for htab_create_alloc of bfcache. */
1607 bfcache_hash (const void *arg)
1609 const struct btrace_frame_cache *cache
1610 = (const struct btrace_frame_cache *) arg;
1612 return htab_hash_pointer (cache->frame);
1615 /* eq_f for htab_create_alloc of bfcache. */
1618 bfcache_eq (const void *arg1, const void *arg2)
1620 const struct btrace_frame_cache *cache1
1621 = (const struct btrace_frame_cache *) arg1;
1622 const struct btrace_frame_cache *cache2
1623 = (const struct btrace_frame_cache *) arg2;
1625 return cache1->frame == cache2->frame;
1628 /* Create a new btrace frame cache. */
1630 static struct btrace_frame_cache *
1631 bfcache_new (struct frame_info *frame)
1633 struct btrace_frame_cache *cache;
1636 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1637 cache->frame = frame;
1639 slot = htab_find_slot (bfcache, cache, INSERT);
1640 gdb_assert (*slot == NULL);
1646 /* Extract the branch trace function from a branch trace frame. */
1648 static const struct btrace_function *
1649 btrace_get_frame_function (struct frame_info *frame)
1651 const struct btrace_frame_cache *cache;
1652 struct btrace_frame_cache pattern;
1655 pattern.frame = frame;
1657 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1661 cache = (const struct btrace_frame_cache *) *slot;
1665 /* Implement stop_reason method for record_btrace_frame_unwind. */
1667 static enum unwind_stop_reason
1668 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1671 const struct btrace_frame_cache *cache;
1672 const struct btrace_function *bfun;
1674 cache = (const struct btrace_frame_cache *) *this_cache;
1676 gdb_assert (bfun != NULL);
1679 return UNWIND_UNAVAILABLE;
1681 return UNWIND_NO_REASON;
1684 /* Implement this_id method for record_btrace_frame_unwind. */
1687 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1688 struct frame_id *this_id)
1690 const struct btrace_frame_cache *cache;
1691 const struct btrace_function *bfun;
1692 struct btrace_call_iterator it;
1693 CORE_ADDR code, special;
1695 cache = (const struct btrace_frame_cache *) *this_cache;
1698 gdb_assert (bfun != NULL);
1700 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1701 bfun = btrace_call_get (&it);
1703 code = get_frame_func (this_frame);
1704 special = bfun->number;
1706 *this_id = frame_id_build_unavailable_stack_special (code, special);
1708 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1709 btrace_get_bfun_name (cache->bfun),
1710 core_addr_to_string_nz (this_id->code_addr),
1711 core_addr_to_string_nz (this_id->special_addr));
1714 /* Implement prev_register method for record_btrace_frame_unwind. */
1716 static struct value *
1717 record_btrace_frame_prev_register (struct frame_info *this_frame,
1721 const struct btrace_frame_cache *cache;
1722 const struct btrace_function *bfun, *caller;
1723 struct btrace_call_iterator it;
1724 struct gdbarch *gdbarch;
1728 gdbarch = get_frame_arch (this_frame);
1729 pcreg = gdbarch_pc_regnum (gdbarch);
1730 if (pcreg < 0 || regnum != pcreg)
1731 throw_error (NOT_AVAILABLE_ERROR,
1732 _("Registers are not available in btrace record history"));
1734 cache = (const struct btrace_frame_cache *) *this_cache;
1736 gdb_assert (bfun != NULL);
1738 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1739 throw_error (NOT_AVAILABLE_ERROR,
1740 _("No caller in btrace record history"));
1742 caller = btrace_call_get (&it);
1744 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1745 pc = caller->insn.front ().pc;
1748 pc = caller->insn.back ().pc;
1749 pc += gdb_insn_length (gdbarch, pc);
1752 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1753 btrace_get_bfun_name (bfun), bfun->level,
1754 core_addr_to_string_nz (pc));
1756 return frame_unwind_got_address (this_frame, regnum, pc);
1759 /* Implement sniffer method for record_btrace_frame_unwind. */
1762 record_btrace_frame_sniffer (const struct frame_unwind *self,
1763 struct frame_info *this_frame,
1766 const struct btrace_function *bfun;
1767 struct btrace_frame_cache *cache;
1768 struct thread_info *tp;
1769 struct frame_info *next;
1771 /* THIS_FRAME does not contain a reference to its thread. */
1772 tp = inferior_thread ();
1775 next = get_next_frame (this_frame);
1778 const struct btrace_insn_iterator *replay;
1780 replay = tp->btrace.replay;
1782 bfun = &replay->btinfo->functions[replay->call_index];
1786 const struct btrace_function *callee;
1787 struct btrace_call_iterator it;
1789 callee = btrace_get_frame_function (next);
1790 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1793 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1796 bfun = btrace_call_get (&it);
1802 DEBUG ("[frame] sniffed frame for %s on level %d",
1803 btrace_get_bfun_name (bfun), bfun->level);
1805 /* This is our frame. Initialize the frame cache. */
1806 cache = bfcache_new (this_frame);
1810 *this_cache = cache;
1814 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1817 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1818 struct frame_info *this_frame,
1821 const struct btrace_function *bfun, *callee;
1822 struct btrace_frame_cache *cache;
1823 struct btrace_call_iterator it;
1824 struct frame_info *next;
1825 struct thread_info *tinfo;
1827 next = get_next_frame (this_frame);
1831 callee = btrace_get_frame_function (next);
1835 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1838 tinfo = inferior_thread ();
1839 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1842 bfun = btrace_call_get (&it);
1844 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1845 btrace_get_bfun_name (bfun), bfun->level);
1847 /* This is our frame. Initialize the frame cache. */
1848 cache = bfcache_new (this_frame);
1852 *this_cache = cache;
1857 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1859 struct btrace_frame_cache *cache;
1862 cache = (struct btrace_frame_cache *) this_cache;
1864 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1865 gdb_assert (slot != NULL);
1867 htab_remove_elt (bfcache, cache);
1870 /* btrace recording does not store previous memory content, neither the stack
1871 frames content. Any unwinding would return errorneous results as the stack
1872 contents no longer matches the changed PC value restored from history.
1873 Therefore this unwinder reports any possibly unwound registers as
1876 const struct frame_unwind record_btrace_frame_unwind =
1879 record_btrace_frame_unwind_stop_reason,
1880 record_btrace_frame_this_id,
1881 record_btrace_frame_prev_register,
1883 record_btrace_frame_sniffer,
1884 record_btrace_frame_dealloc_cache
1887 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1890 record_btrace_frame_unwind_stop_reason,
1891 record_btrace_frame_this_id,
1892 record_btrace_frame_prev_register,
1894 record_btrace_tailcall_frame_sniffer,
1895 record_btrace_frame_dealloc_cache
1898 /* Implement the get_unwinder method. */
1900 const struct frame_unwind *
1901 record_btrace_target::get_unwinder ()
1903 return &record_btrace_frame_unwind;
1906 /* Implement the get_tailcall_unwinder method. */
1908 const struct frame_unwind *
1909 record_btrace_target::get_tailcall_unwinder ()
1911 return &record_btrace_tailcall_frame_unwind;
1914 /* Return a human-readable string for FLAG. */
1917 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1925 return "reverse-step";
1931 return "reverse-cont";
1940 /* Indicate that TP should be resumed according to FLAG. */
1943 record_btrace_resume_thread (struct thread_info *tp,
1944 enum btrace_thread_flag flag)
1946 struct btrace_thread_info *btinfo;
1948 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1949 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1951 btinfo = &tp->btrace;
1953 /* Fetch the latest branch trace. */
1954 btrace_fetch (tp, record_btrace_get_cpu ());
1956 /* A resume request overwrites a preceding resume or stop request. */
1957 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1958 btinfo->flags |= flag;
1961 /* Get the current frame for TP. */
1963 static struct frame_id
1964 get_thread_current_frame_id (struct thread_info *tp)
1969 /* Set current thread, which is implicitly used by
1970 get_current_frame. */
1971 scoped_restore_current_thread restore_thread;
1973 switch_to_thread (tp);
1975 /* Clear the executing flag to allow changes to the current frame.
1976 We are not actually running, yet. We just started a reverse execution
1977 command or a record goto command.
1978 For the latter, EXECUTING is false and this has no effect.
1979 For the former, EXECUTING is true and we're in wait, about to
1980 move the thread. Since we need to recompute the stack, we temporarily
1981 set EXECUTING to flase. */
1982 executing = tp->executing;
1983 set_executing (inferior_ptid, false);
1988 id = get_frame_id (get_current_frame ());
1990 CATCH (except, RETURN_MASK_ALL)
1992 /* Restore the previous execution state. */
1993 set_executing (inferior_ptid, executing);
1995 throw_exception (except);
1999 /* Restore the previous execution state. */
2000 set_executing (inferior_ptid, executing);
2005 /* Start replaying a thread. */
2007 static struct btrace_insn_iterator *
2008 record_btrace_start_replaying (struct thread_info *tp)
2010 struct btrace_insn_iterator *replay;
2011 struct btrace_thread_info *btinfo;
2013 btinfo = &tp->btrace;
2016 /* We can't start replaying without trace. */
2017 if (btinfo->functions.empty ())
2020 /* GDB stores the current frame_id when stepping in order to detects steps
2022 Since frames are computed differently when we're replaying, we need to
2023 recompute those stored frames and fix them up so we can still detect
2024 subroutines after we started replaying. */
2027 struct frame_id frame_id;
2028 int upd_step_frame_id, upd_step_stack_frame_id;
2030 /* The current frame without replaying - computed via normal unwind. */
2031 frame_id = get_thread_current_frame_id (tp);
2033 /* Check if we need to update any stepping-related frame id's. */
2034 upd_step_frame_id = frame_id_eq (frame_id,
2035 tp->control.step_frame_id);
2036 upd_step_stack_frame_id = frame_id_eq (frame_id,
2037 tp->control.step_stack_frame_id);
2039 /* We start replaying at the end of the branch trace. This corresponds
2040 to the current instruction. */
2041 replay = XNEW (struct btrace_insn_iterator);
2042 btrace_insn_end (replay, btinfo);
2044 /* Skip gaps at the end of the trace. */
2045 while (btrace_insn_get (replay) == NULL)
2049 steps = btrace_insn_prev (replay, 1);
2051 error (_("No trace."));
2054 /* We're not replaying, yet. */
2055 gdb_assert (btinfo->replay == NULL);
2056 btinfo->replay = replay;
2058 /* Make sure we're not using any stale registers. */
2059 registers_changed_thread (tp);
2061 /* The current frame with replaying - computed via btrace unwind. */
2062 frame_id = get_thread_current_frame_id (tp);
2064 /* Replace stepping related frames where necessary. */
2065 if (upd_step_frame_id)
2066 tp->control.step_frame_id = frame_id;
2067 if (upd_step_stack_frame_id)
2068 tp->control.step_stack_frame_id = frame_id;
2070 CATCH (except, RETURN_MASK_ALL)
2072 xfree (btinfo->replay);
2073 btinfo->replay = NULL;
2075 registers_changed_thread (tp);
2077 throw_exception (except);
2084 /* Stop replaying a thread. */
2087 record_btrace_stop_replaying (struct thread_info *tp)
2089 struct btrace_thread_info *btinfo;
2091 btinfo = &tp->btrace;
2093 xfree (btinfo->replay);
2094 btinfo->replay = NULL;
2096 /* Make sure we're not leaving any stale registers. */
2097 registers_changed_thread (tp);
2100 /* Stop replaying TP if it is at the end of its execution history. */
2103 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2105 struct btrace_insn_iterator *replay, end;
2106 struct btrace_thread_info *btinfo;
2108 btinfo = &tp->btrace;
2109 replay = btinfo->replay;
2114 btrace_insn_end (&end, btinfo);
2116 if (btrace_insn_cmp (replay, &end) == 0)
2117 record_btrace_stop_replaying (tp);
2120 /* The resume method of target record-btrace. */
2123 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2125 enum btrace_thread_flag flag, cflag;
2127 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2128 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2129 step ? "step" : "cont");
2131 /* Store the execution direction of the last resume.
2133 If there is more than one resume call, we have to rely on infrun
2134 to not change the execution direction in-between. */
2135 record_btrace_resume_exec_dir = ::execution_direction;
2137 /* As long as we're not replaying, just forward the request.
2139 For non-stop targets this means that no thread is replaying. In order to
2140 make progress, we may need to explicitly move replaying threads to the end
2141 of their execution history. */
2142 if ((::execution_direction != EXEC_REVERSE)
2143 && !record_is_replaying (minus_one_ptid))
2145 this->beneath ()->resume (ptid, step, signal);
2149 /* Compute the btrace thread flag for the requested move. */
2150 if (::execution_direction == EXEC_REVERSE)
2152 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2157 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2161 /* We just indicate the resume intent here. The actual stepping happens in
2162 record_btrace_wait below.
2164 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2165 if (!target_is_non_stop_p ())
2167 gdb_assert (inferior_ptid.matches (ptid));
2169 for (thread_info *tp : all_non_exited_threads (ptid))
2171 if (tp->ptid.matches (inferior_ptid))
2172 record_btrace_resume_thread (tp, flag);
2174 record_btrace_resume_thread (tp, cflag);
2179 for (thread_info *tp : all_non_exited_threads (ptid))
2180 record_btrace_resume_thread (tp, flag);
2183 /* Async support. */
2184 if (target_can_async_p ())
2187 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2191 /* The commit_resume method of target record-btrace. */
2194 record_btrace_target::commit_resume ()
2196 if ((::execution_direction != EXEC_REVERSE)
2197 && !record_is_replaying (minus_one_ptid))
2198 beneath ()->commit_resume ();
2201 /* Cancel resuming TP. */
2204 record_btrace_cancel_resume (struct thread_info *tp)
2206 enum btrace_thread_flag flags;
2208 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2212 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2213 print_thread_id (tp),
2214 target_pid_to_str (tp->ptid), flags,
2215 btrace_thread_flag_to_str (flags));
2217 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2218 record_btrace_stop_replaying_at_end (tp);
2221 /* Return a target_waitstatus indicating that we ran out of history. */
2223 static struct target_waitstatus
2224 btrace_step_no_history (void)
2226 struct target_waitstatus status;
2228 status.kind = TARGET_WAITKIND_NO_HISTORY;
2233 /* Return a target_waitstatus indicating that a step finished. */
2235 static struct target_waitstatus
2236 btrace_step_stopped (void)
2238 struct target_waitstatus status;
2240 status.kind = TARGET_WAITKIND_STOPPED;
2241 status.value.sig = GDB_SIGNAL_TRAP;
2246 /* Return a target_waitstatus indicating that a thread was stopped as
2249 static struct target_waitstatus
2250 btrace_step_stopped_on_request (void)
2252 struct target_waitstatus status;
2254 status.kind = TARGET_WAITKIND_STOPPED;
2255 status.value.sig = GDB_SIGNAL_0;
2260 /* Return a target_waitstatus indicating a spurious stop. */
2262 static struct target_waitstatus
2263 btrace_step_spurious (void)
2265 struct target_waitstatus status;
2267 status.kind = TARGET_WAITKIND_SPURIOUS;
2272 /* Return a target_waitstatus indicating that the thread was not resumed. */
2274 static struct target_waitstatus
2275 btrace_step_no_resumed (void)
2277 struct target_waitstatus status;
2279 status.kind = TARGET_WAITKIND_NO_RESUMED;
2284 /* Return a target_waitstatus indicating that we should wait again. */
2286 static struct target_waitstatus
2287 btrace_step_again (void)
2289 struct target_waitstatus status;
2291 status.kind = TARGET_WAITKIND_IGNORE;
2296 /* Clear the record histories. */
2299 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2301 xfree (btinfo->insn_history);
2302 xfree (btinfo->call_history);
2304 btinfo->insn_history = NULL;
2305 btinfo->call_history = NULL;
2308 /* Check whether TP's current replay position is at a breakpoint. */
2311 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2313 struct btrace_insn_iterator *replay;
2314 struct btrace_thread_info *btinfo;
2315 const struct btrace_insn *insn;
2317 btinfo = &tp->btrace;
2318 replay = btinfo->replay;
2323 insn = btrace_insn_get (replay);
2327 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2328 &btinfo->stop_reason);
2331 /* Step one instruction in forward direction. */
2333 static struct target_waitstatus
2334 record_btrace_single_step_forward (struct thread_info *tp)
2336 struct btrace_insn_iterator *replay, end, start;
2337 struct btrace_thread_info *btinfo;
2339 btinfo = &tp->btrace;
2340 replay = btinfo->replay;
2342 /* We're done if we're not replaying. */
2344 return btrace_step_no_history ();
2346 /* Check if we're stepping a breakpoint. */
2347 if (record_btrace_replay_at_breakpoint (tp))
2348 return btrace_step_stopped ();
2350 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2351 jump back to the instruction at which we started. */
2357 /* We will bail out here if we continue stepping after reaching the end
2358 of the execution history. */
2359 steps = btrace_insn_next (replay, 1);
2363 return btrace_step_no_history ();
2366 while (btrace_insn_get (replay) == NULL);
2368 /* Determine the end of the instruction trace. */
2369 btrace_insn_end (&end, btinfo);
2371 /* The execution trace contains (and ends with) the current instruction.
2372 This instruction has not been executed, yet, so the trace really ends
2373 one instruction earlier. */
2374 if (btrace_insn_cmp (replay, &end) == 0)
2375 return btrace_step_no_history ();
2377 return btrace_step_spurious ();
2380 /* Step one instruction in backward direction. */
2382 static struct target_waitstatus
2383 record_btrace_single_step_backward (struct thread_info *tp)
2385 struct btrace_insn_iterator *replay, start;
2386 struct btrace_thread_info *btinfo;
2388 btinfo = &tp->btrace;
2389 replay = btinfo->replay;
2391 /* Start replaying if we're not already doing so. */
2393 replay = record_btrace_start_replaying (tp);
2395 /* If we can't step any further, we reached the end of the history.
2396 Skip gaps during replay. If we end up at a gap (at the beginning of
2397 the trace), jump back to the instruction at which we started. */
2403 steps = btrace_insn_prev (replay, 1);
2407 return btrace_step_no_history ();
2410 while (btrace_insn_get (replay) == NULL);
2412 /* Check if we're stepping a breakpoint.
2414 For reverse-stepping, this check is after the step. There is logic in
2415 infrun.c that handles reverse-stepping separately. See, for example,
2416 proceed and adjust_pc_after_break.
2418 This code assumes that for reverse-stepping, PC points to the last
2419 de-executed instruction, whereas for forward-stepping PC points to the
2420 next to-be-executed instruction. */
2421 if (record_btrace_replay_at_breakpoint (tp))
2422 return btrace_step_stopped ();
2424 return btrace_step_spurious ();
2427 /* Step a single thread. */
2429 static struct target_waitstatus
2430 record_btrace_step_thread (struct thread_info *tp)
2432 struct btrace_thread_info *btinfo;
2433 struct target_waitstatus status;
2434 enum btrace_thread_flag flags;
2436 btinfo = &tp->btrace;
2438 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2439 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2441 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2442 target_pid_to_str (tp->ptid), flags,
2443 btrace_thread_flag_to_str (flags));
2445 /* We can't step without an execution history. */
2446 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2447 return btrace_step_no_history ();
2452 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2455 return btrace_step_stopped_on_request ();
2458 status = record_btrace_single_step_forward (tp);
2459 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2462 return btrace_step_stopped ();
2465 status = record_btrace_single_step_backward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2469 return btrace_step_stopped ();
2472 status = record_btrace_single_step_forward (tp);
2473 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2476 btinfo->flags |= flags;
2477 return btrace_step_again ();
2480 status = record_btrace_single_step_backward (tp);
2481 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2484 btinfo->flags |= flags;
2485 return btrace_step_again ();
2488 /* We keep threads moving at the end of their execution history. The wait
2489 method will stop the thread for whom the event is reported. */
2490 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2491 btinfo->flags |= flags;
2496 /* Announce further events if necessary. */
2499 record_btrace_maybe_mark_async_event
2500 (const std::vector<thread_info *> &moving,
2501 const std::vector<thread_info *> &no_history)
2503 bool more_moving = !moving.empty ();
2504 bool more_no_history = !no_history.empty ();;
2506 if (!more_moving && !more_no_history)
2510 DEBUG ("movers pending");
2512 if (more_no_history)
2513 DEBUG ("no-history pending");
2515 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2518 /* The wait method of target record-btrace. */
2521 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2524 std::vector<thread_info *> moving;
2525 std::vector<thread_info *> no_history;
2527 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2529 /* As long as we're not replaying, just forward the request. */
2530 if ((::execution_direction != EXEC_REVERSE)
2531 && !record_is_replaying (minus_one_ptid))
2533 return this->beneath ()->wait (ptid, status, options);
2536 /* Keep a work list of moving threads. */
2537 for (thread_info *tp : all_non_exited_threads (ptid))
2538 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2539 moving.push_back (tp);
2541 if (moving.empty ())
2543 *status = btrace_step_no_resumed ();
2545 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2546 target_waitstatus_to_string (status).c_str ());
2551 /* Step moving threads one by one, one step each, until either one thread
2552 reports an event or we run out of threads to step.
2554 When stepping more than one thread, chances are that some threads reach
2555 the end of their execution history earlier than others. If we reported
2556 this immediately, all-stop on top of non-stop would stop all threads and
2557 resume the same threads next time. And we would report the same thread
2558 having reached the end of its execution history again.
2560 In the worst case, this would starve the other threads. But even if other
2561 threads would be allowed to make progress, this would result in far too
2562 many intermediate stops.
2564 We therefore delay the reporting of "no execution history" until we have
2565 nothing else to report. By this time, all threads should have moved to
2566 either the beginning or the end of their execution history. There will
2567 be a single user-visible stop. */
2568 struct thread_info *eventing = NULL;
2569 while ((eventing == NULL) && !moving.empty ())
2571 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2573 thread_info *tp = moving[ix];
2575 *status = record_btrace_step_thread (tp);
2577 switch (status->kind)
2579 case TARGET_WAITKIND_IGNORE:
2583 case TARGET_WAITKIND_NO_HISTORY:
2584 no_history.push_back (ordered_remove (moving, ix));
2588 eventing = unordered_remove (moving, ix);
2594 if (eventing == NULL)
2596 /* We started with at least one moving thread. This thread must have
2597 either stopped or reached the end of its execution history.
2599 In the former case, EVENTING must not be NULL.
2600 In the latter case, NO_HISTORY must not be empty. */
2601 gdb_assert (!no_history.empty ());
2603 /* We kept threads moving at the end of their execution history. Stop
2604 EVENTING now that we are going to report its stop. */
2605 eventing = unordered_remove (no_history, 0);
2606 eventing->btrace.flags &= ~BTHR_MOVE;
2608 *status = btrace_step_no_history ();
2611 gdb_assert (eventing != NULL);
2613 /* We kept threads replaying at the end of their execution history. Stop
2614 replaying EVENTING now that we are going to report its stop. */
2615 record_btrace_stop_replaying_at_end (eventing);
2617 /* Stop all other threads. */
2618 if (!target_is_non_stop_p ())
2620 for (thread_info *tp : all_non_exited_threads ())
2621 record_btrace_cancel_resume (tp);
2624 /* In async mode, we need to announce further events. */
2625 if (target_is_async_p ())
2626 record_btrace_maybe_mark_async_event (moving, no_history);
2628 /* Start record histories anew from the current position. */
2629 record_btrace_clear_histories (&eventing->btrace);
2631 /* We moved the replay position but did not update registers. */
2632 registers_changed_thread (eventing);
2634 DEBUG ("wait ended by thread %s (%s): %s",
2635 print_thread_id (eventing),
2636 target_pid_to_str (eventing->ptid),
2637 target_waitstatus_to_string (status).c_str ());
2639 return eventing->ptid;
2642 /* The stop method of target record-btrace. */
2645 record_btrace_target::stop (ptid_t ptid)
2647 DEBUG ("stop %s", target_pid_to_str (ptid));
2649 /* As long as we're not replaying, just forward the request. */
2650 if ((::execution_direction != EXEC_REVERSE)
2651 && !record_is_replaying (minus_one_ptid))
2653 this->beneath ()->stop (ptid);
2657 for (thread_info *tp : all_non_exited_threads (ptid))
2659 tp->btrace.flags &= ~BTHR_MOVE;
2660 tp->btrace.flags |= BTHR_STOP;
2665 /* The can_execute_reverse method of target record-btrace. */
2668 record_btrace_target::can_execute_reverse ()
2673 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2676 record_btrace_target::stopped_by_sw_breakpoint ()
2678 if (record_is_replaying (minus_one_ptid))
2680 struct thread_info *tp = inferior_thread ();
2682 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2685 return this->beneath ()->stopped_by_sw_breakpoint ();
2688 /* The supports_stopped_by_sw_breakpoint method of target
2692 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2694 if (record_is_replaying (minus_one_ptid))
2697 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2700 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2703 record_btrace_target::stopped_by_hw_breakpoint ()
2705 if (record_is_replaying (minus_one_ptid))
2707 struct thread_info *tp = inferior_thread ();
2709 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2712 return this->beneath ()->stopped_by_hw_breakpoint ();
2715 /* The supports_stopped_by_hw_breakpoint method of target
2719 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2721 if (record_is_replaying (minus_one_ptid))
2724 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2727 /* The update_thread_list method of target record-btrace. */
2730 record_btrace_target::update_thread_list ()
2732 /* We don't add or remove threads during replay. */
2733 if (record_is_replaying (minus_one_ptid))
2736 /* Forward the request. */
2737 this->beneath ()->update_thread_list ();
2740 /* The thread_alive method of target record-btrace. */
2743 record_btrace_target::thread_alive (ptid_t ptid)
2745 /* We don't add or remove threads during replay. */
2746 if (record_is_replaying (minus_one_ptid))
2749 /* Forward the request. */
2750 return this->beneath ()->thread_alive (ptid);
2753 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2757 record_btrace_set_replay (struct thread_info *tp,
2758 const struct btrace_insn_iterator *it)
2760 struct btrace_thread_info *btinfo;
2762 btinfo = &tp->btrace;
2765 record_btrace_stop_replaying (tp);
2768 if (btinfo->replay == NULL)
2769 record_btrace_start_replaying (tp);
2770 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2773 *btinfo->replay = *it;
2774 registers_changed_thread (tp);
2777 /* Start anew from the new replay position. */
2778 record_btrace_clear_histories (btinfo);
2780 inferior_thread ()->suspend.stop_pc
2781 = regcache_read_pc (get_current_regcache ());
2782 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2785 /* The goto_record_begin method of target record-btrace. */
2788 record_btrace_target::goto_record_begin ()
2790 struct thread_info *tp;
2791 struct btrace_insn_iterator begin;
2793 tp = require_btrace_thread ();
2795 btrace_insn_begin (&begin, &tp->btrace);
2797 /* Skip gaps at the beginning of the trace. */
2798 while (btrace_insn_get (&begin) == NULL)
2802 steps = btrace_insn_next (&begin, 1);
2804 error (_("No trace."));
2807 record_btrace_set_replay (tp, &begin);
2810 /* The goto_record_end method of target record-btrace. */
2813 record_btrace_target::goto_record_end ()
2815 struct thread_info *tp;
2817 tp = require_btrace_thread ();
2819 record_btrace_set_replay (tp, NULL);
2822 /* The goto_record method of target record-btrace. */
2825 record_btrace_target::goto_record (ULONGEST insn)
2827 struct thread_info *tp;
2828 struct btrace_insn_iterator it;
2829 unsigned int number;
2834 /* Check for wrap-arounds. */
2836 error (_("Instruction number out of range."));
2838 tp = require_btrace_thread ();
2840 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2842 /* Check if the instruction could not be found or is a gap. */
2843 if (found == 0 || btrace_insn_get (&it) == NULL)
2844 error (_("No such instruction."));
2846 record_btrace_set_replay (tp, &it);
2849 /* The record_stop_replaying method of target record-btrace. */
2852 record_btrace_target::record_stop_replaying ()
2854 for (thread_info *tp : all_non_exited_threads ())
2855 record_btrace_stop_replaying (tp);
2858 /* The execution_direction target method. */
2860 enum exec_direction_kind
2861 record_btrace_target::execution_direction ()
2863 return record_btrace_resume_exec_dir;
2866 /* The prepare_to_generate_core target method. */
2869 record_btrace_target::prepare_to_generate_core ()
2871 record_btrace_generating_corefile = 1;
2874 /* The done_generating_core target method. */
2877 record_btrace_target::done_generating_core ()
2879 record_btrace_generating_corefile = 0;
2882 /* Start recording in BTS format. */
2885 cmd_record_btrace_bts_start (const char *args, int from_tty)
2887 if (args != NULL && *args != 0)
2888 error (_("Invalid argument."));
2890 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2894 execute_command ("target record-btrace", from_tty);
2896 CATCH (exception, RETURN_MASK_ALL)
2898 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2899 throw_exception (exception);
2904 /* Start recording in Intel Processor Trace format. */
2907 cmd_record_btrace_pt_start (const char *args, int from_tty)
2909 if (args != NULL && *args != 0)
2910 error (_("Invalid argument."));
2912 record_btrace_conf.format = BTRACE_FORMAT_PT;
2916 execute_command ("target record-btrace", from_tty);
2918 CATCH (exception, RETURN_MASK_ALL)
2920 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2921 throw_exception (exception);
2926 /* Alias for "target record". */
2929 cmd_record_btrace_start (const char *args, int from_tty)
2931 if (args != NULL && *args != 0)
2932 error (_("Invalid argument."));
2934 record_btrace_conf.format = BTRACE_FORMAT_PT;
2938 execute_command ("target record-btrace", from_tty);
2940 CATCH (exception, RETURN_MASK_ALL)
2942 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2946 execute_command ("target record-btrace", from_tty);
2948 CATCH (ex, RETURN_MASK_ALL)
2950 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2951 throw_exception (ex);
2958 /* The "set record btrace" command. */
2961 cmd_set_record_btrace (const char *args, int from_tty)
2963 printf_unfiltered (_("\"set record btrace\" must be followed "
2964 "by an appropriate subcommand.\n"));
2965 help_list (set_record_btrace_cmdlist, "set record btrace ",
2966 all_commands, gdb_stdout);
2969 /* The "show record btrace" command. */
2972 cmd_show_record_btrace (const char *args, int from_tty)
2974 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2977 /* The "show record btrace replay-memory-access" command. */
2980 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2981 struct cmd_list_element *c, const char *value)
2983 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2984 replay_memory_access);
2987 /* The "set record btrace cpu none" command. */
2990 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2992 if (args != nullptr && *args != 0)
2993 error (_("Trailing junk: '%s'."), args);
2995 record_btrace_cpu_state = CS_NONE;
2998 /* The "set record btrace cpu auto" command. */
3001 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3003 if (args != nullptr && *args != 0)
3004 error (_("Trailing junk: '%s'."), args);
3006 record_btrace_cpu_state = CS_AUTO;
3009 /* The "set record btrace cpu" command. */
3012 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3014 if (args == nullptr)
3017 /* We use a hard-coded vendor string for now. */
3018 unsigned int family, model, stepping;
3019 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3020 &model, &l1, &stepping, &l2);
3023 if (strlen (args) != l2)
3024 error (_("Trailing junk: '%s'."), args + l2);
3026 else if (matches == 2)
3028 if (strlen (args) != l1)
3029 error (_("Trailing junk: '%s'."), args + l1);
3034 error (_("Bad format. See \"help set record btrace cpu\"."));
3036 if (USHRT_MAX < family)
3037 error (_("Cpu family too big."));
3039 if (UCHAR_MAX < model)
3040 error (_("Cpu model too big."));
3042 if (UCHAR_MAX < stepping)
3043 error (_("Cpu stepping too big."));
3045 record_btrace_cpu.vendor = CV_INTEL;
3046 record_btrace_cpu.family = family;
3047 record_btrace_cpu.model = model;
3048 record_btrace_cpu.stepping = stepping;
3050 record_btrace_cpu_state = CS_CPU;
3053 /* The "show record btrace cpu" command. */
3056 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3058 if (args != nullptr && *args != 0)
3059 error (_("Trailing junk: '%s'."), args);
3061 switch (record_btrace_cpu_state)
3064 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3068 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3072 switch (record_btrace_cpu.vendor)
3075 if (record_btrace_cpu.stepping == 0)
3076 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3077 record_btrace_cpu.family,
3078 record_btrace_cpu.model);
3080 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3081 record_btrace_cpu.family,
3082 record_btrace_cpu.model,
3083 record_btrace_cpu.stepping);
3088 error (_("Internal error: bad cpu state."));
3091 /* The "s record btrace bts" command. */
3094 cmd_set_record_btrace_bts (const char *args, int from_tty)
3096 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3097 "by an appropriate subcommand.\n"));
3098 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3099 all_commands, gdb_stdout);
3102 /* The "show record btrace bts" command. */
3105 cmd_show_record_btrace_bts (const char *args, int from_tty)
3107 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3110 /* The "set record btrace pt" command. */
3113 cmd_set_record_btrace_pt (const char *args, int from_tty)
3115 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3116 "by an appropriate subcommand.\n"));
3117 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3118 all_commands, gdb_stdout);
3121 /* The "show record btrace pt" command. */
3124 cmd_show_record_btrace_pt (const char *args, int from_tty)
3126 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3129 /* The "record bts buffer-size" show value function. */
3132 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3133 struct cmd_list_element *c,
3136 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3140 /* The "record pt buffer-size" show value function. */
3143 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3144 struct cmd_list_element *c,
3147 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3151 /* Initialize btrace commands. */
3154 _initialize_record_btrace (void)
3156 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3157 _("Start branch trace recording."), &record_btrace_cmdlist,
3158 "record btrace ", 0, &record_cmdlist);
3159 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3161 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3163 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3164 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3165 This format may not be available on all processors."),
3166 &record_btrace_cmdlist);
3167 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3169 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3171 Start branch trace recording in Intel Processor Trace format.\n\n\
3172 This format may not be available on all processors."),
3173 &record_btrace_cmdlist);
3174 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3176 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3177 _("Set record options"), &set_record_btrace_cmdlist,
3178 "set record btrace ", 0, &set_record_cmdlist);
3180 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3181 _("Show record options"), &show_record_btrace_cmdlist,
3182 "show record btrace ", 0, &show_record_cmdlist);
3184 add_setshow_enum_cmd ("replay-memory-access", no_class,
3185 replay_memory_access_types, &replay_memory_access, _("\
3186 Set what memory accesses are allowed during replay."), _("\
3187 Show what memory accesses are allowed during replay."),
3188 _("Default is READ-ONLY.\n\n\
3189 The btrace record target does not trace data.\n\
3190 The memory therefore corresponds to the live target and not \
3191 to the current replay position.\n\n\
3192 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3193 When READ-WRITE, allow accesses to read-only and read-write memory during \
3195 NULL, cmd_show_replay_memory_access,
3196 &set_record_btrace_cmdlist,
3197 &show_record_btrace_cmdlist);
3199 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3201 Set the cpu to be used for trace decode.\n\n\
3202 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3203 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3204 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3205 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3206 When GDB does not support that cpu, this option can be used to enable\n\
3207 workarounds for a similar cpu that GDB supports.\n\n\
3208 When set to \"none\", errata workarounds are disabled."),
3209 &set_record_btrace_cpu_cmdlist,
3210 _("set record btrace cpu "), 1,
3211 &set_record_btrace_cmdlist);
3213 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3214 Automatically determine the cpu to be used for trace decode."),
3215 &set_record_btrace_cpu_cmdlist);
3217 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3218 Do not enable errata workarounds for trace decode."),
3219 &set_record_btrace_cpu_cmdlist);
3221 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3222 Show the cpu to be used for trace decode."),
3223 &show_record_btrace_cmdlist);
3225 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3226 _("Set record btrace bts options"),
3227 &set_record_btrace_bts_cmdlist,
3228 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3230 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3231 _("Show record btrace bts options"),
3232 &show_record_btrace_bts_cmdlist,
3233 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3235 add_setshow_uinteger_cmd ("buffer-size", no_class,
3236 &record_btrace_conf.bts.size,
3237 _("Set the record/replay bts buffer size."),
3238 _("Show the record/replay bts buffer size."), _("\
3239 When starting recording request a trace buffer of this size. \
3240 The actual buffer size may differ from the requested size. \
3241 Use \"info record\" to see the actual buffer size.\n\n\
3242 Bigger buffers allow longer recording but also take more time to process \
3243 the recorded execution trace.\n\n\
3244 The trace buffer size may not be changed while recording."), NULL,
3245 show_record_bts_buffer_size_value,
3246 &set_record_btrace_bts_cmdlist,
3247 &show_record_btrace_bts_cmdlist);
3249 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3250 _("Set record btrace pt options"),
3251 &set_record_btrace_pt_cmdlist,
3252 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3254 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3255 _("Show record btrace pt options"),
3256 &show_record_btrace_pt_cmdlist,
3257 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3259 add_setshow_uinteger_cmd ("buffer-size", no_class,
3260 &record_btrace_conf.pt.size,
3261 _("Set the record/replay pt buffer size."),
3262 _("Show the record/replay pt buffer size."), _("\
3263 Bigger buffers allow longer recording but also take more time to process \
3264 the recorded execution.\n\
3265 The actual buffer size may differ from the requested size. Use \"info record\" \
3266 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3267 &set_record_btrace_pt_cmdlist,
3268 &show_record_btrace_pt_cmdlist);
3270 add_target (record_btrace_target_info, record_btrace_target_open);
3272 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3275 record_btrace_conf.bts.size = 64 * 1024;
3276 record_btrace_conf.pt.size = 16 * 1024;