1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
/* Identifying information for the record-btrace target, shown by
   "help target" and used by record_btrace_target::info ().
   NOTE(review): the shortname field and the closing "};" appear to have
   been lost in this copy; restore from upstream before building.  */
static const target_info record_btrace_target_info = {
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
/* The target_ops of record-btrace.

   This target sits at the record stratum on top of the execution target
   and provides branch-trace based execution history and replay.
   NOTE(review): braces, access specifiers, and a few declaration lines
   (e.g. "void kill () override") appear to have been lost in this copy;
   restore from upstream before building.  */

class record_btrace_target final : public target_ops

  /* Place this target at the record stratum of the target stack.  */
  record_btrace_target ()
  { to_stratum = record_stratum; }

  const target_info &info () const override
  { return record_btrace_target_info; }

  void close () override;
  void async (int) override;

  /* Detach forwards to the generic record detach helper.  */
  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  /* Instruction and function-call history browsing.  */
  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  /* Memory access is filtered while replaying; see xfer_partial.  */
  enum target_xfer_status xfer_partial (enum target_object object,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;

  /* Navigation within the recorded history.  */
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
/* The single instance of the record-btrace target that gets pushed onto
   the target stack.  */
static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
/* NOTE(review): the braces of this array initializer appear to have been
   lost in this copy.  */
static const char *const replay_memory_access_types[] =
  replay_memory_access_read_only,
  replay_memory_access_read_write,

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.
   NOTE(review): the enumerator list (CS_AUTO, CS_NONE, CS_CPU) appears to
   have been lost in this copy.  */
enum record_btrace_cpu_state_kind

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.
   NOTE(review): the "do {" and "} while (0)" framing lines appear to have
   been lost in this copy.  */
#define DEBUG(msg, args...) \
    if (record_debug != 0) \
      fprintf_unfiltered (gdb_stdlog, \
			  "[record-btrace] " msg "\n", ##args); \
218 /* Return the cpu configured by the user. Returns NULL if the cpu was
219 configured as auto. */
220 const struct btrace_cpu *
221 record_btrace_get_cpu (void)
223 switch (record_btrace_cpu_state)
229 record_btrace_cpu.vendor = CV_UNKNOWN;
232 return &record_btrace_cpu;
235 error (_("Internal error: bad record btrace cpu state."));
238 /* Update the branch trace for the current thread and return a pointer to its
241 Throws an error if there is no thread or no trace. This function never
244 static struct thread_info *
245 require_btrace_thread (void)
247 struct thread_info *tp;
251 tp = find_thread_ptid (inferior_ptid);
253 error (_("No thread."));
255 validate_registers_access ();
257 btrace_fetch (tp, record_btrace_get_cpu ());
259 if (btrace_is_empty (tp))
260 error (_("No trace."));
265 /* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
268 Throws an error if there is no thread or no trace. This function never
271 static struct btrace_thread_info *
272 require_btrace (void)
274 struct thread_info *tp;
276 tp = require_btrace_thread ();
281 /* Enable branch tracing for one thread. Warn on errors. */
284 record_btrace_enable_warn (struct thread_info *tp)
288 btrace_enable (tp, &record_btrace_conf);
290 CATCH (error, RETURN_MASK_ERROR)
292 warning ("%s", error.message);
297 /* Enable automatic tracing of new threads. */
300 record_btrace_auto_enable (void)
302 DEBUG ("attach thread observer");
304 gdb::observers::new_thread.attach (record_btrace_enable_warn,
305 record_btrace_thread_observer_token);
308 /* Disable automatic tracing of new threads. */
311 record_btrace_auto_disable (void)
313 DEBUG ("detach thread observer");
315 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
318 /* The record-btrace async event handler function. */
321 record_btrace_handle_async_inferior_event (gdb_client_data data)
323 inferior_event_handler (INF_REG_EVENT, NULL);
326 /* See record-btrace.h. */
329 record_btrace_push_target (void)
333 record_btrace_auto_enable ();
335 push_target (&record_btrace_ops);
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event,
340 record_btrace_generating_corefile = 0;
342 format = btrace_format_short_string (record_btrace_conf.format);
343 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
346 /* Disable btrace on a set of threads on scope exit. */
348 struct scoped_btrace_disable
350 scoped_btrace_disable () = default;
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
354 ~scoped_btrace_disable ()
356 for (thread_info *tp : m_threads)
360 void add_thread (thread_info *thread)
362 m_threads.push_front (thread);
371 std::forward_list<thread_info *> m_threads;
374 /* Open target record-btrace. */
377 record_btrace_target_open (const char *args, int from_tty)
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable;
382 struct thread_info *tp;
388 if (!target_has_execution)
389 error (_("The program is not being run."));
391 ALL_NON_EXITED_THREADS (tp)
392 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
394 btrace_enable (tp, &record_btrace_conf);
396 btrace_disable.add_thread (tp);
399 record_btrace_push_target ();
401 btrace_disable.discard ();
404 /* The stop_recording method of target record-btrace. */
407 record_btrace_target::stop_recording ()
409 struct thread_info *tp;
411 DEBUG ("stop recording");
413 record_btrace_auto_disable ();
415 ALL_NON_EXITED_THREADS (tp)
416 if (tp->btrace.target != NULL)
420 /* The disconnect method of target record-btrace. */
423 record_btrace_target::disconnect (const char *args,
426 struct target_ops *beneath = this->beneath;
428 /* Do not stop recording, just clean up GDB side. */
429 unpush_target (this);
431 /* Forward disconnect. */
432 beneath->disconnect (args, from_tty);
435 /* The close method of target record-btrace. */
438 record_btrace_target::close ()
440 struct thread_info *tp;
442 if (record_btrace_async_inferior_event_handler != NULL)
443 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
445 /* Make sure automatic recording gets disabled even if we did not stop
446 recording before closing the record-btrace target. */
447 record_btrace_auto_disable ();
449 /* We should have already stopped recording.
450 Tear down btrace in case we have not. */
451 ALL_NON_EXITED_THREADS (tp)
452 btrace_teardown (tp);
455 /* The async method of target record-btrace. */
458 record_btrace_target::async (int enable)
461 mark_async_event_handler (record_btrace_async_inferior_event_handler);
463 clear_async_event_handler (record_btrace_async_inferior_event_handler);
465 this->beneath->async (enable);
/* Adjusts the size and returns a human readable size suffix.

   *SIZE is scaled down to the largest of GB/MB/kB that divides it
   exactly; the matching suffix ("GB", "MB", "kB", or "") is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
496 /* Print a BTS configuration. */
499 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
507 suffix = record_btrace_adjust_size (&size);
508 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
512 /* Print an Intel Processor Trace configuration. */
515 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
523 suffix = record_btrace_adjust_size (&size);
524 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
528 /* Print a branch tracing configuration. */
531 record_btrace_print_conf (const struct btrace_config *conf)
533 printf_unfiltered (_("Recording format: %s.\n"),
534 btrace_format_string (conf->format));
536 switch (conf->format)
538 case BTRACE_FORMAT_NONE:
541 case BTRACE_FORMAT_BTS:
542 record_btrace_print_bts_conf (&conf->bts);
545 case BTRACE_FORMAT_PT:
546 record_btrace_print_pt_conf (&conf->pt);
550 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* The info_record method of target record-btrace.

   Prints the recording format, the number of recorded instructions,
   functions, and gaps, and the replay position if replaying.
   NOTE(review): several guard lines and braces appear to have been lost
   in this copy; restore from upstream before building.  */

record_btrace_target::info_record ()
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  tp = find_thread_ptid (inferior_ptid);
  /* NOTE(review): the NULL check guarding this error appears truncated.  */
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);

  record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (!btrace_is_empty (tp))
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)

      gaps = btinfo->ngaps;

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
612 /* Print a decode error. */
615 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
616 enum btrace_format format)
618 const char *errstr = btrace_decode_error (format, errcode);
620 uiout->text (_("["));
621 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
622 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
624 uiout->text (_("decode error ("));
625 uiout->field_int ("errcode", errcode);
626 uiout->text (_("): "));
628 uiout->text (errstr);
629 uiout->text (_("]\n"));
632 /* Print an unsigned int. */
635 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
637 uiout->field_fmt (fld, "%u", val);
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
654 /* Construct a line range. */
656 static struct btrace_line_range
657 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
659 struct btrace_line_range range;
661 range.symtab = symtab;
668 /* Add a line to a line range. */
670 static struct btrace_line_range
671 btrace_line_range_add (struct btrace_line_range range, int line)
673 if (range.end <= range.begin)
675 /* This is the first entry. */
677 range.end = line + 1;
679 else if (line < range.begin)
681 else if (range.end < line)
687 /* Return non-zero if RANGE is empty, zero otherwise. */
690 btrace_line_range_is_empty (struct btrace_line_range range)
692 return range.end <= range.begin;
695 /* Return non-zero if LHS contains RHS, zero otherwise. */
698 btrace_line_range_contains_range (struct btrace_line_range lhs,
699 struct btrace_line_range rhs)
701 return ((lhs.symtab == rhs.symtab)
702 && (lhs.begin <= rhs.begin)
703 && (rhs.end <= lhs.end));
706 /* Find the line range associated with PC. */
708 static struct btrace_line_range
709 btrace_find_line_range (CORE_ADDR pc)
711 struct btrace_line_range range;
712 struct linetable_entry *lines;
713 struct linetable *ltable;
714 struct symtab *symtab;
717 symtab = find_pc_line_symtab (pc);
719 return btrace_mk_line_range (NULL, 0, 0);
721 ltable = SYMTAB_LINETABLE (symtab);
723 return btrace_mk_line_range (symtab, 0, 0);
725 nlines = ltable->nitems;
726 lines = ltable->item;
728 return btrace_mk_line_range (symtab, 0, 0);
730 range = btrace_mk_line_range (symtab, 0, 0);
731 for (i = 0; i < nlines - 1; i++)
733 if ((lines[i].pc == pc) && (lines[i].line != 0))
734 range = btrace_line_range_add (range, lines[i].line);
740 /* Print source lines in LINES to UIOUT.
742 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
743 instructions corresponding to that source line. When printing a new source
744 line, we do the cleanups for the open chain and open a new cleanup chain for
745 the new source line. If the source line range in LINES is not empty, this
746 function will leave the cleanup chain for the last printed source line open
747 so instructions can be added to it. */
750 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
751 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
752 gdb::optional<ui_out_emit_list> *asm_list,
753 gdb_disassembly_flags flags)
755 print_source_lines_flags psl_flags;
757 if (flags & DISASSEMBLY_FILENAME)
758 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
760 for (int line = lines.begin; line < lines.end; ++line)
764 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
766 print_source_lines (lines.symtab, line, line + 1, psl_flags);
768 asm_list->emplace (uiout, "line_asm_insn");
/* Disassemble a section of the recorded instruction trace.

   Prints instructions in [BEGIN; END) from BTINFO to UIOUT, interleaving
   source lines when DISASSEMBLY_SOURCE is set in FLAGS and emitting decode
   errors for trace gaps.
   NOTE(review): several braces and guard lines appear to have been lost
   in this copy; restore from upstream before building.  */

btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),

	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,

	      else if (!src_and_asm_tuple.has_value ())
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");

	  gdb_assert (src_and_asm_tuple.has_value ());
	  gdb_assert (asm_list.has_value ());

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
/* The insn_history method of target record-btrace.

   Prints abs (SIZE) instructions around the current browsing position,
   expanding in the direction given by SIZE's sign; successive calls
   continue from the previously printed range.
   NOTE(review): several if/else lines and braces appear to have been lost
   in this copy; restore from upstream before building.  */

record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
      begin = history->begin;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

	  covered = btrace_insn_prev (&begin, context);

	  covered = btrace_insn_next (&end, context);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);

      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  btrace_set_insn_history (btinfo, &begin, &end);
/* The insn_history_range method of target record-btrace.

   Prints the recorded instructions numbered [FROM; TO], silently
   truncating TO to the end of the trace.
   NOTE(review): the "int found;" declaration and several guard lines
   appear to have been lost in this copy.  */

record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
993 /* The insn_history_from method of target record-btrace. */
996 record_btrace_target::insn_history_from (ULONGEST from, int size,
997 gdb_disassembly_flags flags)
999 ULONGEST begin, end, context;
1001 context = abs (size);
1003 error (_("Bad record instruction-history-size."));
1012 begin = from - context + 1;
1017 end = from + context - 1;
1019 /* Check for wrap-around. */
1024 insn_history_range (begin, end, flags);
1027 /* Print the instruction number range for a function call history line. */
1030 btrace_call_history_insn_range (struct ui_out *uiout,
1031 const struct btrace_function *bfun)
1033 unsigned int begin, end, size;
1035 size = bfun->insn.size ();
1036 gdb_assert (size > 0);
1038 begin = bfun->insn_offset;
1039 end = begin + size - 1;
1041 ui_out_field_uint (uiout, "insn begin", begin);
1043 ui_out_field_uint (uiout, "insn end", end);
1046 /* Compute the lowest and highest source line for the instructions in BFUN
1047 and return them in PBEGIN and PEND.
1048 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1049 result from inlining or macro expansion. */
1052 btrace_compute_src_line_range (const struct btrace_function *bfun,
1053 int *pbegin, int *pend)
1055 struct symtab *symtab;
1066 symtab = symbol_symtab (sym);
1068 for (const btrace_insn &insn : bfun->insn)
1070 struct symtab_and_line sal;
1072 sal = find_pc_line (insn.pc, 0);
1073 if (sal.symtab != symtab || sal.line == 0)
1076 begin = std::min (begin, sal.line);
1077 end = std::max (end, sal.line);
1085 /* Print the source line information for a function call history line. */
1088 btrace_call_history_src_line (struct ui_out *uiout,
1089 const struct btrace_function *bfun)
1098 uiout->field_string ("file",
1099 symtab_to_filename_for_display (symbol_symtab (sym)));
1101 btrace_compute_src_line_range (bfun, &begin, &end);
1106 uiout->field_int ("min line", begin);
1112 uiout->field_int ("max line", end);
1115 /* Get the name of a branch trace function. */
1118 btrace_get_bfun_name (const struct btrace_function *bfun)
1120 struct minimal_symbol *msym;
1130 return SYMBOL_PRINT_NAME (sym);
1131 else if (msym != NULL)
1132 return MSYMBOL_PRINT_NAME (msym);
/* Disassemble a section of the recorded function trace.

   Prints one line per function in [BEGIN; END) from BTINFO to UIOUT,
   including index, optional indentation, function name, instruction
   range, and source line depending on FLAGS.
   NOTE(review): braces and several lines (e.g. the symbol lookups for
   SYM/MSYM) appear to have been lost in this copy.  */

btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;

      bfun = btrace_call_get (&it);

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)

	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
/* The call_history method of target record-btrace.

   Prints abs (SIZE) function-call history lines around the current
   browsing position, expanding in the direction given by SIZE's sign;
   successive calls continue from the previously printed range.
   NOTE(review): several if/else lines and braces appear to have been lost
   in this copy.  */

record_btrace_target::call_history (int size, record_print_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
      begin = history->begin;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

	  covered = btrace_call_prev (&begin, context);

	  covered = btrace_call_next (&end, context);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);

      printf_unfiltered (_("At the start of the branch trace record.\n"));
      printf_unfiltered (_("At the end of the branch trace record.\n"));

  btrace_set_call_history (btinfo, &begin, &end);
/* The call_history_range method of target record-btrace.

   Prints the recorded function calls numbered [FROM; TO], silently
   truncating TO to the end of the trace.
   NOTE(review): the "int found;" declaration and several guard lines
   appear to have been lost in this copy.  */

record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
1346 /* The call_history_from method of target record-btrace. */
1349 record_btrace_target::call_history_from (ULONGEST from, int size,
1350 record_print_flags flags)
1352 ULONGEST begin, end, context;
1354 context = abs (size);
1356 error (_("Bad record function-call-history-size."));
1365 begin = from - context + 1;
1370 end = from + context - 1;
1372 /* Check for wrap-around. */
1377 call_history_range ( begin, end, flags);
1380 /* The record_method method of target record-btrace. */
1383 record_btrace_target::record_method (ptid_t ptid)
1385 struct thread_info * const tp = find_thread_ptid (ptid);
1388 error (_("No thread."));
1390 if (tp->btrace.target == NULL)
1391 return RECORD_METHOD_NONE;
1393 return RECORD_METHOD_BTRACE;
1396 /* The record_is_replaying method of target record-btrace. */
1399 record_btrace_target::record_is_replaying (ptid_t ptid)
1401 struct thread_info *tp;
1403 ALL_NON_EXITED_THREADS (tp)
1404 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1410 /* The record_will_replay method of target record-btrace. */
1413 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1415 return dir == EXEC_REVERSE || record_is_replaying (ptid);
/* The xfer_partial method of target record-btrace.

   While replaying with read-only replay memory access, memory writes are
   refused and reads are restricted to read-only sections; everything else
   is forwarded to the target beneath.
   NOTE(review): the switch statement framing, several braces, and the
   *xfered_len assignments appear to have been lost in this copy.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    case TARGET_OBJECT_MEMORY:
	struct target_section *section;

	/* We do not allow writing memory in general.  */
	if (writebuf != NULL)
	    return TARGET_XFER_UNAVAILABLE;

	/* We allow reading readonly memory.  */
	section = target_section_by_addr (this, offset);
	if (section != NULL)
	    /* Check if the section we found is readonly.  */
	    if ((bfd_get_section_flags (section->the_bfd_section->owner,
					section->the_bfd_section)
		 & SEC_READONLY) != 0)
		/* Truncate the request to fit into this section.  */
		len = std::min (len, section->endaddr - offset);

	return TARGET_XFER_UNAVAILABLE;

  /* Forward the request.  */
  return this->beneath->xfer_partial (object, annex, readbuf, writebuf,
				      offset, len, xfered_len);
1470 /* The insert_breakpoint method of target record-btrace. */
1473 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1474 					 struct bp_target_info *bp_tgt)
1479   /* Inserting breakpoints requires accessing memory. Allow it for the
1480      duration of this function. */
1481   old = replay_memory_access;
1482   replay_memory_access = replay_memory_access_read_write;
1487       ret = this->beneath->insert_breakpoint (gdbarch, bp_tgt);
1489   CATCH (except, RETURN_MASK_ALL)
     /* Restore the saved access mode before re-throwing so an exception
	cannot leave replay memory writable.  */
1491       replay_memory_access = old;
1492       throw_exception (except);
1495   replay_memory_access = old;
1500 /* The remove_breakpoint method of target record-btrace. */
1503 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1504 					 struct bp_target_info *bp_tgt,
1505 					 enum remove_bp_reason reason)
1510   /* Removing breakpoints requires accessing memory. Allow it for the
1511      duration of this function. */
1512   old = replay_memory_access;
1513   replay_memory_access = replay_memory_access_read_write;
1518       ret = this->beneath->remove_breakpoint (gdbarch, bp_tgt, reason);
1520   CATCH (except, RETURN_MASK_ALL)
     /* Same restore-on-throw pattern as insert_breakpoint above.  */
1522       replay_memory_access = old;
1523       throw_exception (except);
1526   replay_memory_access = old;
1531 /* The fetch_registers method of target record-btrace. */
1534 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1536   struct btrace_insn_iterator *replay;
1537   struct thread_info *tp;
1539   tp = find_thread_ptid (regcache_get_ptid (regcache));
1540   gdb_assert (tp != NULL);
1542   replay = tp->btrace.replay;
1543   if (replay != NULL && !record_btrace_generating_corefile)
1545       const struct btrace_insn *insn;
1546       struct gdbarch *gdbarch;
1549       gdbarch = regcache->arch ();
1550       pcreg = gdbarch_pc_regnum (gdbarch);
1554       /* We can only provide the PC register. */
1555       if (regno >= 0 && regno != pcreg)
	  /* The PC of the current replay instruction is the only register
	     value the branch trace records.  */
1558       insn = btrace_insn_get (replay);
1559       gdb_assert (insn != NULL);
1561       regcache_raw_supply (regcache, regno, &insn->pc);
     /* Not replaying (or generating a core file): use live registers.  */
1564     this->beneath->fetch_registers (regcache, regno);
1567 /* The store_registers method of target record-btrace. */
1570 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1572   struct target_ops *t;
1574   if (!record_btrace_generating_corefile
1575       && record_is_replaying (regcache_get_ptid (regcache)))
1576     error (_("Cannot write registers while replaying."));
1578   gdb_assert (may_write_registers != 0);
1580   this->beneath->store_registers (regcache, regno);
1583 /* The prepare_to_store method of target record-btrace. */
1586 record_btrace_target::prepare_to_store (struct regcache *regcache)
     /* While replaying, silently ignore the preparation request; actual
	stores are rejected in store_registers above.  */
1588   if (!record_btrace_generating_corefile
1589       && record_is_replaying (regcache_get_ptid (regcache)))
1592   this->beneath->prepare_to_store (regcache);
1595 /* The branch trace frame cache. */
1597 struct btrace_frame_cache
     /* The thread this frame belongs to.  */
1600   struct thread_info *tp;
1602   /* The frame info. */
1603   struct frame_info *frame;
1605   /* The branch trace function segment. */
1606   const struct btrace_function *bfun;
1609 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1611 static htab_t bfcache;
1613 /* hash_f for htab_create_alloc of bfcache. */
1616 bfcache_hash (const void *arg)
1618   const struct btrace_frame_cache *cache
1619     = (const struct btrace_frame_cache *) arg;
     /* The frame pointer identifies the cache entry.  */
1621   return htab_hash_pointer (cache->frame);
1624 /* eq_f for htab_create_alloc of bfcache. */
1627 bfcache_eq (const void *arg1, const void *arg2)
1629   const struct btrace_frame_cache *cache1
1630     = (const struct btrace_frame_cache *) arg1;
1631   const struct btrace_frame_cache *cache2
1632     = (const struct btrace_frame_cache *) arg2;
     /* Equality mirrors bfcache_hash: compare by frame pointer only.  */
1634   return cache1->frame == cache2->frame;
1637 /* Create a new btrace frame cache. */
1639 static struct btrace_frame_cache *
1640 bfcache_new (struct frame_info *frame)
1642   struct btrace_frame_cache *cache;
     /* The cache is allocated on the frame obstack, so its lifetime is
	tied to the frame cache; the htab only holds a reference.  */
1645   cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1646   cache->frame = frame;
1648   slot = htab_find_slot (bfcache, cache, INSERT);
     /* Each frame must be registered at most once.  */
1649   gdb_assert (*slot == NULL);
1655 /* Extract the branch trace function from a branch trace frame. */
1657 static const struct btrace_function *
1658 btrace_get_frame_function (struct frame_info *frame)
1660   const struct btrace_frame_cache *cache;
1661   struct btrace_frame_cache pattern;
1664   pattern.frame = frame;
     /* NO_INSERT: a miss means FRAME is not a btrace frame.  */
1666   slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1670   cache = (const struct btrace_frame_cache *) *slot;
1674 /* Implement stop_reason method for record_btrace_frame_unwind. */
1676 static enum unwind_stop_reason
1677 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1680   const struct btrace_frame_cache *cache;
1681   const struct btrace_function *bfun;
1683   cache = (const struct btrace_frame_cache *) *this_cache;
1685   gdb_assert (bfun != NULL);
     /* UNAVAILABLE if there is no recorded caller, NO_REASON otherwise.  */
1688     return UNWIND_UNAVAILABLE;
1690   return UNWIND_NO_REASON;
1693 /* Implement this_id method for record_btrace_frame_unwind. */
1696 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1697 			     struct frame_id *this_id)
1699   const struct btrace_frame_cache *cache;
1700   const struct btrace_function *bfun;
1701   struct btrace_call_iterator it;
1702   CORE_ADDR code, special;
1704   cache = (const struct btrace_frame_cache *) *this_cache;
1707   gdb_assert (bfun != NULL);
     /* Walk to the first segment of this function so that all segments of
	the same function instance yield the same frame id.  */
1709   while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1710     bfun = btrace_call_get (&it);
1712   code = get_frame_func (this_frame);
1713   special = bfun->number;
     /* Btrace frames have no stack; build an unavailable-stack id keyed on
	the function segment number.  */
1715   *this_id = frame_id_build_unavailable_stack_special (code, special);
1717   DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1718 	 btrace_get_bfun_name (cache->bfun),
1719 	 core_addr_to_string_nz (this_id->code_addr),
1720 	 core_addr_to_string_nz (this_id->special_addr));
1723 /* Implement prev_register method for record_btrace_frame_unwind. */
1725 static struct value *
1726 record_btrace_frame_prev_register (struct frame_info *this_frame,
1730   const struct btrace_frame_cache *cache;
1731   const struct btrace_function *bfun, *caller;
1732   struct btrace_call_iterator it;
1733   struct gdbarch *gdbarch;
1737   gdbarch = get_frame_arch (this_frame);
1738   pcreg = gdbarch_pc_regnum (gdbarch);
     /* The branch trace only lets us unwind the PC.  */
1739   if (pcreg < 0 || regnum != pcreg)
1740     throw_error (NOT_AVAILABLE_ERROR,
1741 		 _("Registers are not available in btrace record history"));
1743   cache = (const struct btrace_frame_cache *) *this_cache;
1745   gdb_assert (bfun != NULL);
1747   if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1748     throw_error (NOT_AVAILABLE_ERROR,
1749 		 _("No caller in btrace record history"));
1751   caller = btrace_call_get (&it);
     /* For a return link the caller resumes at its first recorded
	instruction; for a call link it resumes after its last one.  */
1753   if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1754     pc = caller->insn.front ().pc;
1757       pc = caller->insn.back ().pc;
1758       pc += gdb_insn_length (gdbarch, pc);
1761   DEBUG ("[frame] unwound PC in %s on level %d: %s",
1762 	 btrace_get_bfun_name (bfun), bfun->level,
1763 	 core_addr_to_string_nz (pc));
1765   return frame_unwind_got_address (this_frame, regnum, pc);
1768 /* Implement sniffer method for record_btrace_frame_unwind. */
1771 record_btrace_frame_sniffer (const struct frame_unwind *self,
1772 			     struct frame_info *this_frame,
1775   const struct btrace_function *bfun;
1776   struct btrace_frame_cache *cache;
1777   struct thread_info *tp;
1778   struct frame_info *next;
1780   /* THIS_FRAME does not contain a reference to its thread. */
1781   tp = find_thread_ptid (inferior_ptid);
1782   gdb_assert (tp != NULL);
1785   next = get_next_frame (this_frame);
     /* Innermost frame: take the function segment at the current replay
	position.  */
1788       const struct btrace_insn_iterator *replay;
1790       replay = tp->btrace.replay;
1792 	bfun = &replay->btinfo->functions[replay->call_index];
     /* Otherwise derive this frame from the callee's up link, unless the
	callee reached us via tail call (handled by the tailcall sniffer).  */
1796       const struct btrace_function *callee;
1797       struct btrace_call_iterator it;
1799       callee = btrace_get_frame_function (next);
1800       if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1803       if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1806       bfun = btrace_call_get (&it);
1812   DEBUG ("[frame] sniffed frame for %s on level %d",
1813 	 btrace_get_bfun_name (bfun), bfun->level);
1815   /* This is our frame. Initialize the frame cache. */
1816   cache = bfcache_new (this_frame);
1820   *this_cache = cache;
1824 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1827 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1828 				      struct frame_info *this_frame,
1831   const struct btrace_function *bfun, *callee;
1832   struct btrace_frame_cache *cache;
1833   struct btrace_call_iterator it;
1834   struct frame_info *next;
1835   struct thread_info *tinfo;
1837   next = get_next_frame (this_frame);
1841   callee = btrace_get_frame_function (next);
     /* Only claim frames whose callee was entered via tail call.  */
1845   if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1848   tinfo = find_thread_ptid (inferior_ptid);
1849   if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1852   bfun = btrace_call_get (&it);
1854   DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1855 	 btrace_get_bfun_name (bfun), bfun->level);
1857   /* This is our frame. Initialize the frame cache. */
1858   cache = bfcache_new (this_frame);
1862   *this_cache = cache;
     /* Remove the frame's entry from the bfcache hash table when the frame
	cache is flushed.  The entry itself lives on the frame obstack.  */
1867 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1869   struct btrace_frame_cache *cache;
1872   cache = (struct btrace_frame_cache *) this_cache;
1874   slot = htab_find_slot (bfcache, cache, NO_INSERT);
1875   gdb_assert (slot != NULL);
1877   htab_remove_elt (bfcache, cache);
1880 /* btrace recording does not store previous memory content, nor the stack
1881    frames' content.  Any unwinding would return erroneous results as the stack
1882    contents no longer match the changed PC value restored from history.
1883    Therefore this unwinder reports any possibly unwound registers as
1886 const struct frame_unwind record_btrace_frame_unwind =
1889   record_btrace_frame_unwind_stop_reason,
1890   record_btrace_frame_this_id,
1891   record_btrace_frame_prev_register,
1893   record_btrace_frame_sniffer,
1894   record_btrace_frame_dealloc_cache
1897 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1900   record_btrace_frame_unwind_stop_reason,
1901   record_btrace_frame_this_id,
1902   record_btrace_frame_prev_register,
1904   record_btrace_tailcall_frame_sniffer,
1905   record_btrace_frame_dealloc_cache
1908 /* Implement the get_unwinder method. */
1910 const struct frame_unwind *
1911 record_btrace_target::get_unwinder ()
1913   return &record_btrace_frame_unwind;
1916 /* Implement the get_tailcall_unwinder method. */
1918 const struct frame_unwind *
1919 record_btrace_target::get_tailcall_unwinder ()
1921   return &record_btrace_tailcall_frame_unwind;
1924 /* Return a human-readable string for FLAG. */
1927 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1935       return "reverse-step";
1941       return "reverse-cont";
1950 /* Indicate that TP should be resumed according to FLAG. */
1953 record_btrace_resume_thread (struct thread_info *tp,
1954 			     enum btrace_thread_flag flag)
1956   struct btrace_thread_info *btinfo;
1958   DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1959 	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1961   btinfo = &tp->btrace;
1963   /* Fetch the latest branch trace. */
1964   btrace_fetch (tp, record_btrace_get_cpu ());
1966   /* A resume request overwrites a preceding resume or stop request. */
1967   btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1968   btinfo->flags |= flag;
1971 /* Get the current frame for TP. */
1973 static struct frame_info *
1974 get_thread_current_frame (struct thread_info *tp)
1976   struct frame_info *frame;
1977   ptid_t old_inferior_ptid;
1980   /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1981   old_inferior_ptid = inferior_ptid;
1982   inferior_ptid = tp->ptid;
1984   /* Clear the executing flag to allow changes to the current frame.
1985      We are not actually running, yet. We just started a reverse execution
1986      command or a record goto command.
1987      For the latter, EXECUTING is false and this has no effect.
1988      For the former, EXECUTING is true and we're in wait, about to
1989      move the thread. Since we need to recompute the stack, we temporarily
1990      set EXECUTING to false. */
1991   executing = is_executing (inferior_ptid);
1992   set_executing (inferior_ptid, 0);
1997       frame = get_current_frame ();
1999   CATCH (except, RETURN_MASK_ALL)
     /* On error, restore both the executing flag and inferior_ptid before
	re-throwing so no global state leaks out of this function.  */
2001       /* Restore the previous execution state. */
2002       set_executing (inferior_ptid, executing);
2004       /* Restore the previous inferior_ptid. */
2005       inferior_ptid = old_inferior_ptid;
2007       throw_exception (except);
2011   /* Restore the previous execution state. */
2012   set_executing (inferior_ptid, executing);
2014   /* Restore the previous inferior_ptid. */
2015   inferior_ptid = old_inferior_ptid;
2020 /* Start replaying a thread. */
2022 static struct btrace_insn_iterator *
2023 record_btrace_start_replaying (struct thread_info *tp)
2025   struct btrace_insn_iterator *replay;
2026   struct btrace_thread_info *btinfo;
2028   btinfo = &tp->btrace;
2031   /* We can't start replaying without trace. */
2032   if (btinfo->functions.empty ())
2035   /* GDB stores the current frame_id when stepping in order to detects steps
2037      Since frames are computed differently when we're replaying, we need to
2038      recompute those stored frames and fix them up so we can still detect
2039      subroutines after we started replaying. */
2042       struct frame_info *frame;
2043       struct frame_id frame_id;
2044       int upd_step_frame_id, upd_step_stack_frame_id;
2046       /* The current frame without replaying - computed via normal unwind. */
2047       frame = get_thread_current_frame (tp);
2048       frame_id = get_frame_id (frame);
2050       /* Check if we need to update any stepping-related frame id's. */
2051       upd_step_frame_id = frame_id_eq (frame_id,
2052 				       tp->control.step_frame_id);
2053       upd_step_stack_frame_id = frame_id_eq (frame_id,
2054 					     tp->control.step_stack_frame_id);
2056       /* We start replaying at the end of the branch trace. This corresponds
2057 	 to the current instruction. */
2058       replay = XNEW (struct btrace_insn_iterator);
2059       btrace_insn_end (replay, btinfo);
2061       /* Skip gaps at the end of the trace. */
2062       while (btrace_insn_get (replay) == NULL)
2066 	  steps = btrace_insn_prev (replay, 1);
	  /* The whole trace consists of gaps: nothing to replay.  */
2068 	    error (_("No trace."));
2071       /* We're not replaying, yet. */
2072       gdb_assert (btinfo->replay == NULL);
2073       btinfo->replay = replay;
2075       /* Make sure we're not using any stale registers. */
2076       registers_changed_ptid (tp->ptid);
2078       /* The current frame with replaying - computed via btrace unwind. */
2079       frame = get_thread_current_frame (tp);
2080       frame_id = get_frame_id (frame);
2082       /* Replace stepping related frames where necessary. */
2083       if (upd_step_frame_id)
2084 	tp->control.step_frame_id = frame_id;
2085       if (upd_step_stack_frame_id)
2086 	tp->control.step_stack_frame_id = frame_id;
2088   CATCH (except, RETURN_MASK_ALL)
     /* Roll back: free the iterator and clear the replay state so the
	thread is left as if replaying never started.  */
2090       xfree (btinfo->replay);
2091       btinfo->replay = NULL;
2093       registers_changed_ptid (tp->ptid);
2095       throw_exception (except);
2102 /* Stop replaying a thread. */
2105 record_btrace_stop_replaying (struct thread_info *tp)
2107   struct btrace_thread_info *btinfo;
2109   btinfo = &tp->btrace;
2111   xfree (btinfo->replay);
2112   btinfo->replay = NULL;
2114   /* Make sure we're not leaving any stale registers. */
2115   registers_changed_ptid (tp->ptid);
2118 /* Stop replaying TP if it is at the end of its execution history. */
2121 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2123   struct btrace_insn_iterator *replay, end;
2124   struct btrace_thread_info *btinfo;
2126   btinfo = &tp->btrace;
2127   replay = btinfo->replay;
2132   btrace_insn_end (&end, btinfo);
     /* Replay position equal to trace end means the thread has caught up
	with live execution; leave replay mode.  */
2134   if (btrace_insn_cmp (replay, &end) == 0)
2135     record_btrace_stop_replaying (tp);
2138 /* The resume method of target record-btrace. */
2141 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2143   struct thread_info *tp;
2144   enum btrace_thread_flag flag, cflag;
2146   DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2147 	 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2148 	 step ? "step" : "cont");
2150   /* Store the execution direction of the last resume.
2152      If there is more than one resume call, we have to rely on infrun
2153      to not change the execution direction in-between. */
2154   record_btrace_resume_exec_dir = ::execution_direction;
2156   /* As long as we're not replaying, just forward the request.
2158      For non-stop targets this means that no thread is replaying. In order to
2159      make progress, we may need to explicitly move replaying threads to the end
2160      of their execution history. */
2161   if ((::execution_direction != EXEC_REVERSE)
2162       && !record_is_replaying (minus_one_ptid))
2164       this->beneath->resume (ptid, step, signal);
2168   /* Compute the btrace thread flag for the requested move. */
2169   if (::execution_direction == EXEC_REVERSE)
2171       flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2176       flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2180   /* We just indicate the resume intent here. The actual stepping happens in
2181      record_btrace_wait below.
2183      For all-stop targets, we only step INFERIOR_PTID and continue others. */
2184   if (!target_is_non_stop_p ())
2186       gdb_assert (ptid_match (inferior_ptid, ptid));
2188       ALL_NON_EXITED_THREADS (tp)
2189 	if (ptid_match (tp->ptid, ptid))
	    /* Only the current thread gets the step flag; the rest get the
	       continue flag (CFLAG).  */
2191 	    if (ptid_match (tp->ptid, inferior_ptid))
2192 	      record_btrace_resume_thread (tp, flag);
2194 	      record_btrace_resume_thread (tp, cflag);
     /* Non-stop: every matching thread gets the requested move.  */
2199       ALL_NON_EXITED_THREADS (tp)
2200 	if (ptid_match (tp->ptid, ptid))
2201 	  record_btrace_resume_thread (tp, flag);
2204   /* Async support. */
2205   if (target_can_async_p ())
2208       mark_async_event_handler (record_btrace_async_inferior_event_handler);
2212 /* The commit_resume method of target record-btrace. */
2215 record_btrace_target::commit_resume ()
     /* Only forward when we are not replaying; replay moves happen in
	record_btrace_target::wait instead.  */
2217   if ((::execution_direction != EXEC_REVERSE)
2218       && !record_is_replaying (minus_one_ptid))
2219     beneath->commit_resume ();
2222 /* Cancel resuming TP. */
2225 record_btrace_cancel_resume (struct thread_info *tp)
2227   enum btrace_thread_flag flags;
2229   flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2233   DEBUG ("cancel resume thread %s (%s): %x (%s)",
2234 	 print_thread_id (tp),
2235 	 target_pid_to_str (tp->ptid), flags,
2236 	 btrace_thread_flag_to_str (flags));
     /* Drop pending move/stop requests and leave replay mode if the thread
	is already at the end of its history.  */
2238   tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2239   record_btrace_stop_replaying_at_end (tp);
2242 /* Return a target_waitstatus indicating that we ran out of history. */
2244 static struct target_waitstatus
2245 btrace_step_no_history (void)
2247   struct target_waitstatus status;
2249   status.kind = TARGET_WAITKIND_NO_HISTORY;
2254 /* Return a target_waitstatus indicating that a step finished. */
2256 static struct target_waitstatus
2257 btrace_step_stopped (void)
2259   struct target_waitstatus status;
2261   status.kind = TARGET_WAITKIND_STOPPED;
2262   status.value.sig = GDB_SIGNAL_TRAP;
2267 /* Return a target_waitstatus indicating that a thread was stopped as
2270 static struct target_waitstatus
2271 btrace_step_stopped_on_request (void)
2273   struct target_waitstatus status;
2275   status.kind = TARGET_WAITKIND_STOPPED;
     /* GDB_SIGNAL_0, not SIGTRAP: the stop was requested, not stepped.  */
2276   status.value.sig = GDB_SIGNAL_0;
2281 /* Return a target_waitstatus indicating a spurious stop. */
2283 static struct target_waitstatus
2284 btrace_step_spurious (void)
2286   struct target_waitstatus status;
2288   status.kind = TARGET_WAITKIND_SPURIOUS;
2293 /* Return a target_waitstatus indicating that the thread was not resumed. */
2295 static struct target_waitstatus
2296 btrace_step_no_resumed (void)
2298   struct target_waitstatus status;
2300   status.kind = TARGET_WAITKIND_NO_RESUMED;
2305 /* Return a target_waitstatus indicating that we should wait again. */
2307 static struct target_waitstatus
2308 btrace_step_again (void)
2310   struct target_waitstatus status;
2312   status.kind = TARGET_WAITKIND_IGNORE;
2317 /* Clear the record histories. */
2320 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
     /* Both histories are heap-allocated; free and reset so they are
	recomputed from the new replay position.  */
2322   xfree (btinfo->insn_history);
2323   xfree (btinfo->call_history);
2325   btinfo->insn_history = NULL;
2326   btinfo->call_history = NULL;
2329 /* Check whether TP's current replay position is at a breakpoint. */
2332 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2334   struct btrace_insn_iterator *replay;
2335   struct btrace_thread_info *btinfo;
2336   const struct btrace_insn *insn;
2337   struct inferior *inf;
2339   btinfo = &tp->btrace;
2340   replay = btinfo->replay;
2345   insn = btrace_insn_get (replay);
2349   inf = find_inferior_ptid (tp->ptid);
     /* Delegate the actual breakpoint check to the record core; it also
	updates BTINFO->STOP_REASON as a side effect.  */
2353   return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2354 					     &btinfo->stop_reason);
2357 /* Step one instruction in forward direction. */
2359 static struct target_waitstatus
2360 record_btrace_single_step_forward (struct thread_info *tp)
2362   struct btrace_insn_iterator *replay, end, start;
2363   struct btrace_thread_info *btinfo;
2365   btinfo = &tp->btrace;
2366   replay = btinfo->replay;
2368   /* We're done if we're not replaying. */
2370     return btrace_step_no_history ();
2372   /* Check if we're stepping a breakpoint. */
2373   if (record_btrace_replay_at_breakpoint (tp))
2374     return btrace_step_stopped ();
2376   /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2377      jump back to the instruction at which we started. */
2383       /* We will bail out here if we continue stepping after reaching the end
2384 	 of the execution history. */
2385       steps = btrace_insn_next (replay, 1);
2389 	  return btrace_step_no_history ();
2392   while (btrace_insn_get (replay) == NULL);
2394   /* Determine the end of the instruction trace. */
2395   btrace_insn_end (&end, btinfo);
2397   /* The execution trace contains (and ends with) the current instruction.
2398      This instruction has not been executed, yet, so the trace really ends
2399      one instruction earlier. */
2400   if (btrace_insn_cmp (replay, &end) == 0)
2401     return btrace_step_no_history ();
     /* SPURIOUS tells the caller to keep stepping/looping as needed.  */
2403   return btrace_step_spurious ();
2406 /* Step one instruction in backward direction. */
2408 static struct target_waitstatus
2409 record_btrace_single_step_backward (struct thread_info *tp)
2411   struct btrace_insn_iterator *replay, start;
2412   struct btrace_thread_info *btinfo;
2414   btinfo = &tp->btrace;
2415   replay = btinfo->replay;
2417   /* Start replaying if we're not already doing so. */
2419     replay = record_btrace_start_replaying (tp);
2421   /* If we can't step any further, we reached the end of the history.
2422      Skip gaps during replay. If we end up at a gap (at the beginning of
2423      the trace), jump back to the instruction at which we started. */
2429       steps = btrace_insn_prev (replay, 1);
2433 	  return btrace_step_no_history ();
2436   while (btrace_insn_get (replay) == NULL);
2438   /* Check if we're stepping a breakpoint.
2440      For reverse-stepping, this check is after the step. There is logic in
2441      infrun.c that handles reverse-stepping separately. See, for example,
2442      proceed and adjust_pc_after_break.
2444      This code assumes that for reverse-stepping, PC points to the last
2445      de-executed instruction, whereas for forward-stepping PC points to the
2446      next to-be-executed instruction. */
2447   if (record_btrace_replay_at_breakpoint (tp))
2448     return btrace_step_stopped ();
2450   return btrace_step_spurious ();
2453 /* Step a single thread. */
2455 static struct target_waitstatus
2456 record_btrace_step_thread (struct thread_info *tp)
2458   struct btrace_thread_info *btinfo;
2459   struct target_waitstatus status;
2460   enum btrace_thread_flag flags;
2462   btinfo = &tp->btrace;
     /* Consume the pending move/stop request before acting on it.  */
2464   flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2465   btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2467   DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2468 	 target_pid_to_str (tp->ptid), flags,
2469 	 btrace_thread_flag_to_str (flags));
2471   /* We can't step without an execution history. */
2472   if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2473     return btrace_step_no_history ();
2478       internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2481       return btrace_step_stopped_on_request ();
     /* Single step: one instruction, stop unless spurious.  */
2484       status = record_btrace_single_step_forward (tp);
2485       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2488       return btrace_step_stopped ();
2491       status = record_btrace_single_step_backward (tp);
2492       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2495       return btrace_step_stopped ();
     /* Continue: step once, re-arm the request, and ask to be called
	again so other threads can interleave.  */
2498       status = record_btrace_single_step_forward (tp);
2499       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2502       btinfo->flags |= flags;
2503       return btrace_step_again ();
2506       status = record_btrace_single_step_backward (tp);
2507       if (status.kind != TARGET_WAITKIND_SPURIOUS)
2510       btinfo->flags |= flags;
2511       return btrace_step_again ();
2514   /* We keep threads moving at the end of their execution history. The wait
2515      method will stop the thread for whom the event is reported. */
2516   if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2517     btinfo->flags |= flags;
2522 /* A vector of threads. */
2524 typedef struct thread_info * tp_t;
2527 /* Announce further events if necessary.

     MOVING holds threads that still have pending move requests and
     NO_HISTORY holds threads that ran out of execution history.  If either
     list is non-empty there is more work to do, so re-arm the async event
     handler to get record_btrace_target::wait called again.  */
2530 record_btrace_maybe_mark_async_event
2531     (const std::vector<thread_info *> &moving,
2532      const std::vector<thread_info *> &no_history)
2534   bool more_moving = !moving.empty ();
     /* Fixed stray double semicolon that formed an empty statement.  */
2535   bool more_no_history = !no_history.empty ();
2537   if (!more_moving && !more_no_history)
2541     DEBUG ("movers pending");
2543   if (more_no_history)
2544     DEBUG ("no-history pending");
2546   mark_async_event_handler (record_btrace_async_inferior_event_handler);
2549 /* The wait method of target record-btrace. */
2552 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2555   std::vector<thread_info *> moving;
2556   std::vector<thread_info *> no_history;
2558   DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2560   /* As long as we're not replaying, just forward the request. */
2561   if ((::execution_direction != EXEC_REVERSE)
2562       && !record_is_replaying (minus_one_ptid))
2564       return this->beneath->wait (ptid, status, options);
2567   /* Keep a work list of moving threads. */
2571     ALL_NON_EXITED_THREADS (tp)
2573       if (ptid_match (tp->ptid, ptid)
2574 	  && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2575 	moving.push_back (tp);
2579   if (moving.empty ())
2581       *status = btrace_step_no_resumed ();
2583       DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2584 	     target_waitstatus_to_string (status).c_str ());
2589   /* Step moving threads one by one, one step each, until either one thread
2590      reports an event or we run out of threads to step.
2592      When stepping more than one thread, chances are that some threads reach
2593      the end of their execution history earlier than others. If we reported
2594      this immediately, all-stop on top of non-stop would stop all threads and
2595      resume the same threads next time. And we would report the same thread
2596      having reached the end of its execution history again.
2598      In the worst case, this would starve the other threads. But even if other
2599      threads would be allowed to make progress, this would result in far too
2600      many intermediate stops.
2602      We therefore delay the reporting of "no execution history" until we have
2603      nothing else to report. By this time, all threads should have moved to
2604      either the beginning or the end of their execution history. There will
2605      be a single user-visible stop. */
2606   struct thread_info *eventing = NULL;
2607   while ((eventing == NULL) && !moving.empty ())
2609       for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2611 	  thread_info *tp = moving[ix];
2613 	  *status = record_btrace_step_thread (tp);
2615 	  switch (status->kind)
2617 	    case TARGET_WAITKIND_IGNORE:
2621 	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park out-of-history threads on NO_HISTORY; only report them
		 once nothing else is left to report (see comment above).  */
2622 	      no_history.push_back (ordered_remove (moving, ix));
2626 	      eventing = unordered_remove (moving, ix);
2632   if (eventing == NULL)
2634       /* We started with at least one moving thread. This thread must have
2635 	 either stopped or reached the end of its execution history.
2637 	 In the former case, EVENTING must not be NULL.
2638 	 In the latter case, NO_HISTORY must not be empty. */
2639       gdb_assert (!no_history.empty ());
2641       /* We kept threads moving at the end of their execution history. Stop
2642 	 EVENTING now that we are going to report its stop. */
2643       eventing = unordered_remove (no_history, 0);
2644       eventing->btrace.flags &= ~BTHR_MOVE;
2646       *status = btrace_step_no_history ();
2649   gdb_assert (eventing != NULL);
2651   /* We kept threads replaying at the end of their execution history. Stop
2652      replaying EVENTING now that we are going to report its stop. */
2653   record_btrace_stop_replaying_at_end (eventing);
2655   /* Stop all other threads. */
2656   if (!target_is_non_stop_p ())
2660       ALL_NON_EXITED_THREADS (tp)
2661 	record_btrace_cancel_resume (tp);
2664   /* In async mode, we need to announce further events. */
2665   if (target_is_async_p ())
2666     record_btrace_maybe_mark_async_event (moving, no_history);
2668   /* Start record histories anew from the current position. */
2669   record_btrace_clear_histories (&eventing->btrace);
2671   /* We moved the replay position but did not update registers. */
2672   registers_changed_ptid (eventing->ptid);
2674   DEBUG ("wait ended by thread %s (%s): %s",
2675 	 print_thread_id (eventing),
2676 	 target_pid_to_str (eventing->ptid),
2677 	 target_waitstatus_to_string (status).c_str ());
2679   return eventing->ptid;
2682 /* The stop method of target record-btrace. */
2685 record_btrace_target::stop (ptid_t ptid)
2687   DEBUG ("stop %s", target_pid_to_str (ptid));
2689   /* As long as we're not replaying, just forward the request. */
2690   if ((::execution_direction != EXEC_REVERSE)
2691       && !record_is_replaying (minus_one_ptid))
2693       this->beneath->stop (ptid);
2697       struct thread_info *tp;
     /* While replaying, convert any pending move request into a stop
	request; the wait method reports the stop.  */
2699       ALL_NON_EXITED_THREADS (tp)
2700 	if (ptid_match (tp->ptid, ptid))
2702 	    tp->btrace.flags &= ~BTHR_MOVE;
2703 	    tp->btrace.flags |= BTHR_STOP;
2708 /* The can_execute_reverse method of target record-btrace. */
2711 record_btrace_target::can_execute_reverse ()
2716 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2719 record_btrace_target::stopped_by_sw_breakpoint ()
2721   if (record_is_replaying (minus_one_ptid))
2723       struct thread_info *tp = inferior_thread ();
2725       return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2728   return this->beneath->stopped_by_sw_breakpoint ();
2731 /* The supports_stopped_by_sw_breakpoint method of target
2735 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2737   if (record_is_replaying (minus_one_ptid))
2740   return this->beneath->supports_stopped_by_sw_breakpoint ();
2743 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2746 record_btrace_target::stopped_by_hw_breakpoint ()
2748   if (record_is_replaying (minus_one_ptid))
2750       struct thread_info *tp = inferior_thread ();
2752       return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2755   return this->beneath->stopped_by_hw_breakpoint ();
2758 /* The supports_stopped_by_hw_breakpoint method of target
2762 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2764   if (record_is_replaying (minus_one_ptid))
2767   return this->beneath->supports_stopped_by_hw_breakpoint ();
2770 /* The update_thread_list method of target record-btrace. */
2773 record_btrace_target::update_thread_list ()
2775   /* We don't add or remove threads during replay. */
2776   if (record_is_replaying (minus_one_ptid))
2779   /* Forward the request. */
2780   this->beneath->update_thread_list ();
2783 /* The thread_alive method of target record-btrace. */
2786 record_btrace_target::thread_alive (ptid_t ptid)
2788   /* We don't add or remove threads during replay. */
2789   if (record_is_replaying (minus_one_ptid))
     /* During replay, any thread we still know about counts as alive.  */
2790     return find_thread_ptid (ptid) != NULL;
2792   /* Forward the request. */
2793   return this->beneath->thread_alive (ptid);
2796 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2800 record_btrace_set_replay (struct thread_info *tp,
2801 			  const struct btrace_insn_iterator *it)
2803   struct btrace_thread_info *btinfo;
2805   btinfo = &tp->btrace;
     /* A NULL iterator means: stop replaying altogether.  */
2808     record_btrace_stop_replaying (tp);
2811       if (btinfo->replay == NULL)
2812 	record_btrace_start_replaying (tp);
     /* Already at the requested position: nothing to do.  */
2813       else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2816       *btinfo->replay = *it;
2817       registers_changed_ptid (tp->ptid);
2820   /* Start anew from the new replay position. */
2821   record_btrace_clear_histories (btinfo);
2823   stop_pc = regcache_read_pc (get_current_regcache ());
2824   print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
/* The goto_record_begin method of target record-btrace.  */

void
record_btrace_target::goto_record_begin ()
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      /* If we cannot step past the gap, the trace contains no
	 instructions at all.  */
      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
	error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}
/* The goto_record_end method of target record-btrace.  */

void
record_btrace_target::goto_record_end ()
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  /* A NULL iterator stops replaying (see record_btrace_set_replay).  */
  record_btrace_set_replay (tp, NULL);
}
/* The goto_record method of target record-btrace.  */

void
record_btrace_target::goto_record (ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: the 64-bit INSN must fit into NUMBER.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
/* The record_stop_replaying method of target record-btrace.  */

void
record_btrace_target::record_stop_replaying ()
{
  struct thread_info *tp;

  /* Stop replaying for all live threads.  */
  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}
/* The execution_direction target method.  */

enum exec_direction_kind
record_btrace_target::execution_direction ()
{
  /* Report the direction stored by the most recent resume request.  */
  return record_btrace_resume_exec_dir;
}
/* The prepare_to_generate_core target method.  */

void
record_btrace_target::prepare_to_generate_core ()
{
  /* Set the flag; cleared again in done_generating_core.  Presumably
     memory accesses check this to bypass replay restrictions — confirm
     against this target's xfer_partial.  */
  record_btrace_generating_corefile = 1;
}
/* The done_generating_core target method.  */

void
record_btrace_target::done_generating_core ()
{
  /* Clear the flag set in prepare_to_generate_core.  */
  record_btrace_generating_corefile = 0;
}
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Reset the requested format before re-throwing so a failed
	 attempt leaves no stale configuration behind.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Reset the requested format before re-throwing so a failed
	 attempt leaves no stale configuration behind.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
/* Alias for "target record".  Tries PT first, then falls back to BTS.  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT failed; retry with the BTS format.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  /* Both formats failed; reset and re-throw.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
/* The "set record btrace" command.  Prefix command: print usage and the
   list of subcommands.  */

static void
cmd_set_record_btrace (const char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_cmdlist, "set record btrace ",
	     all_commands, gdb_stdout);
}
/* The "show record btrace" command.  Prefix command: show all
   subcommand values.  */

static void
cmd_show_record_btrace (const char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
3021 /* The "show record btrace replay-memory-access" command. */
3024 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3025 struct cmd_list_element *c, const char *value)
3027 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3028 replay_memory_access);
/* The "set record btrace cpu none" command.  Disable errata workarounds
   for trace decode.  */

static void
cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_NONE;
}
/* The "set record btrace cpu auto" command.  Use the cpu on which the
   trace was recorded.  */

static void
cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_AUTO;
}
/* The "set record btrace cpu" command.  Parse "intel: FAMILY/MODEL[/STEPPING]"
   and store the result in RECORD_BTRACE_CPU.  */

static void
cmd_set_record_btrace_cpu (const char *args, int from_tty)
{
  if (args == nullptr)
    args = "";

  /* We use a hard-coded vendor string for now.  */
  unsigned int family, model, stepping;
  /* The two %n directives record how many characters were consumed after
     MODEL (L1) and after STEPPING (L2); %n does not count towards
     MATCHES, so MATCHES is 3 with a stepping and 2 without.  */
  int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
				&model, &l1, &stepping, &l2);
  if (matches == 3)
    {
      /* Reject input with characters beyond the parsed cpu triple.
	 (Signed/unsigned compare is fine: L2 is non-negative.)  */
      if (strlen (args) != l2)
	error (_("Trailing junk: '%s'."), args + l2);
    }
  else if (matches == 2)
    {
      if (strlen (args) != l1)
	error (_("Trailing junk: '%s'."), args + l1);

      /* A missing stepping defaults to zero.  */
      stepping = 0;
    }
  else
    error (_("Bad format.  See \"help set record btrace cpu\"."));

  /* Range-check against the widths of the record_btrace_cpu fields.  */
  if (USHRT_MAX < family)
    error (_("Cpu family too big."));

  if (UCHAR_MAX < model)
    error (_("Cpu model too big."));

  if (UCHAR_MAX < stepping)
    error (_("Cpu stepping too big."));

  record_btrace_cpu.vendor = CV_INTEL;
  record_btrace_cpu.family = family;
  record_btrace_cpu.model = model;
  record_btrace_cpu.stepping = stepping;

  record_btrace_cpu_state = CS_CPU;
}
/* The "show record btrace cpu" command.  */

static void
cmd_show_record_btrace_cpu (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      printf_unfiltered (_("btrace cpu is 'auto'.\n"));
      return;

    case CS_NONE:
      printf_unfiltered (_("btrace cpu is 'none'.\n"));
      return;

    case CS_CPU:
      switch (record_btrace_cpu.vendor)
	{
	case CV_INTEL:
	  /* Omit a zero stepping, matching the optional [/STEPPING] in
	     the "set" command's input format.  */
	  if (record_btrace_cpu.stepping == 0)
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model);
	  else
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model,
			       record_btrace_cpu.stepping);
	  return;
	}
    }

  /* Reached only if the state or vendor is not one of the enumerated
     values above.  */
  error (_("Internal error: bad cpu state."));
}
/* The "set record btrace bts" command.  Prefix command: print usage and
   the list of subcommands.  */

static void
cmd_set_record_btrace_bts (const char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
/* The "show record btrace bts" command.  Prefix command: show all
   subcommand values.  */

static void
cmd_show_record_btrace_bts (const char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
/* The "set record btrace pt" command.  Prefix command: print usage and
   the list of subcommands.  */

static void
cmd_set_record_btrace_pt (const char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
/* The "show record btrace pt" command.  Prefix command: show all
   subcommand values.  */

static void
cmd_show_record_btrace_pt (const char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" with its "record b" alias, plus the per-format
     start commands "record btrace bts"/"record bts" and
     "record btrace pt"/"record pt".  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefix commands.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set record btrace cpu" and its "auto"/"none" subcommands.  */
  add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
		  _("\
Set the cpu to be used for trace decode.\n\n\
The format is \"<vendor>:<identifier>\" or \"none\" or \"auto\" (default).\n\
For vendor \"intel\" the format is \"<family>/<model>[/<stepping>]\".\n\n\
When decoding branch trace, enable errata workarounds for the specified cpu.\n\
The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
When GDB does not support that cpu, this option can be used to enable\n\
workarounds for a similar cpu that GDB supports.\n\n\
When set to \"none\", errata workarounds are disabled."),
		  &set_record_btrace_cpu_cmdlist,
		  _("set record btrace cpu "), 1,
		  &set_record_btrace_cmdlist);

  add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
Automatically determine the cpu to be used for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
Do not enable errata workarounds for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
Show the cpu to be used for trace decode."),
	   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" and the bts buffer-size setting.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" and the pt buffer-size setting.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the target itself.  */
  add_target (record_btrace_target_info, record_btrace_target_open);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default requested buffer sizes: 64KB for BTS, 16KB for PT.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}