1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace. */
/* Target providing the recorded execution history (branch trace, BTS or
   Intel PT).  It sits at the record stratum; requests it does not handle
   are forwarded to the target beneath.  */
46 class record_btrace_target final : public target_ops
49 record_btrace_target ()
50 { to_stratum = record_stratum; }
52 const char *shortname () override
53 { return "record-btrace"; }
55 const char *longname () override
56 { return _("Branch tracing target"); }
58 const char *doc () override
59 { return _("Collect control-flow trace and provide the execution history."); }
61 void open (const char *, int) override;
62 void close () override;
63 void async (int) override;
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
68 void disconnect (const char *, int) override;
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
74 { record_kill (this); }
76 enum record_method record_method (ptid_t ptid) override;
/* Recording control and history browsing.  */
78 void stop_recording () override;
79 void info_record () override;
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
94 void record_stop_replaying () override;
/* Memory/register access is filtered while replaying; see xfer_partial.  */
96 enum target_xfer_status xfer_partial (enum target_object object,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
108 void fetch_registers (struct regcache *, int) override;
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
113 const struct frame_unwind *get_unwinder () override;
115 const struct frame_unwind *get_tailcall_unwinder () override;
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
121 void stop (ptid_t) override;
122 void update_thread_list () override;
123 bool thread_alive (ptid_t ptid) override;
/* Navigation within the recorded trace.  */
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
128 bool can_execute_reverse () override;
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
/* File-scope state for the record-btrace target: the singleton target
   instance, observer token, replay-memory-access setting, cpu override,
   async event handler, and the command lists for "record btrace".  */
141 static record_btrace_target record_btrace_ops;
143 /* Initialize the record-btrace target ops. */
145 /* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147 static const gdb::observers::token record_btrace_thread_observer_token;
149 /* Memory access types used in set/show record btrace replay-memory-access. */
150 static const char replay_memory_access_read_only[] = "read-only";
151 static const char replay_memory_access_read_write[] = "read-write";
152 static const char *const replay_memory_access_types[] =
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
159 /* The currently allowed replay memory access type. */
160 static const char *replay_memory_access = replay_memory_access_read_only;
162 /* The cpu state kinds. */
/* NOTE(review): the enumerators (CS_AUTO, ...) are elided in this excerpt;
   CS_AUTO is used as the default below.  */
163 enum record_btrace_cpu_state_kind
170 /* The current cpu state. */
171 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
173 /* The current cpu for trace decode. */
174 static struct btrace_cpu record_btrace_cpu;
176 /* Command lists for "set/show record btrace". */
177 static struct cmd_list_element *set_record_btrace_cmdlist;
178 static struct cmd_list_element *show_record_btrace_cmdlist;
180 /* The execution direction of the last resume we got. See record-full.c. */
181 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
183 /* The async event handler for reverse/replay execution. */
184 static struct async_event_handler *record_btrace_async_inferior_event_handler;
186 /* A flag indicating that we are currently generating a core file. */
187 static int record_btrace_generating_corefile;
189 /* The current branch trace configuration. */
190 static struct btrace_config record_btrace_conf;
192 /* Command list for "record btrace". */
193 static struct cmd_list_element *record_btrace_cmdlist;
195 /* Command lists for "set/show record btrace bts". */
196 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
199 /* Command lists for "set/show record btrace pt". */
200 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
203 /* Command list for "set record btrace cpu". */
204 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
206 /* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
/* Emits to gdb_stdlog only when "set debug record" is non-zero.  */
209 #define DEBUG(msg, args...) \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
219 /* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221 const struct btrace_cpu *
222 record_btrace_get_cpu (void)
224 switch (record_btrace_cpu_state)
/* NOTE(review): the CS_AUTO / CS_NONE case labels are elided in this
   excerpt; CS_NONE forces an unknown vendor before returning the
   configured cpu.  */
230 record_btrace_cpu.vendor = CV_UNKNOWN;
233 return &record_btrace_cpu;
236 error (_("Internal error: bad record btrace cpu state."));
239 /* Update the branch trace for the current thread and return a pointer to its
242 Throws an error if there is no thread or no trace. This function never
/* Returns the thread_info of the current thread after fetching its trace;
   errors out on no thread, no registers access, or empty trace.  */
245 static struct thread_info *
246 require_btrace_thread (void)
248 struct thread_info *tp;
252 tp = find_thread_ptid (inferior_ptid);
254 error (_("No thread."));
256 validate_registers_access ();
258 btrace_fetch (tp, record_btrace_get_cpu ());
260 if (btrace_is_empty (tp))
261 error (_("No trace."));
266 /* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
269 Throws an error if there is no thread or no trace. This function never
/* Convenience wrapper around require_btrace_thread returning the thread's
   btrace_thread_info.  */
272 static struct btrace_thread_info *
273 require_btrace (void)
275 struct thread_info *tp;
277 tp = require_btrace_thread ();
282 /* Enable branch tracing for one thread. Warn on errors. */
/* Used as the new-thread observer callback: failures to enable tracing on
   a new thread are downgraded to warnings so the thread still runs.  */
285 record_btrace_enable_warn (struct thread_info *tp)
289 btrace_enable (tp, &record_btrace_conf);
291 CATCH (error, RETURN_MASK_ERROR)
293 warning ("%s", error.message);
298 /* Enable automatic tracing of new threads. */
301 record_btrace_auto_enable (void)
303 DEBUG ("attach thread observer");
305 gdb::observers::new_thread.attach (record_btrace_enable_warn,
306 record_btrace_thread_observer_token);
309 /* Disable automatic tracing of new threads. */
312 record_btrace_auto_disable (void)
314 DEBUG ("detach thread observer");
316 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
319 /* The record-btrace async event handler function. */
/* Forwards the async event into the regular inferior event loop.  */
322 record_btrace_handle_async_inferior_event (gdb_client_data data)
324 inferior_event_handler (INF_REG_EVENT, NULL);
327 /* See record-btrace.h. */
/* Pushes the record-btrace target, enables auto-tracing of new threads,
   installs the async event handler, and notifies the record-changed
   observers.  */
330 record_btrace_push_target (void)
334 record_btrace_auto_enable ();
336 push_target (&record_btrace_ops);
338 record_btrace_async_inferior_event_handler
339 = create_async_event_handler (record_btrace_handle_async_inferior_event,
341 record_btrace_generating_corefile = 0;
343 format = btrace_format_short_string (record_btrace_conf.format);
344 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
347 /* Disable btrace on a set of threads on scope exit. */
/* RAII helper: threads registered via add_thread get btrace disabled in
   the destructor unless the (elided) discard method was called.  */
349 struct scoped_btrace_disable
351 scoped_btrace_disable () = default;
353 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
355 ~scoped_btrace_disable ()
357 for (thread_info *tp : m_threads)
361 void add_thread (thread_info *thread)
363 m_threads.push_front (thread);
372 std::forward_list<thread_info *> m_threads;
375 /* The open method of target record-btrace. */
/* ARGS is an optional thread-number list restricting which threads get
   branch tracing enabled; with no args, all non-exited threads.  */
378 record_btrace_target::open (const char *args, int from_tty)
380 /* If we fail to enable btrace for one thread, disable it for the threads for
381 which it was successfully enabled. */
382 scoped_btrace_disable btrace_disable;
383 struct thread_info *tp;
389 if (!target_has_execution)
390 error (_("The program is not being run."));
392 ALL_NON_EXITED_THREADS (tp)
393 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
395 btrace_enable (tp, &record_btrace_conf);
397 btrace_disable.add_thread (tp);
400 record_btrace_push_target ();
/* Everything succeeded; keep tracing enabled on scope exit.  */
402 btrace_disable.discard ();
405 /* The stop_recording method of target record-btrace. */
/* Stops tracing on all traced threads and detaches the new-thread
   observer; the already-collected history remains browsable.  */
408 record_btrace_target::stop_recording ()
410 struct thread_info *tp;
412 DEBUG ("stop recording");
414 record_btrace_auto_disable ();
416 ALL_NON_EXITED_THREADS (tp)
417 if (tp->btrace.target != NULL)
421 /* The disconnect method of target record-btrace. */
/* Unpushes this target without stopping recording, then forwards the
   disconnect to the target beneath.  */
424 record_btrace_target::disconnect (const char *args,
427 struct target_ops *beneath = this->beneath;
429 /* Do not stop recording, just clean up GDB side. */
430 unpush_target (this);
432 /* Forward disconnect. */
433 beneath->disconnect (args, from_tty);
436 /* The close method of target record-btrace. */
/* Tears down the async handler, disables auto-tracing, and tears down
   btrace on any thread still being traced.  */
439 record_btrace_target::close ()
441 struct thread_info *tp;
443 if (record_btrace_async_inferior_event_handler != NULL)
444 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
446 /* Make sure automatic recording gets disabled even if we did not stop
447 recording before closing the record-btrace target. */
448 record_btrace_auto_disable ();
450 /* We should have already stopped recording.
451 Tear down btrace in case we have not. */
452 ALL_NON_EXITED_THREADS (tp)
453 btrace_teardown (tp);
456 /* The async method of target record-btrace. */
/* Marks or clears our async event handler, then forwards the request to
   the target beneath.  */
459 record_btrace_target::async (int enable)
462 mark_async_event_handler (record_btrace_async_inferior_event_handler);
464 clear_async_event_handler (record_btrace_async_inferior_event_handler);
466 this->beneath->async (enable);
469 /* Adjusts the size and returns a human readable size suffix. */
/* Scales *SIZE down to the largest unit (GB/MB/KB) that divides it evenly
   and returns the matching suffix string.  */
472 record_btrace_adjust_size (unsigned int *size)
478 if ((sz & ((1u << 30) - 1)) == 0)
483 else if ((sz & ((1u << 20) - 1)) == 0)
488 else if ((sz & ((1u << 10) - 1)) == 0)
497 /* Print a BTS configuration. */
500 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
508 suffix = record_btrace_adjust_size (&size);
509 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix)
513 /* Print an Intel Processor Trace configuration. */
516 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
524 suffix = record_btrace_adjust_size (&size);
525 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
529 /* Print a branch tracing configuration. */
532 record_btrace_print_conf (const struct btrace_config *conf)
534 printf_unfiltered (_("Recording format: %s.\n"),
535 btrace_format_string (conf->format));
537 switch (conf->format)
539 case BTRACE_FORMAT_NONE:
542 case BTRACE_FORMAT_BTS:
543 record_btrace_print_bts_conf (&conf->bts);
546 case BTRACE_FORMAT_PT:
547 record_btrace_print_pt_conf (&conf->pt);
551 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
554 /* The info_record method of target record-btrace. */
/* Prints the recording configuration plus instruction/function/gap counts
   for the current thread, and the replay position if replaying.  */
557 record_btrace_target::info_record ()
559 struct btrace_thread_info *btinfo;
560 const struct btrace_config *conf;
561 struct thread_info *tp;
562 unsigned int insns, calls, gaps;
566 tp = find_thread_ptid (inferior_ptid);
568 error (_("No thread."));
570 validate_registers_access ();
572 btinfo = &tp->btrace;
574 conf = ::btrace_conf (btinfo);
576 record_btrace_print_conf (conf);
578 btrace_fetch (tp, record_btrace_get_cpu ());
584 if (!btrace_is_empty (tp))
586 struct btrace_call_iterator call;
587 struct btrace_insn_iterator insn;
/* Count functions and instructions from the trace's end iterators.  */
589 btrace_call_end (&call, btinfo);
590 btrace_call_prev (&call, 1);
591 calls = btrace_call_number (&call);
593 btrace_insn_end (&insn, btinfo);
594 insns = btrace_insn_number (&insn);
596 /* If the last instruction is not a gap, it is the current instruction
597 that is not actually part of the record. */
598 if (btrace_insn_get (&insn) != NULL)
601 gaps = btinfo->ngaps;
604 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
605 "for thread %s (%s).\n"), insns, calls, gaps,
606 print_thread_id (tp), target_pid_to_str (tp->ptid));
608 if (btrace_is_replaying (tp))
609 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
610 btrace_insn_number (btinfo->replay));
613 /* Print a decode error. */
/* Renders "[decode error (N): <message>]" via UIOUT; positive ERRCODEs in
   the PT format are notifications and omit the numeric prefix.  */
616 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
617 enum btrace_format format)
619 const char *errstr = btrace_decode_error (format, errcode);
621 uiout->text (_("["));
622 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
623 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
625 uiout->text (_("decode error ("));
626 uiout->field_int ("errcode", errcode);
627 uiout->text (_("): "));
629 uiout->text (errstr);
630 uiout->text (_("]\n"));
633 /* Print an unsigned int. */
/* Helper: ui_out has no native unsigned field, so format it ourselves.  */
636 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
638 uiout->field_fmt (fld, "%u", val);
641 /* A range of source lines. */
643 struct btrace_line_range
645 /* The symtab this line is from. */
646 struct symtab *symtab;
648 /* The first line (inclusive). */
651 /* The last line (exclusive). */
655 /* Construct a line range. */
/* Returns a value-initialized half-open range [BEGIN, END) in SYMTAB.  */
657 static struct btrace_line_range
658 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
660 struct btrace_line_range range;
662 range.symtab = symtab;
669 /* Add a line to a line range. */
/* Grows RANGE just enough to include LINE; an empty range becomes the
   single-line range [LINE, LINE + 1).  */
671 static struct btrace_line_range
672 btrace_line_range_add (struct btrace_line_range range, int line)
674 if (range.end <= range.begin)
676 /* This is the first entry. */
678 range.end = line + 1;
680 else if (line < range.begin)
682 else if (range.end < line)
688 /* Return non-zero if RANGE is empty, zero otherwise. */
691 btrace_line_range_is_empty (struct btrace_line_range range)
693 return range.end <= range.begin;
696 /* Return non-zero if LHS contains RHS, zero otherwise. */
/* Containment requires the same symtab and [rhs.begin, rhs.end) within
   [lhs.begin, lhs.end).  */
699 btrace_line_range_contains_range (struct btrace_line_range lhs,
700 struct btrace_line_range rhs)
702 return ((lhs.symtab == rhs.symtab)
703 && (lhs.begin <= rhs.begin)
704 && (rhs.end <= lhs.end));
707 /* Find the line range associated with PC. */
709 static struct btrace_line_range
710 btrace_find_line_range (CORE_ADDR pc)
712 struct btrace_line_range range;
713 struct linetable_entry *lines;
714 struct linetable *ltable;
715 struct symtab *symtab;
718 symtab = find_pc_line_symtab (pc);
720 return btrace_mk_line_range (NULL, 0, 0);
722 ltable = SYMTAB_LINETABLE (symtab);
724 return btrace_mk_line_range (symtab, 0, 0);
726 nlines = ltable->nitems;
727 lines = ltable->item;
729 return btrace_mk_line_range (symtab, 0, 0);
731 range = btrace_mk_line_range (symtab, 0, 0);
732 for (i = 0; i < nlines - 1; i++)
734 if ((lines[i].pc == pc) && (lines[i].line != 0))
735 range = btrace_line_range_add (range, lines[i].line);
741 /* Print source lines in LINES to UIOUT.
743 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
744 instructions corresponding to that source line. When printing a new source
745 line, we do the cleanups for the open chain and open a new cleanup chain for
746 the new source line. If the source line range in LINES is not empty, this
747 function will leave the cleanup chain for the last printed source line open
748 so instructions can be added to it. */
/* SRC_AND_ASM_TUPLE / ASM_LIST are the optional ui-out emitters re-opened
   per source line; the last pair is left open for the caller.  */
751 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
752 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
753 gdb::optional<ui_out_emit_list> *asm_list,
754 gdb_disassembly_flags flags)
756 print_source_lines_flags psl_flags;
758 if (flags & DISASSEMBLY_FILENAME)
759 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
761 for (int line = lines.begin; line < lines.end; ++line)
765 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
767 print_source_lines (lines.symtab, line, line + 1, psl_flags);
769 asm_list->emplace (uiout, "line_asm_insn");
773 /* Disassemble a section of the recorded instruction trace. */
/* Prints instructions in [BEGIN, END) of BTINFO's trace to UIOUT,
   interleaving source lines when DISASSEMBLY_SOURCE is set and rendering
   decode errors for gaps (NULL instructions).  */
776 btrace_insn_history (struct ui_out *uiout,
777 const struct btrace_thread_info *btinfo,
778 const struct btrace_insn_iterator *begin,
779 const struct btrace_insn_iterator *end,
780 gdb_disassembly_flags flags)
782 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
783 btrace_insn_number (begin), btrace_insn_number (end));
785 flags |= DISASSEMBLY_SPECULATIVE;
787 struct gdbarch *gdbarch = target_gdbarch ();
788 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
790 ui_out_emit_list list_emitter (uiout, "asm_insns");
792 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
793 gdb::optional<ui_out_emit_list> asm_list;
795 gdb_pretty_print_disassembler disasm (gdbarch);
797 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
798 btrace_insn_next (&it, 1))
800 const struct btrace_insn *insn;
802 insn = btrace_insn_get (&it);
804 /* A NULL instruction indicates a gap in the trace. */
807 const struct btrace_config *conf;
809 conf = btrace_conf (btinfo);
811 /* We have trace so we must have a configuration. */
812 gdb_assert (conf != NULL);
814 uiout->field_fmt ("insn-number", "%u",
815 btrace_insn_number (&it));
818 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
823 struct disasm_insn dinsn;
825 if ((flags & DISASSEMBLY_SOURCE) != 0)
827 struct btrace_line_range lines;
829 lines = btrace_find_line_range (insn->pc);
830 if (!btrace_line_range_is_empty (lines)
831 && !btrace_line_range_contains_range (last_lines, lines))
/* New source line range: (re)open the tuple/list emitters.  */
833 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
837 else if (!src_and_asm_tuple.has_value ())
839 gdb_assert (!asm_list.has_value ());
841 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
843 /* No source information. */
844 asm_list.emplace (uiout, "line_asm_insn");
847 gdb_assert (src_and_asm_tuple.has_value ());
848 gdb_assert (asm_list.has_value ());
851 memset (&dinsn, 0, sizeof (dinsn));
852 dinsn.number = btrace_insn_number (&it);
853 dinsn.addr = insn->pc;
855 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
856 dinsn.is_speculative = 1;
858 disasm.pretty_print_insn (uiout, &dinsn, flags);
863 /* The insn_history method of target record-btrace. */
/* Shows SIZE instructions; negative SIZE means backwards.  The window is
   anchored at the replay position (or trace tail) on the first call, and
   continues from the previous window on subsequent calls.  */
866 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
868 struct btrace_thread_info *btinfo;
869 struct btrace_insn_history *history;
870 struct btrace_insn_iterator begin, end;
871 struct ui_out *uiout;
872 unsigned int context, covered;
874 uiout = current_uiout;
875 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
876 context = abs (size);
878 error (_("Bad record instruction-history-size."));
880 btinfo = require_btrace ();
881 history = btinfo->insn_history;
884 struct btrace_insn_iterator *replay;
886 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
888 /* If we're replaying, we start at the replay position. Otherwise, we
889 start at the tail of the trace. */
890 replay = btinfo->replay;
894 btrace_insn_end (&begin, btinfo);
896 /* We start from here and expand in the requested direction. Then we
897 expand in the other direction, as well, to fill up any remaining
902 /* We want the current position covered, as well. */
903 covered = btrace_insn_next (&end, 1);
904 covered += btrace_insn_prev (&begin, context - covered);
905 covered += btrace_insn_next (&end, context - covered);
909 covered = btrace_insn_next (&end, context);
910 covered += btrace_insn_prev (&begin, context - covered);
915 begin = history->begin;
918 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
919 btrace_insn_number (&begin), btrace_insn_number (&end));
/* Continue the previous window backwards or forwards by CONTEXT.  */
924 covered = btrace_insn_prev (&begin, context);
929 covered = btrace_insn_next (&end, context);
934 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
938 printf_unfiltered (_("At the start of the branch trace record.\n"));
940 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the window so the next call can continue from it.  */
943 btrace_set_insn_history (btinfo, &begin, &end);
946 /* The insn_history_range method of target record-btrace. */
/* Shows the instructions numbered [FROM, TO], both inclusive; the upper
   bound is silently truncated to the end of the trace.  */
949 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
950 gdb_disassembly_flags flags)
952 struct btrace_thread_info *btinfo;
953 struct btrace_insn_iterator begin, end;
954 struct ui_out *uiout;
955 unsigned int low, high;
958 uiout = current_uiout;
959 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
963 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
965 /* Check for wrap-arounds. */
966 if (low != from || high != to)
967 error (_("Bad range."));
970 error (_("Bad range."));
972 btinfo = require_btrace ();
974 found = btrace_find_insn_by_number (&begin, btinfo, low);
976 error (_("Range out of bounds."));
978 found = btrace_find_insn_by_number (&end, btinfo, high);
981 /* Silently truncate the range. */
982 btrace_insn_end (&end, btinfo);
986 /* We want both begin and end to be inclusive. */
987 btrace_insn_next (&end, 1);
990 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
991 btrace_set_insn_history (btinfo, &begin, &end);
994 /* The insn_history_from method of target record-btrace. */
/* Shows SIZE instructions ending at (negative SIZE) or starting at
   (positive SIZE) instruction number FROM, guarding against ULONGEST
   wrap-around, then delegates to insn_history_range.  */
997 record_btrace_target::insn_history_from (ULONGEST from, int size,
998 gdb_disassembly_flags flags)
1000 ULONGEST begin, end, context;
1002 context = abs (size);
1004 error (_("Bad record instruction-history-size."));
1013 begin = from - context + 1;
1018 end = from + context - 1;
1020 /* Check for wrap-around. */
1025 insn_history_range (begin, end, flags);
1028 /* Print the instruction number range for a function call history line. */
/* Prints the inclusive [insn begin, insn end] numbers of BFUN.  */
1031 btrace_call_history_insn_range (struct ui_out *uiout,
1032 const struct btrace_function *bfun)
1034 unsigned int begin, end, size;
1036 size = bfun->insn.size ();
1037 gdb_assert (size > 0);
1039 begin = bfun->insn_offset;
1040 end = begin + size - 1;
1042 ui_out_field_uint (uiout, "insn begin", begin);
1044 ui_out_field_uint (uiout, "insn end", end);
1047 /* Compute the lowest and highest source line for the instructions in BFUN
1048 and return them in PBEGIN and PEND.
1049 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1050 result from inlining or macro expansion. */
1053 btrace_compute_src_line_range (const struct btrace_function *bfun,
1054 int *pbegin, int *pend)
1056 struct symtab *symtab;
1067 symtab = symbol_symtab (sym);
1069 for (const btrace_insn &insn : bfun->insn)
1071 struct symtab_and_line sal;
1073 sal = find_pc_line (insn.pc, 0);
1074 if (sal.symtab != symtab || sal.line == 0)
1077 begin = std::min (begin, sal.line);
1078 end = std::max (end, sal.line);
1086 /* Print the source line information for a function call history line. */
/* Prints "file:min[-max]" style fields for BFUN's source range.  */
1089 btrace_call_history_src_line (struct ui_out *uiout,
1090 const struct btrace_function *bfun)
1099 uiout->field_string ("file",
1100 symtab_to_filename_for_display (symbol_symtab (sym)));
1102 btrace_compute_src_line_range (bfun, &begin, &end);
1107 uiout->field_int ("min line", begin);
1113 uiout->field_int ("max line", end);
1116 /* Get the name of a branch trace function. */
/* Prefers the full symbol's print name, then the minimal symbol's;
   returns an unknown-name fallback otherwise (elided here).  */
1119 btrace_get_bfun_name (const struct btrace_function *bfun)
1121 struct minimal_symbol *msym;
1131 return SYMBOL_PRINT_NAME (sym);
1132 else if (msym != NULL)
1133 return MSYMBOL_PRINT_NAME (msym);
1138 /* Disassemble a section of the recorded function trace. */
/* Prints function-call history entries in [BEGIN, END) of BTINFO to
   UIOUT.  FLAGS select indentation by call depth, instruction ranges,
   and source line info; gaps are rendered as decode errors.  */
1141 btrace_call_history (struct ui_out *uiout,
1142 const struct btrace_thread_info *btinfo,
1143 const struct btrace_call_iterator *begin,
1144 const struct btrace_call_iterator *end,
1147 struct btrace_call_iterator it;
1148 record_print_flags flags = (enum record_print_flag) int_flags;
1150 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1151 btrace_call_number (end));
1153 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1155 const struct btrace_function *bfun;
1156 struct minimal_symbol *msym;
1159 bfun = btrace_call_get (&it);
1163 /* Print the function index. */
1164 ui_out_field_uint (uiout, "index", bfun->number);
1167 /* Indicate gaps in the trace. */
1168 if (bfun->errcode != 0)
1170 const struct btrace_config *conf;
1172 conf = btrace_conf (btinfo);
1174 /* We have trace so we must have a configuration. */
1175 gdb_assert (conf != NULL);
1177 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1182 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1184 int level = bfun->level + btinfo->level, i;
1186 for (i = 0; i < level; ++i)
/* Prefer the full symbol's name, then the minimal symbol's, else "??"
   (CLI only; MI omits the field).  */
1191 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1192 else if (msym != NULL)
1193 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1194 else if (!uiout->is_mi_like_p ())
1195 uiout->field_string ("function", "??");
1197 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1199 uiout->text (_("\tinst "));
1200 btrace_call_history_insn_range (uiout, bfun);
1203 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1205 uiout->text (_("\tat "));
1206 btrace_call_history_src_line (uiout, bfun);
1213 /* The call_history method of target record-btrace. */
/* Shows SIZE function-call history entries; negative SIZE means
   backwards.  Mirrors insn_history: anchored at the replay position or
   trace tail on first use, continuing the previous window afterwards.  */
1216 record_btrace_target::call_history (int size, record_print_flags flags)
1218 struct btrace_thread_info *btinfo;
1219 struct btrace_call_history *history;
1220 struct btrace_call_iterator begin, end;
1221 struct ui_out *uiout;
1222 unsigned int context, covered;
1224 uiout = current_uiout;
1225 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1226 context = abs (size);
1228 error (_("Bad record function-call-history-size."));
1230 btinfo = require_btrace ();
1231 history = btinfo->call_history;
1232 if (history == NULL)
1234 struct btrace_insn_iterator *replay;
1236 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1238 /* If we're replaying, we start at the replay position. Otherwise, we
1239 start at the tail of the trace. */
1240 replay = btinfo->replay;
1243 begin.btinfo = btinfo;
1244 begin.index = replay->call_index;
1247 btrace_call_end (&begin, btinfo);
1249 /* We start from here and expand in the requested direction. Then we
1250 expand in the other direction, as well, to fill up any remaining
1255 /* We want the current position covered, as well. */
1256 covered = btrace_call_next (&end, 1);
1257 covered += btrace_call_prev (&begin, context - covered);
1258 covered += btrace_call_next (&end, context - covered);
1262 covered = btrace_call_next (&end, context);
1263 covered += btrace_call_prev (&begin, context- covered);
1268 begin = history->begin;
1271 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1272 btrace_call_number (&begin), btrace_call_number (&end));
/* Continue the previous window backwards or forwards by CONTEXT.  */
1277 covered = btrace_call_prev (&begin, context);
1282 covered = btrace_call_next (&end, context);
1287 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1291 printf_unfiltered (_("At the start of the branch trace record.\n"));
1293 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the window so the next call can continue from it.  */
1296 btrace_set_call_history (btinfo, &begin, &end);
1299 /* The call_history_range method of target record-btrace. */
/* Shows the function-call entries numbered [FROM, TO], both inclusive;
   the upper bound is silently truncated to the end of the trace.  */
1302 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1303 record_print_flags flags)
1305 struct btrace_thread_info *btinfo;
1306 struct btrace_call_iterator begin, end;
1307 struct ui_out *uiout;
1308 unsigned int low, high;
1311 uiout = current_uiout;
1312 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1316 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1318 /* Check for wrap-arounds. */
1319 if (low != from || high != to)
1320 error (_("Bad range."));
1323 error (_("Bad range."));
1325 btinfo = require_btrace ();
1327 found = btrace_find_call_by_number (&begin, btinfo, low);
1329 error (_("Range out of bounds."));
1331 found = btrace_find_call_by_number (&end, btinfo, high);
1334 /* Silently truncate the range. */
1335 btrace_call_end (&end, btinfo);
1339 /* We want both begin and end to be inclusive. */
1340 btrace_call_next (&end, 1);
1343 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1344 btrace_set_call_history (btinfo, &begin, &end);
1347 /* The call_history_from method of target record-btrace. */
/* Shows SIZE entries ending at (negative SIZE) or starting at (positive
   SIZE) entry FROM, guarding against ULONGEST wrap-around.  */
1350 record_btrace_target::call_history_from (ULONGEST from, int size,
1351 record_print_flags flags)
1353 ULONGEST begin, end, context;
1355 context = abs (size);
1357 error (_("Bad record function-call-history-size."));
1366 begin = from - context + 1;
1371 end = from + context - 1;
1373 /* Check for wrap-around. */
1378 call_history_range ( begin, end, flags);
1381 /* The record_method method of target record-btrace. */
/* Reports whether the thread identified by PTID is being btrace-traced.  */
1384 record_btrace_target::record_method (ptid_t ptid)
1386 struct thread_info * const tp = find_thread_ptid (ptid);
1389 error (_("No thread."));
1391 if (tp->btrace.target == NULL)
1392 return RECORD_METHOD_NONE;
1394 return RECORD_METHOD_BTRACE;
1397 /* The record_is_replaying method of target record-btrace. */
/* True if any non-exited thread matching PTID is replaying.  */
1400 record_btrace_target::record_is_replaying (ptid_t ptid)
1402 struct thread_info *tp;
1404 ALL_NON_EXITED_THREADS (tp)
1405 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1411 /* The record_will_replay method of target record-btrace. */
/* Reverse execution always replays; forward execution replays only if
   already replaying.  */
1414 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1416 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1419 /* The xfer_partial method of target record-btrace. */
1421 enum target_xfer_status
/* While replaying with read-only access, memory writes are rejected and
   reads are restricted to read-only sections; everything else is
   forwarded to the target beneath.  */
1422 record_btrace_target::xfer_partial (enum target_object object,
1423 const char *annex, gdb_byte *readbuf,
1424 const gdb_byte *writebuf, ULONGEST offset,
1425 ULONGEST len, ULONGEST *xfered_len)
1427 /* Filter out requests that don't make sense during replay. */
1428 if (replay_memory_access == replay_memory_access_read_only
1429 && !record_btrace_generating_corefile
1430 && record_is_replaying (inferior_ptid))
1434 case TARGET_OBJECT_MEMORY:
1436 struct target_section *section;
1438 /* We do not allow writing memory in general. */
1439 if (writebuf != NULL)
1442 return TARGET_XFER_UNAVAILABLE;
1445 /* We allow reading readonly memory. */
1446 section = target_section_by_addr (this, offset);
1447 if (section != NULL)
1449 /* Check if the section we found is readonly. */
1450 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1451 section->the_bfd_section)
1452 & SEC_READONLY) != 0)
1454 /* Truncate the request to fit into this section. */
1455 len = std::min (len, section->endaddr - offset);
1461 return TARGET_XFER_UNAVAILABLE;
1466 /* Forward the request. */
1467 return this->beneath->xfer_partial (object, annex, readbuf, writebuf,
1468 offset, len, xfered_len);
/* The insert_breakpoint method of target record-btrace.  */

record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt)
  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  /* Forward to the target beneath; restore the memory-access mode on
     both the normal and the exceptional return path.  */
  ret = this->beneath->insert_breakpoint (gdbarch, bp_tgt);
  CATCH (except, RETURN_MASK_ALL)
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The remove_breakpoint method of target record-btrace.  */

record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt,
                                         enum remove_bp_reason reason)
  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  /* Forward to the target beneath; restore the memory-access mode on
     both the normal and the exceptional return path.  */
  ret = this->beneath->remove_breakpoint (gdbarch, bp_tgt, reason);
  CATCH (except, RETURN_MASK_ALL)
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The fetch_registers method of target record-btrace.  */

record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)

      /* Supply the PC of the instruction at the current replay
         position.  */
      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);

  /* Not replaying: read registers from the target beneath.  */
  this->beneath->fetch_registers (regcache, regno);
/* The store_registers method of target record-btrace.  */

record_btrace_target::store_registers (struct regcache *regcache, int regno)
  struct target_ops *t;

  /* Writing registers would desynchronize the recorded history.  */
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  this->beneath->store_registers (regcache, regno);
/* The prepare_to_store method of target record-btrace.  */

record_btrace_target::prepare_to_store (struct regcache *regcache)
  /* While replaying, stores are rejected, so there is nothing to
     prepare; only forward when not replaying.  */
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache_get_ptid (regcache)))

  this->beneath->prepare_to_store (regcache);
/* The branch trace frame cache.  Associates a frame with the thread and
   branch trace function segment it was computed from.  */

struct btrace_frame_cache
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info.  Also serves as the hash key (see bfcache_hash).  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
/* A struct btrace_frame_cache hash table indexed by NEXT.
   Maps frame_info pointers to their btrace frame cache entries.  */

static htab_t bfcache;
1614 /* hash_f for htab_create_alloc of bfcache. */
1617 bfcache_hash (const void *arg)
1619 const struct btrace_frame_cache *cache
1620 = (const struct btrace_frame_cache *) arg;
1622 return htab_hash_pointer (cache->frame);
1625 /* eq_f for htab_create_alloc of bfcache. */
1628 bfcache_eq (const void *arg1, const void *arg2)
1630 const struct btrace_frame_cache *cache1
1631 = (const struct btrace_frame_cache *) arg1;
1632 const struct btrace_frame_cache *cache2
1633 = (const struct btrace_frame_cache *) arg2;
1635 return cache1->frame == cache2->frame;
1638 /* Create a new btrace frame cache. */
1640 static struct btrace_frame_cache *
1641 bfcache_new (struct frame_info *frame)
1643 struct btrace_frame_cache *cache;
1646 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1647 cache->frame = frame;
1649 slot = htab_find_slot (bfcache, cache, INSERT);
1650 gdb_assert (*slot == NULL);
1656 /* Extract the branch trace function from a branch trace frame. */
1658 static const struct btrace_function *
1659 btrace_get_frame_function (struct frame_info *frame)
1661 const struct btrace_frame_cache *cache;
1662 struct btrace_frame_cache pattern;
1665 pattern.frame = frame;
1667 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1671 cache = (const struct btrace_frame_cache *) *slot;
/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  gdb_assert (bfun != NULL);

  /* Without a caller (UP link) the stack beyond this frame cannot be
     reconstructed from the branch trace.  */
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
/* Implement this_id method for record_btrace_frame_unwind.  */

record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;
  gdb_assert (bfun != NULL);

  /* Walk back along the PREV links to the earliest segment of this
     function so the id stays stable as new segments are recorded.  */
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  /* The stack contents are unavailable during replay; build an id from
     the function's start address and its segment number.  */
  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;

  /* Only the PC can be reconstructed from the branch trace.  */
  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  /* If the UP link was created by a return, the caller resumes at its
     first recorded instruction; otherwise (a call) it resumes after its
     last recorded instruction.  */
  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
    pc = caller->insn.back ().pc;
    pc += gdb_insn_length (gdbarch, pc);

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
/* Implement sniffer method for record_btrace_frame_unwind.  */

record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  next = get_next_frame (this_frame);
      /* Innermost frame: take the function segment at the current
         replay position.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
        bfun = &replay->btinfo->functions[replay->call_index];

      /* Outer frame: derive the caller from the callee's cached
         function segment via its UP link.  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)

      bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  *this_cache = cache;
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Only claims frames whose callee's UP link was created by a tail
   call.  */

record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);

  callee = btrace_get_frame_function (next);

  /* Only handle tail calls; regular calls are handled by
     record_btrace_frame_sniffer.  */
  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)

  tinfo = find_thread_ptid (inferior_ptid);
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  *this_cache = cache;
/* Implement dealloc_cache method for the btrace frame unwinders.
   Removes the cache entry from BFCACHE; the cache memory itself is
   obstack-allocated and released with the frame.  */

record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
  struct btrace_frame_cache *cache;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
/* The tail call unwinder shares the btrace unwinder's callbacks except
   for the sniffer, which only claims tail call frames.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
/* Implement the get_unwinder method.  Return the unwinder used for
   normal frames while replaying.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
  return &record_btrace_frame_unwind;
/* Implement the get_tailcall_unwinder method.  Return the unwinder used
   for tail call frames while replaying.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
  return &record_btrace_tailcall_frame_unwind;
/* Return a human-readable string for FLAG.  */

btrace_thread_flag_to_str (enum btrace_thread_flag flag)
      /* Presumably the BTHR_RSTEP case -- the case labels are not
         visible here; verify against the enum definition.  */
      return "reverse-step";

      /* Presumably the BTHR_RCONT case.  */
      return "reverse-cont";
/* Indicate that TP should be resumed according to FLAG.  The actual
   stepping happens later, in record_btrace_target::wait.  */

record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
  struct frame_info *frame;
  ptid_t old_inferior_ptid;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  /* Compute the frame; restore the execution state and INFERIOR_PTID on
     both the normal and the exceptional return path.  */
  frame = get_current_frame ();
  CATCH (except, RETURN_MASK_ALL)
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;
/* Start replaying a thread.  Returns the new replay iterator, which is
   also stored in TP's btrace info.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
          steps = btrace_insn_prev (replay, 1);
            error (_("No trace."));

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;

  /* On error, undo the partial replay setup before re-throwing.  */
  CATCH (except, RETURN_MASK_ALL)
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
/* Stop replaying a thread.  Frees TP's replay iterator and invalidates
   cached registers.  */

record_btrace_stop_replaying (struct thread_info *tp)
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
/* Stop replaying TP if it is at the end of its execution history.  */

record_btrace_stop_replaying_at_end (struct thread_info *tp)
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Compare the replay position against the end of the trace; if they
     coincide, the thread has caught up with the recording.  */
  btrace_insn_end (&end, btinfo);

  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);
/* The resume method of target record-btrace.  */

record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
         ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
         step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = ::execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
      this->beneath->resume (ptid, step, signal);

  /* Compute the btrace thread flag for the requested move.  */
  if (::execution_direction == EXEC_REVERSE)
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
            if (ptid_match (tp->ptid, inferior_ptid))
              record_btrace_resume_thread (tp, flag);
              record_btrace_resume_thread (tp, cflag);

      /* Non-stop: resume all matching threads with the requested flag.  */
      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
          record_btrace_resume_thread (tp, flag);

  /* Async support.  */
  if (target_can_async_p ())
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
/* The commit_resume method of target record-btrace.  Only forwarded
   when not replaying, matching the filtering done in resume.  */

record_btrace_target::commit_resume ()
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    beneath->commit_resume ();
/* Cancel resuming TP.  Clears any pending move/stop request and stops
   replaying if TP reached the end of its history.  */

record_btrace_cancel_resume (struct thread_info *tp)
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
         print_thread_id (tp),
         target_pid_to_str (tp->ptid), flags,
         btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
2243 /* Return a target_waitstatus indicating that we ran out of history. */
2245 static struct target_waitstatus
2246 btrace_step_no_history (void)
2248 struct target_waitstatus status;
2250 status.kind = TARGET_WAITKIND_NO_HISTORY;
2255 /* Return a target_waitstatus indicating that a step finished. */
2257 static struct target_waitstatus
2258 btrace_step_stopped (void)
2260 struct target_waitstatus status;
2262 status.kind = TARGET_WAITKIND_STOPPED;
2263 status.value.sig = GDB_SIGNAL_TRAP;
2268 /* Return a target_waitstatus indicating that a thread was stopped as
2271 static struct target_waitstatus
2272 btrace_step_stopped_on_request (void)
2274 struct target_waitstatus status;
2276 status.kind = TARGET_WAITKIND_STOPPED;
2277 status.value.sig = GDB_SIGNAL_0;
2282 /* Return a target_waitstatus indicating a spurious stop. */
2284 static struct target_waitstatus
2285 btrace_step_spurious (void)
2287 struct target_waitstatus status;
2289 status.kind = TARGET_WAITKIND_SPURIOUS;
2294 /* Return a target_waitstatus indicating that the thread was not resumed. */
2296 static struct target_waitstatus
2297 btrace_step_no_resumed (void)
2299 struct target_waitstatus status;
2301 status.kind = TARGET_WAITKIND_NO_RESUMED;
2306 /* Return a target_waitstatus indicating that we should wait again. */
2308 static struct target_waitstatus
2309 btrace_step_again (void)
2311 struct target_waitstatus status;
2313 status.kind = TARGET_WAITKIND_IGNORE;
2318 /* Clear the record histories. */
2321 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2323 xfree (btinfo->insn_history);
2324 xfree (btinfo->call_history);
2326 btinfo->insn_history = NULL;
2327 btinfo->call_history = NULL;
/* Check whether TP's current replay position is at a breakpoint.  */

record_btrace_replay_at_breakpoint (struct thread_info *tp)
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  insn = btrace_insn_get (replay);

  inf = find_inferior_ptid (tp->ptid);

  /* Delegate the breakpoint check to the record core; it also updates
     BTINFO's stop_reason accordingly.  */
  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
                                             &btinfo->stop_reason);
/* Step one instruction in forward direction.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  */
      /* We will bail out here if we continue stepping after reaching the end
         of the execution history.  */
      steps = btrace_insn_next (replay, 1);
        return btrace_step_no_history ();

  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  /* Spurious: the caller decides whether to keep stepping.  */
  return btrace_step_spurious ();
/* Step one instruction in backward direction.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
      steps = btrace_insn_prev (replay, 1);
        return btrace_step_no_history ();

  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
/* Step a single thread.  Consumes TP's pending move/stop request and
   performs (at most) one single step, returning the resulting wait
   status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the request; it is re-armed below for continue requests.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flags,
         btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

    internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    /* Stop request (BTHR_STOP, presumably).  */
    return btrace_step_stopped_on_request ();

    /* Single forward step.  */
    status = record_btrace_single_step_forward (tp);
    if (status.kind != TARGET_WAITKIND_SPURIOUS)

    return btrace_step_stopped ();

    /* Single reverse step.  */
    status = record_btrace_single_step_backward (tp);
    if (status.kind != TARGET_WAITKIND_SPURIOUS)

    return btrace_step_stopped ();

    /* Forward continue: re-arm the request unless we hit an event.  */
    status = record_btrace_single_step_forward (tp);
    if (status.kind != TARGET_WAITKIND_SPURIOUS)

    btinfo->flags |= flags;
    return btrace_step_again ();

    /* Reverse continue: re-arm the request unless we hit an event.  */
    status = record_btrace_single_step_backward (tp);
    if (status.kind != TARGET_WAITKIND_SPURIOUS)

    btinfo->flags |= flags;
    return btrace_step_again ();

  /* We keep threads moving at the end of their execution history.  The wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;
/* A thread pointer; used as the element type of thread vectors.  */

typedef struct thread_info * tp_t;
2528 /* Announce further events if necessary. */
2531 record_btrace_maybe_mark_async_event
2532 (const std::vector<thread_info *> &moving,
2533 const std::vector<thread_info *> &no_history)
2535 bool more_moving = !moving.empty ();
2536 bool more_no_history = !no_history.empty ();;
2538 if (!more_moving && !more_no_history)
2542 DEBUG ("movers pending");
2544 if (more_no_history)
2545 DEBUG ("no-history pending");
2547 mark_async_event_handler (record_btrace_async_inferior_event_handler);
/* The wait method of target record-btrace.  */

record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
  std::vector<thread_info *> moving;
  std::vector<thread_info *> no_history;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
      return this->beneath->wait (ptid, status, options);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
      if (ptid_match (tp->ptid, ptid)
          && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
        moving.push_back (tp);

  /* No thread had a pending request: report that nothing was resumed.  */
  if (moving.empty ())
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
             target_waitstatus_to_string (status).c_str ());

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  struct thread_info *eventing = NULL;
  while ((eventing == NULL) && !moving.empty ())
      for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
          thread_info *tp = moving[ix];

          *status = record_btrace_step_thread (tp);

          switch (status->kind)
              case TARGET_WAITKIND_IGNORE:
              case TARGET_WAITKIND_NO_HISTORY:
                no_history.push_back (ordered_remove (moving, ix));
                eventing = unordered_remove (moving, ix);

  if (eventing == NULL)
      /* We started with at least one moving thread.  This thread must have
         either stopped or reached the end of its execution history.

         In the former case, EVENTING must not be NULL.
         In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!no_history.empty ());

      /* We kept threads moving at the end of their execution history.  Stop
         EVENTING now that we are going to report its stop.  */
      eventing = unordered_remove (no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
      ALL_NON_EXITED_THREADS (tp)
        record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
         print_thread_id (eventing),
         target_pid_to_str (eventing->ptid),
         target_waitstatus_to_string (status).c_str ());

  return eventing->ptid;
/* The stop method of target record-btrace.  */

record_btrace_target::stop (ptid_t ptid)
  DEBUG ("stop %s", target_pid_to_str (ptid));

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
      this->beneath->stop (ptid);

      /* Replace any pending move request with a stop request; it is
         honored in record_btrace_step_thread.  */
      struct thread_info *tp;

      ALL_NON_EXITED_THREADS (tp)
        if (ptid_match (tp->ptid, ptid))
            tp->btrace.flags &= ~BTHR_MOVE;
            tp->btrace.flags |= BTHR_STOP;
/* The can_execute_reverse method of target record-btrace.  Branch
   tracing supports reverse execution.  */

record_btrace_target::can_execute_reverse ()
/* The stopped_by_sw_breakpoint method of target record-btrace.  */

record_btrace_target::stopped_by_sw_breakpoint ()
  /* While replaying, answer from the recorded stop reason; otherwise
     forward to the target beneath.  */
  if (record_is_replaying (minus_one_ptid))
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;

  return this->beneath->stopped_by_sw_breakpoint ();
/* The supports_stopped_by_sw_breakpoint method of target
   record-btrace.  */

record_btrace_target::supports_stopped_by_sw_breakpoint ()
  /* Supported while replaying; otherwise defer to the target beneath.  */
  if (record_is_replaying (minus_one_ptid))

  return this->beneath->supports_stopped_by_sw_breakpoint ();
/* The stopped_by_hw_breakpoint method of target record-btrace.  */

record_btrace_target::stopped_by_hw_breakpoint ()
  /* While replaying, answer from the recorded stop reason; otherwise
     forward to the target beneath.  */
  if (record_is_replaying (minus_one_ptid))
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;

  return this->beneath->stopped_by_hw_breakpoint ();
/* The supports_stopped_by_hw_breakpoint method of target
   record-btrace.  */

record_btrace_target::supports_stopped_by_hw_breakpoint ()
  /* Supported while replaying; otherwise defer to the target beneath.  */
  if (record_is_replaying (minus_one_ptid))

  return this->beneath->supports_stopped_by_hw_breakpoint ();
/* The update_thread_list method of target record-btrace.  */

record_btrace_target::update_thread_list ()
  /* We don't add or remove threads during replay.  */
  if (record_is_replaying (minus_one_ptid))

  /* Forward the request.  */
  this->beneath->update_thread_list ();
/* The thread_alive method of target record-btrace.  */

record_btrace_target::thread_alive (ptid_t ptid)
  /* We don't add or remove threads during replay, so any thread we know
     about counts as alive.  */
  if (record_is_replaying (minus_one_ptid))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  return this->beneath->thread_alive (ptid);
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   stops (i.e. the thread returns to "live" state at the end of the
   trace).  */

record_btrace_set_replay (struct thread_info *tp,
                          const struct btrace_insn_iterator *it)
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

    /* IT is NULL: leave replay mode.  */
    record_btrace_stop_replaying (tp);

    /* Enter replay mode if necessary, then move to IT.  A no-op if the
       replay position is already at IT.  */
    if (btinfo->replay == NULL)
      record_btrace_start_replaying (tp);
    else if (btrace_insn_cmp (btinfo->replay, it) == 0)

    *btinfo->replay = *it;
    registers_changed_ptid (tp->ptid);

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2828 /* The goto_record_begin method of target record-btrace. */
2831 record_btrace_target::goto_record_begin ()
2833 struct thread_info *tp;
2834 struct btrace_insn_iterator begin;
2836 tp = require_btrace_thread ();
2838 btrace_insn_begin (&begin, &tp->btrace);
2840 /* Skip gaps at the beginning of the trace. */
2841 while (btrace_insn_get (&begin) == NULL)
2845 steps = btrace_insn_next (&begin, 1);
2847 error (_("No trace."));
2850 record_btrace_set_replay (tp, &begin);
2853 /* The goto_record_end method of target record-btrace. */
2856 record_btrace_target::goto_record_end ()
2858 struct thread_info *tp;
2860 tp = require_btrace_thread ();
2862 record_btrace_set_replay (tp, NULL);
2865 /* The goto_record method of target record-btrace. */
2868 record_btrace_target::goto_record (ULONGEST insn)
2870 struct thread_info *tp;
2871 struct btrace_insn_iterator it;
2872 unsigned int number;
2877 /* Check for wrap-arounds. */
2879 error (_("Instruction number out of range."));
2881 tp = require_btrace_thread ();
2883 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2885 /* Check if the instruction could not be found or is a gap. */
2886 if (found == 0 || btrace_insn_get (&it) == NULL)
2887 error (_("No such instruction."));
2889 record_btrace_set_replay (tp, &it);
2892 /* The record_stop_replaying method of target record-btrace. */
2895 record_btrace_target::record_stop_replaying ()
2897 struct thread_info *tp;
2899 ALL_NON_EXITED_THREADS (tp)
2900 record_btrace_stop_replaying (tp);
2903 /* The execution_direction target method. */
2905 enum exec_direction_kind
2906 record_btrace_target::execution_direction ()
2908 return record_btrace_resume_exec_dir;
2911 /* The prepare_to_generate_core target method. */
2914 record_btrace_target::prepare_to_generate_core ()
2916 record_btrace_generating_corefile = 1;
2919 /* The done_generating_core target method. */
2922 record_btrace_target::done_generating_core ()
2924 record_btrace_generating_corefile = 0;
2927 /* Start recording in BTS format. */
2930 cmd_record_btrace_bts_start (const char *args, int from_tty)
2932 if (args != NULL && *args != 0)
2933 error (_("Invalid argument."));
2935 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2939 execute_command ("target record-btrace", from_tty);
2941 CATCH (exception, RETURN_MASK_ALL)
2943 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2944 throw_exception (exception);
2949 /* Start recording in Intel Processor Trace format. */
2952 cmd_record_btrace_pt_start (const char *args, int from_tty)
2954 if (args != NULL && *args != 0)
2955 error (_("Invalid argument."));
2957 record_btrace_conf.format = BTRACE_FORMAT_PT;
2961 execute_command ("target record-btrace", from_tty);
2963 CATCH (exception, RETURN_MASK_ALL)
2965 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2966 throw_exception (exception);
2971 /* Alias for "target record". */
2974 cmd_record_btrace_start (const char *args, int from_tty)
2976 if (args != NULL && *args != 0)
2977 error (_("Invalid argument."));
2979 record_btrace_conf.format = BTRACE_FORMAT_PT;
2983 execute_command ("target record-btrace", from_tty);
2985 CATCH (exception, RETURN_MASK_ALL)
2987 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2991 execute_command ("target record-btrace", from_tty);
2993 CATCH (exception, RETURN_MASK_ALL)
2995 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2996 throw_exception (exception);
3003 /* The "set record btrace" command. */
3006 cmd_set_record_btrace (const char *args, int from_tty)
3008 printf_unfiltered (_("\"set record btrace\" must be followed "
3009 "by an appropriate subcommand.\n"));
3010 help_list (set_record_btrace_cmdlist, "set record btrace ",
3011 all_commands, gdb_stdout);
3014 /* The "show record btrace" command. */
3017 cmd_show_record_btrace (const char *args, int from_tty)
3019 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3022 /* The "show record btrace replay-memory-access" command. */
3025 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3026 struct cmd_list_element *c, const char *value)
3028 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3029 replay_memory_access);
3032 /* The "set record btrace cpu none" command. */
3035 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
3037 if (args != nullptr && *args != 0)
3038 error (_("Trailing junk: '%s'."), args);
3040 record_btrace_cpu_state = CS_NONE;
3043 /* The "set record btrace cpu auto" command. */
3046 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3048 if (args != nullptr && *args != 0)
3049 error (_("Trailing junk: '%s'."), args);
3051 record_btrace_cpu_state = CS_AUTO;
3054 /* The "set record btrace cpu" command. */
3057 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3059 if (args == nullptr)
3062 /* We use a hard-coded vendor string for now. */
3063 unsigned int family, model, stepping;
3064 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3065 &model, &l1, &stepping, &l2);
3068 if (strlen (args) != l2)
3069 error (_("Trailing junk: '%s'."), args + l2);
3071 else if (matches == 2)
3073 if (strlen (args) != l1)
3074 error (_("Trailing junk: '%s'."), args + l1);
3079 error (_("Bad format. See \"help set record btrace cpu\"."));
3081 if (USHRT_MAX < family)
3082 error (_("Cpu family too big."));
3084 if (UCHAR_MAX < model)
3085 error (_("Cpu model too big."));
3087 if (UCHAR_MAX < stepping)
3088 error (_("Cpu stepping too big."));
3090 record_btrace_cpu.vendor = CV_INTEL;
3091 record_btrace_cpu.family = family;
3092 record_btrace_cpu.model = model;
3093 record_btrace_cpu.stepping = stepping;
3095 record_btrace_cpu_state = CS_CPU;
3098 /* The "show record btrace cpu" command. */
3101 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3105 if (args != nullptr && *args != 0)
3106 error (_("Trailing junk: '%s'."), args);
3108 switch (record_btrace_cpu_state)
3111 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3115 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3119 switch (record_btrace_cpu.vendor)
3122 if (record_btrace_cpu.stepping == 0)
3123 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3124 record_btrace_cpu.family,
3125 record_btrace_cpu.model);
3127 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3128 record_btrace_cpu.family,
3129 record_btrace_cpu.model,
3130 record_btrace_cpu.stepping);
3135 error (_("Internal error: bad cpu state."));
3138 /* The "s record btrace bts" command. */
3141 cmd_set_record_btrace_bts (const char *args, int from_tty)
3143 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3144 "by an appropriate subcommand.\n"));
3145 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3146 all_commands, gdb_stdout);
3149 /* The "show record btrace bts" command. */
3152 cmd_show_record_btrace_bts (const char *args, int from_tty)
3154 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3157 /* The "set record btrace pt" command. */
3160 cmd_set_record_btrace_pt (const char *args, int from_tty)
3162 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3163 "by an appropriate subcommand.\n"));
3164 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3165 all_commands, gdb_stdout);
3168 /* The "show record btrace pt" command. */
3171 cmd_show_record_btrace_pt (const char *args, int from_tty)
3173 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3176 /* The "record bts buffer-size" show value function. */
3179 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3180 struct cmd_list_element *c,
3183 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3187 /* The "record pt buffer-size" show value function. */
3190 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3191 struct cmd_list_element *c,
3194 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3198 /* Initialize btrace commands. */
3201 _initialize_record_btrace (void)
3203 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3204 _("Start branch trace recording."), &record_btrace_cmdlist,
3205 "record btrace ", 0, &record_cmdlist);
3206 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3208 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3210 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3211 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3212 This format may not be available on all processors."),
3213 &record_btrace_cmdlist);
3214 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3216 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3218 Start branch trace recording in Intel Processor Trace format.\n\n\
3219 This format may not be available on all processors."),
3220 &record_btrace_cmdlist);
3221 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3223 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3224 _("Set record options"), &set_record_btrace_cmdlist,
3225 "set record btrace ", 0, &set_record_cmdlist);
3227 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3228 _("Show record options"), &show_record_btrace_cmdlist,
3229 "show record btrace ", 0, &show_record_cmdlist);
3231 add_setshow_enum_cmd ("replay-memory-access", no_class,
3232 replay_memory_access_types, &replay_memory_access, _("\
3233 Set what memory accesses are allowed during replay."), _("\
3234 Show what memory accesses are allowed during replay."),
3235 _("Default is READ-ONLY.\n\n\
3236 The btrace record target does not trace data.\n\
3237 The memory therefore corresponds to the live target and not \
3238 to the current replay position.\n\n\
3239 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3240 When READ-WRITE, allow accesses to read-only and read-write memory during \
3242 NULL, cmd_show_replay_memory_access,
3243 &set_record_btrace_cmdlist,
3244 &show_record_btrace_cmdlist);
3246 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3248 Set the cpu to be used for trace decode.\n\n\
3249 The format is \"<vendor>:<identifier>\" or \"none\" or \"auto\" (default).\n\
3250 For vendor \"intel\" the format is \"<family>/<model>[/<stepping>]\".\n\n\
3251 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3252 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3253 When GDB does not support that cpu, this option can be used to enable\n\
3254 workarounds for a similar cpu that GDB supports.\n\n\
3255 When set to \"none\", errata workarounds are disabled."),
3256 &set_record_btrace_cpu_cmdlist,
3257 _("set record btrace cpu "), 1,
3258 &set_record_btrace_cmdlist);
3260 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3261 Automatically determine the cpu to be used for trace decode."),
3262 &set_record_btrace_cpu_cmdlist);
3264 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3265 Do not enable errata workarounds for trace decode."),
3266 &set_record_btrace_cpu_cmdlist);
3268 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3269 Show the cpu to be used for trace decode."),
3270 &show_record_btrace_cmdlist);
3272 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3273 _("Set record btrace bts options"),
3274 &set_record_btrace_bts_cmdlist,
3275 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3277 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3278 _("Show record btrace bts options"),
3279 &show_record_btrace_bts_cmdlist,
3280 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3282 add_setshow_uinteger_cmd ("buffer-size", no_class,
3283 &record_btrace_conf.bts.size,
3284 _("Set the record/replay bts buffer size."),
3285 _("Show the record/replay bts buffer size."), _("\
3286 When starting recording request a trace buffer of this size. \
3287 The actual buffer size may differ from the requested size. \
3288 Use \"info record\" to see the actual buffer size.\n\n\
3289 Bigger buffers allow longer recording but also take more time to process \
3290 the recorded execution trace.\n\n\
3291 The trace buffer size may not be changed while recording."), NULL,
3292 show_record_bts_buffer_size_value,
3293 &set_record_btrace_bts_cmdlist,
3294 &show_record_btrace_bts_cmdlist);
3296 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3297 _("Set record btrace pt options"),
3298 &set_record_btrace_pt_cmdlist,
3299 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3301 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3302 _("Show record btrace pt options"),
3303 &show_record_btrace_pt_cmdlist,
3304 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3306 add_setshow_uinteger_cmd ("buffer-size", no_class,
3307 &record_btrace_conf.pt.size,
3308 _("Set the record/replay pt buffer size."),
3309 _("Show the record/replay pt buffer size."), _("\
3310 Bigger buffers allow longer recording but also take more time to process \
3311 the recorded execution.\n\
3312 The actual buffer size may differ from the requested size. Use \"info record\" \
3313 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3314 &set_record_btrace_pt_cmdlist,
3315 &show_record_btrace_pt_cmdlist);
3317 add_target (&record_btrace_ops);
3319 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3322 record_btrace_conf.bts.size = 64 * 1024;
3323 record_btrace_conf.pt.size = 16 * 1024;