1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
47 /* Token associated with a new-thread observer enabling branch tracing
48 for the new thread. */
49 static const gdb::observers::token record_btrace_thread_observer_token;
51 /* Memory access types used in set/show record btrace replay-memory-access. */
52 static const char replay_memory_access_read_only[] = "read-only";
53 static const char replay_memory_access_read_write[] = "read-write";
54 static const char *const replay_memory_access_types[] =
56 replay_memory_access_read_only,
57 replay_memory_access_read_write,
61 /* The currently allowed replay memory access type. */
62 static const char *replay_memory_access = replay_memory_access_read_only;
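/* For example, "set record btrace replay-memory-access read-write" lifts the
read-only restriction so that memory writes are forwarded to the target even
while replaying; see record_btrace_xfer_partial below. */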
64 /* The cpu state kinds. */
65 enum record_btrace_cpu_state_kind
72 /* The current cpu state. */
73 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
75 /* The current cpu for trace decode. */
76 static struct btrace_cpu record_btrace_cpu;
78 /* Command lists for "set/show record btrace". */
79 static struct cmd_list_element *set_record_btrace_cmdlist;
80 static struct cmd_list_element *show_record_btrace_cmdlist;
82 /* The execution direction of the last resume we got. See record-full.c. */
83 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
85 /* The async event handler for reverse/replay execution. */
86 static struct async_event_handler *record_btrace_async_inferior_event_handler;
88 /* A flag indicating that we are currently generating a core file. */
89 static int record_btrace_generating_corefile;
91 /* The current branch trace configuration. */
92 static struct btrace_config record_btrace_conf;
94 /* Command list for "record btrace". */
95 static struct cmd_list_element *record_btrace_cmdlist;
97 /* Command lists for "set/show record btrace bts". */
98 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
99 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
101 /* Command lists for "set/show record btrace pt". */
102 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
103 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
105 /* Command list for "set record btrace cpu". */
106 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
108 /* Print a record-btrace debug message. Use do ... while (0) to avoid
109 ambiguities when used in if statements. */
111 #define DEBUG(msg, args...) \
114 if (record_debug != 0) \
115 fprintf_unfiltered (gdb_stdlog, \
116 "[record-btrace] " msg "\n", ##args); \
121 /* Return the cpu configured by the user. Returns NULL if the cpu was
122 configured as auto. */
123 const struct btrace_cpu *
124 record_btrace_get_cpu (void)
126 switch (record_btrace_cpu_state)
132 record_btrace_cpu.vendor = CV_UNKNOWN;
135 return &record_btrace_cpu;
138 error (_("Internal error: bad record btrace cpu state."));
141 /* Update the branch trace for the current thread and return a pointer to its
142 thread_info struct.
144 Throws an error if there is no thread or no trace. This function never
145 returns NULL. */
147 static struct thread_info *
148 require_btrace_thread (void)
150 struct thread_info *tp;
154 tp = find_thread_ptid (inferior_ptid);
156 error (_("No thread."));
158 validate_registers_access ();
160 btrace_fetch (tp, record_btrace_get_cpu ());
162 if (btrace_is_empty (tp))
163 error (_("No trace."));
168 /* Update the branch trace for the current thread and return a pointer to its
169 branch trace information struct.
171 Throws an error if there is no thread or no trace. This function never
172 returns NULL. */
174 static struct btrace_thread_info *
175 require_btrace (void)
177 struct thread_info *tp;
179 tp = require_btrace_thread ();
184 /* Enable branch tracing for one thread. Warn on errors. */
187 record_btrace_enable_warn (struct thread_info *tp)
191 btrace_enable (tp, &record_btrace_conf);
193 CATCH (error, RETURN_MASK_ERROR)
195 warning ("%s", error.message);
200 /* Enable automatic tracing of new threads. */
203 record_btrace_auto_enable (void)
205 DEBUG ("attach thread observer");
207 gdb::observers::new_thread.attach (record_btrace_enable_warn,
208 record_btrace_thread_observer_token);
211 /* Disable automatic tracing of new threads. */
214 record_btrace_auto_disable (void)
216 DEBUG ("detach thread observer");
218 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
221 /* The record-btrace async event handler function. */
224 record_btrace_handle_async_inferior_event (gdb_client_data data)
226 inferior_event_handler (INF_REG_EVENT, NULL);
229 /* See record-btrace.h. */
232 record_btrace_push_target (void)
236 record_btrace_auto_enable ();
238 push_target (&record_btrace_ops);
240 record_btrace_async_inferior_event_handler
241 = create_async_event_handler (record_btrace_handle_async_inferior_event,
243 record_btrace_generating_corefile = 0;
245 format = btrace_format_short_string (record_btrace_conf.format);
246 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
249 /* Disable btrace on a set of threads on scope exit. */
251 struct scoped_btrace_disable
253 scoped_btrace_disable () = default;
255 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
257 ~scoped_btrace_disable ()
259 for (thread_info *tp : m_threads)
263 void add_thread (thread_info *thread)
265 m_threads.push_front (thread);
274 std::forward_list<thread_info *> m_threads;
277 /* The to_open method of target record-btrace. */
280 record_btrace_open (const char *args, int from_tty)
282 /* If we fail to enable btrace for one thread, disable it for the threads for
283 which it was successfully enabled. */
284 scoped_btrace_disable btrace_disable;
285 struct thread_info *tp;
291 if (!target_has_execution)
292 error (_("The program is not being run."));
294 ALL_NON_EXITED_THREADS (tp)
295 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
297 btrace_enable (tp, &record_btrace_conf);
299 btrace_disable.add_thread (tp);
302 record_btrace_push_target ();
304 btrace_disable.discard ();
307 /* The to_stop_recording method of target record-btrace. */
310 record_btrace_stop_recording (struct target_ops *self)
312 struct thread_info *tp;
314 DEBUG ("stop recording");
316 record_btrace_auto_disable ();
318 ALL_NON_EXITED_THREADS (tp)
319 if (tp->btrace.target != NULL)
323 /* The to_disconnect method of target record-btrace. */
326 record_btrace_disconnect (struct target_ops *self, const char *args,
329 struct target_ops *beneath = self->beneath;
331 /* Do not stop recording, just clean up GDB side. */
332 unpush_target (self);
334 /* Forward disconnect. */
335 beneath->to_disconnect (beneath, args, from_tty);
338 /* The to_close method of target record-btrace. */
341 record_btrace_close (struct target_ops *self)
343 struct thread_info *tp;
345 if (record_btrace_async_inferior_event_handler != NULL)
346 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
348 /* Make sure automatic recording gets disabled even if we did not stop
349 recording before closing the record-btrace target. */
350 record_btrace_auto_disable ();
352 /* We should have already stopped recording.
353 Tear down btrace in case we have not. */
354 ALL_NON_EXITED_THREADS (tp)
355 btrace_teardown (tp);
358 /* The to_async method of target record-btrace. */
361 record_btrace_async (struct target_ops *ops, int enable)
364 mark_async_event_handler (record_btrace_async_inferior_event_handler);
366 clear_async_event_handler (record_btrace_async_inferior_event_handler);
368 ops->beneath->to_async (ops->beneath, enable);
371 /* Adjusts the size and returns a human-readable size suffix. */
374 record_btrace_adjust_size (unsigned int *size)
380 if ((sz & ((1u << 30) - 1)) == 0)
385 else if ((sz & ((1u << 20) - 1)) == 0)
390 else if ((sz & ((1u << 10) - 1)) == 0)
399 /* Print a BTS configuration. */
402 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
410 suffix = record_btrace_adjust_size (&size);
411 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
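/* For example, a 65536-byte buffer is reported as "Buffer size: 64kB.",
assuming the usual kB/MB/GB suffixes returned by record_btrace_adjust_size
above. */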
415 /* Print an Intel Processor Trace configuration. */
418 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
426 suffix = record_btrace_adjust_size (&size);
427 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
431 /* Print a branch tracing configuration. */
434 record_btrace_print_conf (const struct btrace_config *conf)
436 printf_unfiltered (_("Recording format: %s.\n"),
437 btrace_format_string (conf->format));
439 switch (conf->format)
441 case BTRACE_FORMAT_NONE:
444 case BTRACE_FORMAT_BTS:
445 record_btrace_print_bts_conf (&conf->bts);
448 case BTRACE_FORMAT_PT:
449 record_btrace_print_pt_conf (&conf->pt);
453 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
456 /* The to_info_record method of target record-btrace. */
459 record_btrace_info (struct target_ops *self)
461 struct btrace_thread_info *btinfo;
462 const struct btrace_config *conf;
463 struct thread_info *tp;
464 unsigned int insns, calls, gaps;
468 tp = find_thread_ptid (inferior_ptid);
470 error (_("No thread."));
472 validate_registers_access ();
474 btinfo = &tp->btrace;
476 conf = btrace_conf (btinfo);
478 record_btrace_print_conf (conf);
480 btrace_fetch (tp, record_btrace_get_cpu ());
486 if (!btrace_is_empty (tp))
488 struct btrace_call_iterator call;
489 struct btrace_insn_iterator insn;
491 btrace_call_end (&call, btinfo);
492 btrace_call_prev (&call, 1);
493 calls = btrace_call_number (&call);
495 btrace_insn_end (&insn, btinfo);
496 insns = btrace_insn_number (&insn);
498 /* If the last instruction is not a gap, it is the current instruction
499 that is not actually part of the record. */
500 if (btrace_insn_get (&insn) != NULL)
503 gaps = btinfo->ngaps;
506 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
507 "for thread %s (%s).\n"), insns, calls, gaps,
508 print_thread_id (tp), target_pid_to_str (tp->ptid));
510 if (btrace_is_replaying (tp))
511 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
512 btrace_insn_number (btinfo->replay));
515 /* Print a decode error. */
518 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
519 enum btrace_format format)
521 const char *errstr = btrace_decode_error (format, errcode);
523 uiout->text (_("["));
524 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
525 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
527 uiout->text (_("decode error ("));
528 uiout->field_int ("errcode", errcode);
529 uiout->text (_("): "));
531 uiout->text (errstr);
532 uiout->text (_("]\n"));
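/* For example, a BTS decode error -1 would be printed as
"[decode error (-1): <error string>]", while a positive Intel PT status
code is printed as a plain "[<status string>]" notification. */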
535 /* Print an unsigned int. */
538 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
540 uiout->field_fmt (fld, "%u", val);
543 /* A range of source lines. */
545 struct btrace_line_range
547 /* The symtab this line is from. */
548 struct symtab *symtab;
550 /* The first line (inclusive). */
553 /* The last line (exclusive). */
557 /* Construct a line range. */
559 static struct btrace_line_range
560 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
562 struct btrace_line_range range;
564 range.symtab = symtab;
571 /* Add a line to a line range. */
573 static struct btrace_line_range
574 btrace_line_range_add (struct btrace_line_range range, int line)
576 if (range.end <= range.begin)
578 /* This is the first entry. */
580 range.end = line + 1;
582 else if (line < range.begin)
584 else if (range.end < line)
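/* For example, adding line 42 to an empty range yields [42, 43); adding line
40 to that range then widens it to [40, 43). */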
590 /* Return non-zero if RANGE is empty, zero otherwise. */
593 btrace_line_range_is_empty (struct btrace_line_range range)
595 return range.end <= range.begin;
598 /* Return non-zero if LHS contains RHS, zero otherwise. */
601 btrace_line_range_contains_range (struct btrace_line_range lhs,
602 struct btrace_line_range rhs)
604 return ((lhs.symtab == rhs.symtab)
605 && (lhs.begin <= rhs.begin)
606 && (rhs.end <= lhs.end));
609 /* Find the line range associated with PC. */
611 static struct btrace_line_range
612 btrace_find_line_range (CORE_ADDR pc)
614 struct btrace_line_range range;
615 struct linetable_entry *lines;
616 struct linetable *ltable;
617 struct symtab *symtab;
620 symtab = find_pc_line_symtab (pc);
622 return btrace_mk_line_range (NULL, 0, 0);
624 ltable = SYMTAB_LINETABLE (symtab);
626 return btrace_mk_line_range (symtab, 0, 0);
628 nlines = ltable->nitems;
629 lines = ltable->item;
631 return btrace_mk_line_range (symtab, 0, 0);
633 range = btrace_mk_line_range (symtab, 0, 0);
634 for (i = 0; i < nlines - 1; i++)
636 if ((lines[i].pc == pc) && (lines[i].line != 0))
637 range = btrace_line_range_add (range, lines[i].line);
643 /* Print source lines in LINES to UIOUT.
645 SRC_AND_ASM_TUPLE and ASM_LIST hold the tuple and list emitters for the
646 last printed source line and the instructions corresponding to that source
647 line. When printing a new source line, we close the open emitters and open
648 new ones for the new source line. If the source line range in LINES is not
649 empty, this function will leave the emitters for the last printed source
650 line open so instructions can be added to them. */
653 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
654 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
655 gdb::optional<ui_out_emit_list> *asm_list,
656 gdb_disassembly_flags flags)
658 print_source_lines_flags psl_flags;
660 if (flags & DISASSEMBLY_FILENAME)
661 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
663 for (int line = lines.begin; line < lines.end; ++line)
667 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
669 print_source_lines (lines.symtab, line, line + 1, psl_flags);
671 asm_list->emplace (uiout, "line_asm_insn");
675 /* Disassemble a section of the recorded instruction trace. */
678 btrace_insn_history (struct ui_out *uiout,
679 const struct btrace_thread_info *btinfo,
680 const struct btrace_insn_iterator *begin,
681 const struct btrace_insn_iterator *end,
682 gdb_disassembly_flags flags)
684 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
685 btrace_insn_number (begin), btrace_insn_number (end));
687 flags |= DISASSEMBLY_SPECULATIVE;
689 struct gdbarch *gdbarch = target_gdbarch ();
690 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
692 ui_out_emit_list list_emitter (uiout, "asm_insns");
694 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
695 gdb::optional<ui_out_emit_list> asm_list;
697 gdb_pretty_print_disassembler disasm (gdbarch);
699 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
700 btrace_insn_next (&it, 1))
702 const struct btrace_insn *insn;
704 insn = btrace_insn_get (&it);
706 /* A NULL instruction indicates a gap in the trace. */
709 const struct btrace_config *conf;
711 conf = btrace_conf (btinfo);
713 /* We have trace so we must have a configuration. */
714 gdb_assert (conf != NULL);
716 uiout->field_fmt ("insn-number", "%u",
717 btrace_insn_number (&it));
720 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
725 struct disasm_insn dinsn;
727 if ((flags & DISASSEMBLY_SOURCE) != 0)
729 struct btrace_line_range lines;
731 lines = btrace_find_line_range (insn->pc);
732 if (!btrace_line_range_is_empty (lines)
733 && !btrace_line_range_contains_range (last_lines, lines))
735 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
739 else if (!src_and_asm_tuple.has_value ())
741 gdb_assert (!asm_list.has_value ());
743 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
745 /* No source information. */
746 asm_list.emplace (uiout, "line_asm_insn");
749 gdb_assert (src_and_asm_tuple.has_value ());
750 gdb_assert (asm_list.has_value ());
753 memset (&dinsn, 0, sizeof (dinsn));
754 dinsn.number = btrace_insn_number (&it);
755 dinsn.addr = insn->pc;
757 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
758 dinsn.is_speculative = 1;
760 disasm.pretty_print_insn (uiout, &dinsn, flags);
765 /* The to_insn_history method of target record-btrace. */
768 record_btrace_insn_history (struct target_ops *self, int size,
769 gdb_disassembly_flags flags)
771 struct btrace_thread_info *btinfo;
772 struct btrace_insn_history *history;
773 struct btrace_insn_iterator begin, end;
774 struct ui_out *uiout;
775 unsigned int context, covered;
777 uiout = current_uiout;
778 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
779 context = abs (size);
781 error (_("Bad record instruction-history-size."));
783 btinfo = require_btrace ();
784 history = btinfo->insn_history;
787 struct btrace_insn_iterator *replay;
789 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
791 /* If we're replaying, we start at the replay position. Otherwise, we
792 start at the tail of the trace. */
793 replay = btinfo->replay;
797 btrace_insn_end (&begin, btinfo);
799 /* We start from here and expand in the requested direction. Then we
800 expand in the other direction, as well, to fill up any remaining
801 context. */
805 /* We want the current position covered, as well. */
806 covered = btrace_insn_next (&end, 1);
807 covered += btrace_insn_prev (&begin, context - covered);
808 covered += btrace_insn_next (&end, context - covered);
812 covered = btrace_insn_next (&end, context);
813 covered += btrace_insn_prev (&begin, context - covered);
818 begin = history->begin;
821 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
822 btrace_insn_number (&begin), btrace_insn_number (&end));
827 covered = btrace_insn_prev (&begin, context);
832 covered = btrace_insn_next (&end, context);
837 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
841 printf_unfiltered (_("At the start of the branch trace record.\n"));
843 printf_unfiltered (_("At the end of the branch trace record.\n"));
846 btrace_set_insn_history (btinfo, &begin, &end);
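/* For example, a plain "record instruction-history" request that hits the
start or end of the trace after only a few instructions fills the remaining
context in the opposite direction, so a full window of CONTEXT instructions
is shown whenever enough trace is available. */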
849 /* The to_insn_history_range method of target record-btrace. */
852 record_btrace_insn_history_range (struct target_ops *self,
853 ULONGEST from, ULONGEST to,
854 gdb_disassembly_flags flags)
856 struct btrace_thread_info *btinfo;
857 struct btrace_insn_iterator begin, end;
858 struct ui_out *uiout;
859 unsigned int low, high;
862 uiout = current_uiout;
863 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
867 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
869 /* Check for wrap-arounds. */
870 if (low != from || high != to)
871 error (_("Bad range."));
874 error (_("Bad range."));
876 btinfo = require_btrace ();
878 found = btrace_find_insn_by_number (&begin, btinfo, low);
880 error (_("Range out of bounds."));
882 found = btrace_find_insn_by_number (&end, btinfo, high);
885 /* Silently truncate the range. */
886 btrace_insn_end (&end, btinfo);
890 /* We want both begin and end to be inclusive. */
891 btrace_insn_next (&end, 1);
894 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
895 btrace_set_insn_history (btinfo, &begin, &end);
898 /* The to_insn_history_from method of target record-btrace. */
901 record_btrace_insn_history_from (struct target_ops *self,
902 ULONGEST from, int size,
903 gdb_disassembly_flags flags)
905 ULONGEST begin, end, context;
907 context = abs (size);
909 error (_("Bad record instruction-history-size."));
918 begin = from - context + 1;
923 end = from + context - 1;
925 /* Check for wrap-around. */
930 record_btrace_insn_history_range (self, begin, end, flags);
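/* For example, for FROM == 100 and a context of 10, a positive SIZE requests
the inclusive range [100; 109], while a negative SIZE requests [91; 100]. */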
933 /* Print the instruction number range for a function call history line. */
936 btrace_call_history_insn_range (struct ui_out *uiout,
937 const struct btrace_function *bfun)
939 unsigned int begin, end, size;
941 size = bfun->insn.size ();
942 gdb_assert (size > 0);
944 begin = bfun->insn_offset;
945 end = begin + size - 1;
947 ui_out_field_uint (uiout, "insn begin", begin);
949 ui_out_field_uint (uiout, "insn end", end);
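/* For example, a function segment with insn_offset 10 that contains five
instructions is printed as the inclusive instruction range from 10 to 14. */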
952 /* Compute the lowest and highest source line for the instructions in BFUN
953 and return them in PBEGIN and PEND.
954 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
955 result from inlining or macro expansion. */
958 btrace_compute_src_line_range (const struct btrace_function *bfun,
959 int *pbegin, int *pend)
961 struct symtab *symtab;
972 symtab = symbol_symtab (sym);
974 for (const btrace_insn &insn : bfun->insn)
976 struct symtab_and_line sal;
978 sal = find_pc_line (insn.pc, 0);
979 if (sal.symtab != symtab || sal.line == 0)
982 begin = std::min (begin, sal.line);
983 end = std::max (end, sal.line);
991 /* Print the source line information for a function call history line. */
994 btrace_call_history_src_line (struct ui_out *uiout,
995 const struct btrace_function *bfun)
1004 uiout->field_string ("file",
1005 symtab_to_filename_for_display (symbol_symtab (sym)));
1007 btrace_compute_src_line_range (bfun, &begin, &end);
1012 uiout->field_int ("min line", begin);
1018 uiout->field_int ("max line", end);
1021 /* Get the name of a branch trace function. */
1024 btrace_get_bfun_name (const struct btrace_function *bfun)
1026 struct minimal_symbol *msym;
1036 return SYMBOL_PRINT_NAME (sym);
1037 else if (msym != NULL)
1038 return MSYMBOL_PRINT_NAME (msym);
1043 /* Disassemble a section of the recorded function trace. */
1046 btrace_call_history (struct ui_out *uiout,
1047 const struct btrace_thread_info *btinfo,
1048 const struct btrace_call_iterator *begin,
1049 const struct btrace_call_iterator *end,
1052 struct btrace_call_iterator it;
1053 record_print_flags flags = (enum record_print_flag) int_flags;
1055 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1056 btrace_call_number (end));
1058 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1060 const struct btrace_function *bfun;
1061 struct minimal_symbol *msym;
1064 bfun = btrace_call_get (&it);
1068 /* Print the function index. */
1069 ui_out_field_uint (uiout, "index", bfun->number);
1072 /* Indicate gaps in the trace. */
1073 if (bfun->errcode != 0)
1075 const struct btrace_config *conf;
1077 conf = btrace_conf (btinfo);
1079 /* We have trace so we must have a configuration. */
1080 gdb_assert (conf != NULL);
1082 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1087 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1089 int level = bfun->level + btinfo->level, i;
1091 for (i = 0; i < level; ++i)
1096 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1097 else if (msym != NULL)
1098 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1099 else if (!uiout->is_mi_like_p ())
1100 uiout->field_string ("function", "??");
1102 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1104 uiout->text (_("\tinst "));
1105 btrace_call_history_insn_range (uiout, bfun);
1108 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1110 uiout->text (_("\tat "));
1111 btrace_call_history_src_line (uiout, bfun);
1118 /* The to_call_history method of target record-btrace. */
1121 record_btrace_call_history (struct target_ops *self, int size,
1122 record_print_flags flags)
1124 struct btrace_thread_info *btinfo;
1125 struct btrace_call_history *history;
1126 struct btrace_call_iterator begin, end;
1127 struct ui_out *uiout;
1128 unsigned int context, covered;
1130 uiout = current_uiout;
1131 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1132 context = abs (size);
1134 error (_("Bad record function-call-history-size."));
1136 btinfo = require_btrace ();
1137 history = btinfo->call_history;
1138 if (history == NULL)
1140 struct btrace_insn_iterator *replay;
1142 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1144 /* If we're replaying, we start at the replay position. Otherwise, we
1145 start at the tail of the trace. */
1146 replay = btinfo->replay;
1149 begin.btinfo = btinfo;
1150 begin.index = replay->call_index;
1153 btrace_call_end (&begin, btinfo);
1155 /* We start from here and expand in the requested direction. Then we
1156 expand in the other direction, as well, to fill up any remaining
1157 context. */
1161 /* We want the current position covered, as well. */
1162 covered = btrace_call_next (&end, 1);
1163 covered += btrace_call_prev (&begin, context - covered);
1164 covered += btrace_call_next (&end, context - covered);
1168 covered = btrace_call_next (&end, context);
1169 covered += btrace_call_prev (&begin, context - covered);
1174 begin = history->begin;
1177 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1178 btrace_call_number (&begin), btrace_call_number (&end));
1183 covered = btrace_call_prev (&begin, context);
1188 covered = btrace_call_next (&end, context);
1193 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1197 printf_unfiltered (_("At the start of the branch trace record.\n"));
1199 printf_unfiltered (_("At the end of the branch trace record.\n"));
1202 btrace_set_call_history (btinfo, &begin, &end);
1205 /* The to_call_history_range method of target record-btrace. */
1208 record_btrace_call_history_range (struct target_ops *self,
1209 ULONGEST from, ULONGEST to,
1210 record_print_flags flags)
1212 struct btrace_thread_info *btinfo;
1213 struct btrace_call_iterator begin, end;
1214 struct ui_out *uiout;
1215 unsigned int low, high;
1218 uiout = current_uiout;
1219 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1223 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1225 /* Check for wrap-arounds. */
1226 if (low != from || high != to)
1227 error (_("Bad range."));
1230 error (_("Bad range."));
1232 btinfo = require_btrace ();
1234 found = btrace_find_call_by_number (&begin, btinfo, low);
1236 error (_("Range out of bounds."));
1238 found = btrace_find_call_by_number (&end, btinfo, high);
1241 /* Silently truncate the range. */
1242 btrace_call_end (&end, btinfo);
1246 /* We want both begin and end to be inclusive. */
1247 btrace_call_next (&end, 1);
1250 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1251 btrace_set_call_history (btinfo, &begin, &end);
1254 /* The to_call_history_from method of target record-btrace. */
1257 record_btrace_call_history_from (struct target_ops *self,
1258 ULONGEST from, int size,
1259 record_print_flags flags)
1261 ULONGEST begin, end, context;
1263 context = abs (size);
1265 error (_("Bad record function-call-history-size."));
1274 begin = from - context + 1;
1279 end = from + context - 1;
1281 /* Check for wrap-around. */
1286 record_btrace_call_history_range (self, begin, end, flags);
1289 /* The to_record_method method of target record-btrace. */
1291 static enum record_method
1292 record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1294 struct thread_info * const tp = find_thread_ptid (ptid);
1297 error (_("No thread."));
1299 if (tp->btrace.target == NULL)
1300 return RECORD_METHOD_NONE;
1302 return RECORD_METHOD_BTRACE;
1305 /* The to_record_is_replaying method of target record-btrace. */
1308 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1310 struct thread_info *tp;
1312 ALL_NON_EXITED_THREADS (tp)
1313 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1319 /* The to_record_will_replay method of target record-btrace. */
1322 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1324 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1327 /* The to_xfer_partial method of target record-btrace. */
1329 static enum target_xfer_status
1330 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1331 const char *annex, gdb_byte *readbuf,
1332 const gdb_byte *writebuf, ULONGEST offset,
1333 ULONGEST len, ULONGEST *xfered_len)
1335 /* Filter out requests that don't make sense during replay. */
1336 if (replay_memory_access == replay_memory_access_read_only
1337 && !record_btrace_generating_corefile
1338 && record_btrace_is_replaying (ops, inferior_ptid))
1342 case TARGET_OBJECT_MEMORY:
1344 struct target_section *section;
1346 /* We do not allow writing memory in general. */
1347 if (writebuf != NULL)
1350 return TARGET_XFER_UNAVAILABLE;
1353 /* We allow reading readonly memory. */
1354 section = target_section_by_addr (ops, offset);
1355 if (section != NULL)
1357 /* Check if the section we found is readonly. */
1358 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1359 section->the_bfd_section)
1360 & SEC_READONLY) != 0)
1362 /* Truncate the request to fit into this section. */
1363 len = std::min (len, section->endaddr - offset);
1369 return TARGET_XFER_UNAVAILABLE;
1374 /* Forward the request. */
1376 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1377 offset, len, xfered_len);
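/* For example, while replaying with the default read-only access, reading
from a read-only section such as .text is forwarded to the target beneath
(truncated to the section), whereas writes and reads of volatile memory like
the stack or heap yield TARGET_XFER_UNAVAILABLE until replaying stops or
replay-memory-access is set to read-write. */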
1380 /* The to_insert_breakpoint method of target record-btrace. */
1383 record_btrace_insert_breakpoint (struct target_ops *ops,
1384 struct gdbarch *gdbarch,
1385 struct bp_target_info *bp_tgt)
1390 /* Inserting breakpoints requires accessing memory. Allow it for the
1391 duration of this function. */
1392 old = replay_memory_access;
1393 replay_memory_access = replay_memory_access_read_write;
1398 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1400 CATCH (except, RETURN_MASK_ALL)
1402 replay_memory_access = old;
1403 throw_exception (except);
1406 replay_memory_access = old;
1411 /* The to_remove_breakpoint method of target record-btrace. */
1414 record_btrace_remove_breakpoint (struct target_ops *ops,
1415 struct gdbarch *gdbarch,
1416 struct bp_target_info *bp_tgt,
1417 enum remove_bp_reason reason)
1422 /* Removing breakpoints requires accessing memory. Allow it for the
1423 duration of this function. */
1424 old = replay_memory_access;
1425 replay_memory_access = replay_memory_access_read_write;
1430 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1433 CATCH (except, RETURN_MASK_ALL)
1435 replay_memory_access = old;
1436 throw_exception (except);
1439 replay_memory_access = old;
1444 /* The to_fetch_registers method of target record-btrace. */
1447 record_btrace_fetch_registers (struct target_ops *ops,
1448 struct regcache *regcache, int regno)
1450 struct btrace_insn_iterator *replay;
1451 struct thread_info *tp;
1453 tp = find_thread_ptid (regcache_get_ptid (regcache));
1454 gdb_assert (tp != NULL);
1456 replay = tp->btrace.replay;
1457 if (replay != NULL && !record_btrace_generating_corefile)
1459 const struct btrace_insn *insn;
1460 struct gdbarch *gdbarch;
1463 gdbarch = regcache->arch ();
1464 pcreg = gdbarch_pc_regnum (gdbarch);
1468 /* We can only provide the PC register. */
1469 if (regno >= 0 && regno != pcreg)
1472 insn = btrace_insn_get (replay);
1473 gdb_assert (insn != NULL);
1475 regcache_raw_supply (regcache, regno, &insn->pc);
1479 struct target_ops *t = ops->beneath;
1481 t->to_fetch_registers (t, regcache, regno);
1485 /* The to_store_registers method of target record-btrace. */
1488 record_btrace_store_registers (struct target_ops *ops,
1489 struct regcache *regcache, int regno)
1491 struct target_ops *t;
1493 if (!record_btrace_generating_corefile
1494 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1495 error (_("Cannot write registers while replaying."));
1497 gdb_assert (may_write_registers != 0);
1500 t->to_store_registers (t, regcache, regno);
1503 /* The to_prepare_to_store method of target record-btrace. */
1506 record_btrace_prepare_to_store (struct target_ops *ops,
1507 struct regcache *regcache)
1509 struct target_ops *t;
1511 if (!record_btrace_generating_corefile
1512 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1516 t->to_prepare_to_store (t, regcache);
1519 /* The branch trace frame cache. */
1521 struct btrace_frame_cache
1524 struct thread_info *tp;
1526 /* The frame info. */
1527 struct frame_info *frame;
1529 /* The branch trace function segment. */
1530 const struct btrace_function *bfun;
1533 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1535 static htab_t bfcache;
1537 /* hash_f for htab_create_alloc of bfcache. */
1540 bfcache_hash (const void *arg)
1542 const struct btrace_frame_cache *cache
1543 = (const struct btrace_frame_cache *) arg;
1545 return htab_hash_pointer (cache->frame);
1548 /* eq_f for htab_create_alloc of bfcache. */
1551 bfcache_eq (const void *arg1, const void *arg2)
1553 const struct btrace_frame_cache *cache1
1554 = (const struct btrace_frame_cache *) arg1;
1555 const struct btrace_frame_cache *cache2
1556 = (const struct btrace_frame_cache *) arg2;
1558 return cache1->frame == cache2->frame;
1561 /* Create a new btrace frame cache. */
1563 static struct btrace_frame_cache *
1564 bfcache_new (struct frame_info *frame)
1566 struct btrace_frame_cache *cache;
1569 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1570 cache->frame = frame;
1572 slot = htab_find_slot (bfcache, cache, INSERT);
1573 gdb_assert (*slot == NULL);
1579 /* Extract the branch trace function from a branch trace frame. */
1581 static const struct btrace_function *
1582 btrace_get_frame_function (struct frame_info *frame)
1584 const struct btrace_frame_cache *cache;
1585 struct btrace_frame_cache pattern;
1588 pattern.frame = frame;
1590 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1594 cache = (const struct btrace_frame_cache *) *slot;
1598 /* Implement stop_reason method for record_btrace_frame_unwind. */
1600 static enum unwind_stop_reason
1601 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1604 const struct btrace_frame_cache *cache;
1605 const struct btrace_function *bfun;
1607 cache = (const struct btrace_frame_cache *) *this_cache;
1609 gdb_assert (bfun != NULL);
1612 return UNWIND_UNAVAILABLE;
1614 return UNWIND_NO_REASON;
1617 /* Implement this_id method for record_btrace_frame_unwind. */
1620 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1621 struct frame_id *this_id)
1623 const struct btrace_frame_cache *cache;
1624 const struct btrace_function *bfun;
1625 struct btrace_call_iterator it;
1626 CORE_ADDR code, special;
1628 cache = (const struct btrace_frame_cache *) *this_cache;
1631 gdb_assert (bfun != NULL);
1633 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1634 bfun = btrace_call_get (&it);
1636 code = get_frame_func (this_frame);
1637 special = bfun->number;
1639 *this_id = frame_id_build_unavailable_stack_special (code, special);
1641 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1642 btrace_get_bfun_name (cache->bfun),
1643 core_addr_to_string_nz (this_id->code_addr),
1644 core_addr_to_string_nz (this_id->special_addr));
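/* For example, a function whose trace is split into segments 3, 5, and 7 by
interleaved calls uses the number of its first segment, 3, as the special
frame id component, so all segments of one function instance share a single
frame id during replay. */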
1647 /* Implement prev_register method for record_btrace_frame_unwind. */
1649 static struct value *
1650 record_btrace_frame_prev_register (struct frame_info *this_frame,
1654 const struct btrace_frame_cache *cache;
1655 const struct btrace_function *bfun, *caller;
1656 struct btrace_call_iterator it;
1657 struct gdbarch *gdbarch;
1661 gdbarch = get_frame_arch (this_frame);
1662 pcreg = gdbarch_pc_regnum (gdbarch);
1663 if (pcreg < 0 || regnum != pcreg)
1664 throw_error (NOT_AVAILABLE_ERROR,
1665 _("Registers are not available in btrace record history"));
1667 cache = (const struct btrace_frame_cache *) *this_cache;
1669 gdb_assert (bfun != NULL);
1671 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1672 throw_error (NOT_AVAILABLE_ERROR,
1673 _("No caller in btrace record history"));
1675 caller = btrace_call_get (&it);
1677 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1678 pc = caller->insn.front ().pc;
1681 pc = caller->insn.back ().pc;
1682 pc += gdb_insn_length (gdbarch, pc);
1685 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1686 btrace_get_bfun_name (bfun), bfun->level,
1687 core_addr_to_string_nz (pc));
1689 return frame_unwind_got_address (this_frame, regnum, pc);
1692 /* Implement sniffer method for record_btrace_frame_unwind. */
1695 record_btrace_frame_sniffer (const struct frame_unwind *self,
1696 struct frame_info *this_frame,
1699 const struct btrace_function *bfun;
1700 struct btrace_frame_cache *cache;
1701 struct thread_info *tp;
1702 struct frame_info *next;
1704 /* THIS_FRAME does not contain a reference to its thread. */
1705 tp = find_thread_ptid (inferior_ptid);
1706 gdb_assert (tp != NULL);
1709 next = get_next_frame (this_frame);
1712 const struct btrace_insn_iterator *replay;
1714 replay = tp->btrace.replay;
1716 bfun = &replay->btinfo->functions[replay->call_index];
1720 const struct btrace_function *callee;
1721 struct btrace_call_iterator it;
1723 callee = btrace_get_frame_function (next);
1724 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1727 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1730 bfun = btrace_call_get (&it);
1736 DEBUG ("[frame] sniffed frame for %s on level %d",
1737 btrace_get_bfun_name (bfun), bfun->level);
1739 /* This is our frame. Initialize the frame cache. */
1740 cache = bfcache_new (this_frame);
1744 *this_cache = cache;
1748 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1751 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1752 struct frame_info *this_frame,
1755 const struct btrace_function *bfun, *callee;
1756 struct btrace_frame_cache *cache;
1757 struct btrace_call_iterator it;
1758 struct frame_info *next;
1759 struct thread_info *tinfo;
1761 next = get_next_frame (this_frame);
1765 callee = btrace_get_frame_function (next);
1769 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1772 tinfo = find_thread_ptid (inferior_ptid);
1773 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1776 bfun = btrace_call_get (&it);
1778 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1779 btrace_get_bfun_name (bfun), bfun->level);
1781 /* This is our frame. Initialize the frame cache. */
1782 cache = bfcache_new (this_frame);
1786 *this_cache = cache;
1791 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1793 struct btrace_frame_cache *cache;
1796 cache = (struct btrace_frame_cache *) this_cache;
1798 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1799 gdb_assert (slot != NULL);
1801 htab_remove_elt (bfcache, cache);
1804 /* btrace recording does not store previous memory content, nor the stack
1805 frames' content. Any unwinding would return erroneous results as the stack
1806 contents no longer match the changed PC value restored from history.
1807 Therefore this unwinder reports any possibly unwound registers as
1808 <unavailable>. */
1810 const struct frame_unwind record_btrace_frame_unwind =
1813 record_btrace_frame_unwind_stop_reason,
1814 record_btrace_frame_this_id,
1815 record_btrace_frame_prev_register,
1817 record_btrace_frame_sniffer,
1818 record_btrace_frame_dealloc_cache
1821 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1824 record_btrace_frame_unwind_stop_reason,
1825 record_btrace_frame_this_id,
1826 record_btrace_frame_prev_register,
1828 record_btrace_tailcall_frame_sniffer,
1829 record_btrace_frame_dealloc_cache
1832 /* Implement the to_get_unwinder method. */
1834 static const struct frame_unwind *
1835 record_btrace_to_get_unwinder (struct target_ops *self)
1837 return &record_btrace_frame_unwind;
1840 /* Implement the to_get_tailcall_unwinder method. */
1842 static const struct frame_unwind *
1843 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1845 return &record_btrace_tailcall_frame_unwind;
1848 /* Return a human-readable string for FLAG. */
1851 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1859 return "reverse-step";
1865 return "reverse-cont";
1874 /* Indicate that TP should be resumed according to FLAG. */
1877 record_btrace_resume_thread (struct thread_info *tp,
1878 enum btrace_thread_flag flag)
1880 struct btrace_thread_info *btinfo;
1882 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1883 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1885 btinfo = &tp->btrace;
1887 /* Fetch the latest branch trace. */
1888 btrace_fetch (tp, record_btrace_get_cpu ());
1890 /* A resume request overwrites a preceding resume or stop request. */
1891 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1892 btinfo->flags |= flag;
1895 /* Get the current frame for TP. */
1897 static struct frame_info *
1898 get_thread_current_frame (struct thread_info *tp)
1900 struct frame_info *frame;
1901 ptid_t old_inferior_ptid;
1904 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1905 old_inferior_ptid = inferior_ptid;
1906 inferior_ptid = tp->ptid;
1908 /* Clear the executing flag to allow changes to the current frame.
1909 We are not actually running, yet. We just started a reverse execution
1910 command or a record goto command.
1911 For the latter, EXECUTING is false and this has no effect.
1912 For the former, EXECUTING is true and we're in to_wait, about to
1913 move the thread. Since we need to recompute the stack, we temporarily
1914 set EXECUTING to false. */
1915 executing = is_executing (inferior_ptid);
1916 set_executing (inferior_ptid, 0);
1921 frame = get_current_frame ();
1923 CATCH (except, RETURN_MASK_ALL)
1925 /* Restore the previous execution state. */
1926 set_executing (inferior_ptid, executing);
1928 /* Restore the previous inferior_ptid. */
1929 inferior_ptid = old_inferior_ptid;
1931 throw_exception (except);
1935 /* Restore the previous execution state. */
1936 set_executing (inferior_ptid, executing);
1938 /* Restore the previous inferior_ptid. */
1939 inferior_ptid = old_inferior_ptid;
1944 /* Start replaying a thread. */
1946 static struct btrace_insn_iterator *
1947 record_btrace_start_replaying (struct thread_info *tp)
1949 struct btrace_insn_iterator *replay;
1950 struct btrace_thread_info *btinfo;
1952 btinfo = &tp->btrace;
1955 /* We can't start replaying without trace. */
1956 if (btinfo->functions.empty ())
1959 /* GDB stores the current frame_id when stepping in order to detect steps
1960 into subroutines.
1961 Since frames are computed differently when we're replaying, we need to
1962 recompute those stored frames and fix them up so we can still detect
1963 subroutines after we started replaying. */
1966 struct frame_info *frame;
1967 struct frame_id frame_id;
1968 int upd_step_frame_id, upd_step_stack_frame_id;
1970 /* The current frame without replaying - computed via normal unwind. */
1971 frame = get_thread_current_frame (tp);
1972 frame_id = get_frame_id (frame);
1974 /* Check if we need to update any stepping-related frame id's. */
1975 upd_step_frame_id = frame_id_eq (frame_id,
1976 tp->control.step_frame_id);
1977 upd_step_stack_frame_id = frame_id_eq (frame_id,
1978 tp->control.step_stack_frame_id);
1980 /* We start replaying at the end of the branch trace. This corresponds
1981 to the current instruction. */
1982 replay = XNEW (struct btrace_insn_iterator);
1983 btrace_insn_end (replay, btinfo);
1985 /* Skip gaps at the end of the trace. */
1986 while (btrace_insn_get (replay) == NULL)
1990 steps = btrace_insn_prev (replay, 1);
1992 error (_("No trace."));
1995 /* We're not replaying, yet. */
1996 gdb_assert (btinfo->replay == NULL);
1997 btinfo->replay = replay;
1999 /* Make sure we're not using any stale registers. */
2000 registers_changed_ptid (tp->ptid);
2002 /* The current frame with replaying - computed via btrace unwind. */
2003 frame = get_thread_current_frame (tp);
2004 frame_id = get_frame_id (frame);
2006 /* Replace stepping related frames where necessary. */
2007 if (upd_step_frame_id)
2008 tp->control.step_frame_id = frame_id;
2009 if (upd_step_stack_frame_id)
2010 tp->control.step_stack_frame_id = frame_id;
2012 CATCH (except, RETURN_MASK_ALL)
2014 xfree (btinfo->replay);
2015 btinfo->replay = NULL;
2017 registers_changed_ptid (tp->ptid);
2019 throw_exception (except);
2026 /* Stop replaying a thread. */
2029 record_btrace_stop_replaying (struct thread_info *tp)
2031 struct btrace_thread_info *btinfo;
2033 btinfo = &tp->btrace;
2035 xfree (btinfo->replay);
2036 btinfo->replay = NULL;
2038 /* Make sure we're not leaving any stale registers. */
2039 registers_changed_ptid (tp->ptid);
2042 /* Stop replaying TP if it is at the end of its execution history. */
2045 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2047 struct btrace_insn_iterator *replay, end;
2048 struct btrace_thread_info *btinfo;
2050 btinfo = &tp->btrace;
2051 replay = btinfo->replay;
2056 btrace_insn_end (&end, btinfo);
2058 if (btrace_insn_cmp (replay, &end) == 0)
2059 record_btrace_stop_replaying (tp);
2062 /* The to_resume method of target record-btrace. */
2065 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2066 enum gdb_signal signal)
2068 struct thread_info *tp;
2069 enum btrace_thread_flag flag, cflag;
2071 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2072 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2073 step ? "step" : "cont");
2075 /* Store the execution direction of the last resume.
2077 If there is more than one to_resume call, we have to rely on infrun
2078 to not change the execution direction in-between. */
2079 record_btrace_resume_exec_dir = execution_direction;
2081 /* As long as we're not replaying, just forward the request.
2083 For non-stop targets this means that no thread is replaying. In order to
2084 make progress, we may need to explicitly move replaying threads to the end
2085 of their execution history. */
2086 if ((execution_direction != EXEC_REVERSE)
2087 && !record_btrace_is_replaying (ops, minus_one_ptid))
2090 ops->to_resume (ops, ptid, step, signal);
2094 /* Compute the btrace thread flag for the requested move. */
2095 if (execution_direction == EXEC_REVERSE)
2097 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2102 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2106 /* We just indicate the resume intent here. The actual stepping happens in
2107 record_btrace_wait below.
2109 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2110 if (!target_is_non_stop_p ())
2112 gdb_assert (ptid_match (inferior_ptid, ptid));
2114 ALL_NON_EXITED_THREADS (tp)
2115 if (ptid_match (tp->ptid, ptid))
2117 if (ptid_match (tp->ptid, inferior_ptid))
2118 record_btrace_resume_thread (tp, flag);
2120 record_btrace_resume_thread (tp, cflag);
2125 ALL_NON_EXITED_THREADS (tp)
2126 if (ptid_match (tp->ptid, ptid))
2127 record_btrace_resume_thread (tp, flag);
2130 /* Async support. */
2131 if (target_can_async_p ())
2134 mark_async_event_handler (record_btrace_async_inferior_event_handler);
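/* For example, a "reverse-stepi" in all-stop mode marks the current thread
with BTHR_RSTEP and all other resumed threads with BTHR_RCONT above; the
threads are then actually moved in record_btrace_wait below. */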
2138 /* The to_commit_resume method of target record-btrace. */
2141 record_btrace_commit_resume (struct target_ops *ops)
2143 if ((execution_direction != EXEC_REVERSE)
2144 && !record_btrace_is_replaying (ops, minus_one_ptid))
2145 ops->beneath->to_commit_resume (ops->beneath);
2148 /* Cancel resuming TP. */
2151 record_btrace_cancel_resume (struct thread_info *tp)
2153 enum btrace_thread_flag flags;
2155 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2159 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2160 print_thread_id (tp),
2161 target_pid_to_str (tp->ptid), flags,
2162 btrace_thread_flag_to_str (flags));
2164 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2165 record_btrace_stop_replaying_at_end (tp);
2168 /* Return a target_waitstatus indicating that we ran out of history. */
2170 static struct target_waitstatus
2171 btrace_step_no_history (void)
2173 struct target_waitstatus status;
2175 status.kind = TARGET_WAITKIND_NO_HISTORY;
2180 /* Return a target_waitstatus indicating that a step finished. */
2182 static struct target_waitstatus
2183 btrace_step_stopped (void)
2185 struct target_waitstatus status;
2187 status.kind = TARGET_WAITKIND_STOPPED;
2188 status.value.sig = GDB_SIGNAL_TRAP;
2193 /* Return a target_waitstatus indicating that a thread was stopped as
2194 requested. */
2196 static struct target_waitstatus
2197 btrace_step_stopped_on_request (void)
2199 struct target_waitstatus status;
2201 status.kind = TARGET_WAITKIND_STOPPED;
2202 status.value.sig = GDB_SIGNAL_0;
2207 /* Return a target_waitstatus indicating a spurious stop. */
2209 static struct target_waitstatus
2210 btrace_step_spurious (void)
2212 struct target_waitstatus status;
2214 status.kind = TARGET_WAITKIND_SPURIOUS;
2219 /* Return a target_waitstatus indicating that the thread was not resumed. */
2221 static struct target_waitstatus
2222 btrace_step_no_resumed (void)
2224 struct target_waitstatus status;
2226 status.kind = TARGET_WAITKIND_NO_RESUMED;
2231 /* Return a target_waitstatus indicating that we should wait again. */
2233 static struct target_waitstatus
2234 btrace_step_again (void)
2236 struct target_waitstatus status;
2238 status.kind = TARGET_WAITKIND_IGNORE;
2243 /* Clear the record histories. */
2246 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2248 xfree (btinfo->insn_history);
2249 xfree (btinfo->call_history);
2251 btinfo->insn_history = NULL;
2252 btinfo->call_history = NULL;
2255 /* Check whether TP's current replay position is at a breakpoint. */
2258 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2260 struct btrace_insn_iterator *replay;
2261 struct btrace_thread_info *btinfo;
2262 const struct btrace_insn *insn;
2263 struct inferior *inf;
2265 btinfo = &tp->btrace;
2266 replay = btinfo->replay;
2271 insn = btrace_insn_get (replay);
2275 inf = find_inferior_ptid (tp->ptid);
2279 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2280 &btinfo->stop_reason);
2283 /* Step one instruction in forward direction. */
2285 static struct target_waitstatus
2286 record_btrace_single_step_forward (struct thread_info *tp)
2288 struct btrace_insn_iterator *replay, end, start;
2289 struct btrace_thread_info *btinfo;
2291 btinfo = &tp->btrace;
2292 replay = btinfo->replay;
2294 /* We're done if we're not replaying. */
2296 return btrace_step_no_history ();
2298 /* Check if we're stepping a breakpoint. */
2299 if (record_btrace_replay_at_breakpoint (tp))
2300 return btrace_step_stopped ();
2302 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2303 jump back to the instruction at which we started. */
2309 /* We will bail out here if we continue stepping after reaching the end
2310 of the execution history. */
2311 steps = btrace_insn_next (replay, 1);
2315 return btrace_step_no_history ();
2318 while (btrace_insn_get (replay) == NULL);
2320 /* Determine the end of the instruction trace. */
2321 btrace_insn_end (&end, btinfo);
2323 /* The execution trace contains (and ends with) the current instruction.
2324 This instruction has not been executed, yet, so the trace really ends
2325 one instruction earlier. */
2326 if (btrace_insn_cmp (replay, &end) == 0)
2327 return btrace_step_no_history ();
2329 return btrace_step_spurious ();
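/* For example, replaying a thread that sits one instruction before the end
of its trace reports TARGET_WAITKIND_NO_HISTORY on the next forward step,
since the last recorded instruction is the current one and has not actually
been executed yet. */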
2332 /* Step one instruction in backward direction. */
2334 static struct target_waitstatus
2335 record_btrace_single_step_backward (struct thread_info *tp)
2337 struct btrace_insn_iterator *replay, start;
2338 struct btrace_thread_info *btinfo;
2340 btinfo = &tp->btrace;
2341 replay = btinfo->replay;
2343 /* Start replaying if we're not already doing so. */
2345 replay = record_btrace_start_replaying (tp);
2347 /* If we can't step any further, we reached the end of the history.
2348 Skip gaps during replay. If we end up at a gap (at the beginning of
2349 the trace), jump back to the instruction at which we started. */
2355 steps = btrace_insn_prev (replay, 1);
2359 return btrace_step_no_history ();
2362 while (btrace_insn_get (replay) == NULL);
2364 /* Check if we're stepping a breakpoint.
2366 For reverse-stepping, this check is after the step. There is logic in
2367 infrun.c that handles reverse-stepping separately. See, for example,
2368 proceed and adjust_pc_after_break.
2370 This code assumes that for reverse-stepping, PC points to the last
2371 de-executed instruction, whereas for forward-stepping PC points to the
2372 next to-be-executed instruction. */
2373 if (record_btrace_replay_at_breakpoint (tp))
2374 return btrace_step_stopped ();
2376 return btrace_step_spurious ();
2379 /* Step a single thread. */
2381 static struct target_waitstatus
2382 record_btrace_step_thread (struct thread_info *tp)
2384 struct btrace_thread_info *btinfo;
2385 struct target_waitstatus status;
2386 enum btrace_thread_flag flags;
2388 btinfo = &tp->btrace;
2390 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2391 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2393 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2394 target_pid_to_str (tp->ptid), flags,
2395 btrace_thread_flag_to_str (flags));
2397 /* We can't step without an execution history. */
2398 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2399 return btrace_step_no_history ();
2404 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2407 return btrace_step_stopped_on_request ();
2410 status = record_btrace_single_step_forward (tp);
2411 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2414 return btrace_step_stopped ();
2417 status = record_btrace_single_step_backward (tp);
2418 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2421 return btrace_step_stopped ();
2424 status = record_btrace_single_step_forward (tp);
2425 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2428 btinfo->flags |= flags;
2429 return btrace_step_again ();
2432 status = record_btrace_single_step_backward (tp);
2433 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2436 btinfo->flags |= flags;
2437 return btrace_step_again ();
2440 /* We keep threads moving at the end of their execution history. The to_wait
2441 method will stop the thread for which the event is reported. */
2442 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2443 btinfo->flags |= flags;
2448 /* A vector of threads. */
2450 typedef struct thread_info * tp_t;
2453 /* Announce further events if necessary. */
2456 record_btrace_maybe_mark_async_event
2457 (const std::vector<thread_info *> &moving,
2458 const std::vector<thread_info *> &no_history)
2460 bool more_moving = !moving.empty ();
2461 bool more_no_history = !no_history.empty ();
2463 if (!more_moving && !more_no_history)
2467 DEBUG ("movers pending");
2469 if (more_no_history)
2470 DEBUG ("no-history pending");
2472 mark_async_event_handler (record_btrace_async_inferior_event_handler);

/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  std::vector<thread_info *> moving;
  std::vector<thread_info *> no_history;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Keep a work list of moving threads.  */
  {
    struct thread_info *tp;

    ALL_NON_EXITED_THREADS (tp)
      if (ptid_match (tp->ptid, ptid)
	  && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
	moving.push_back (tp);
  }

  if (moving.empty ())
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status).c_str ());

      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads were allowed to make progress, this would result in far too many
     intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  struct thread_info *eventing = NULL;
  while ((eventing == NULL) && !moving.empty ())
    {
      for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
	{
	  thread_info *tp = moving[ix];

	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      no_history.push_back (ordered_remove (moving, ix));
	      break;

	    default:
	      eventing = unordered_remove (moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!no_history.empty ());

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = unordered_remove (no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    {
      struct thread_info *tp;

      ALL_NON_EXITED_THREADS (tp)
	record_btrace_cancel_resume (tp);
    }

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status).c_str ());

  return eventing->ptid;
}
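
/* An illustrative all-stop replay scenario for the delayed reporting above:
   if threads 1 and 2 are both continued in reverse and thread 1 reaches the
   beginning of its trace first, it is parked in NO_HISTORY while thread 2
   keeps stepping.  Only when no thread can make further progress is a
   single "no history" stop reported, instead of stopping and re-resuming
   all threads once per exhausted thread.  */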

/* The to_stop method of target record-btrace.  */

static void
record_btrace_stop (struct target_ops *ops, ptid_t ptid)
{
  DEBUG ("stop %s", target_pid_to_str (ptid));

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_stop (ops, ptid);
    }
  else
    {
      struct thread_info *tp;

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    tp->btrace.flags &= ~BTHR_MOVE;
	    tp->btrace.flags |= BTHR_STOP;
	  }
    }
}

/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}

/* The to_stopped_by_sw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
}

/* The to_supports_stopped_by_sw_breakpoint method of target
   record-btrace.  */

static int
record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return 1;

  return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
}

/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}

/* The to_supports_stopped_by_hw_breakpoint method of target
   record-btrace.  */

static int
record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return 1;

  return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
}

/* The to_update_thread_list method of target record-btrace.  */

static void
record_btrace_update_thread_list (struct target_ops *ops)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return;

  /* Forward the request.  */
  ops = ops->beneath;
  ops->to_update_thread_list (ops);
}

/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_thread_alive (ops, ptid);
}

/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
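
/* The navigation methods below all funnel into record_btrace_set_replay:
   "record goto begin" passes an iterator for the first instruction,
   "record goto end" passes NULL to stop replaying, and "record goto N"
   passes an iterator for instruction N.  */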

/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
	error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);
}

/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
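
/* Example session (instruction numbers are illustrative):

     (gdb) record goto begin    # replay from the start of the trace
     (gdb) record goto 42       # jump to instruction number 42
     (gdb) record goto end      # stop replaying

   A number outside the recorded range, or one that falls into a trace gap,
   is rejected with "No such instruction.".  */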

/* The to_record_stop_replaying method of target record-btrace.  */

static void
record_btrace_stop_replaying_all (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}

/* The to_execution_direction target method.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}

/* The to_prepare_to_generate_core target method.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}

/* The to_done_generating_core target method.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}

/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_btrace_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_method = record_btrace_record_method;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_commit_resume = record_btrace_commit_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}

/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
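
/* Illustrative usage of the format-specific variants:

     (gdb) record btrace bts
     (gdb) record btrace pt

   Each sets record_btrace_conf.format before pushing the target and resets
   it to BTRACE_FORMAT_NONE if pushing the target fails.  */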

/* Alias for "target record".  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
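
/* Thus plain "record btrace" prefers Intel Processor Trace and silently
   falls back to BTS; the exception is propagated only if both formats
   fail.  */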

/* The "set record btrace" command.  */

static void
cmd_set_record_btrace (const char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_cmdlist, "set record btrace ",
	     all_commands, gdb_stdout);
}

/* The "show record btrace" command.  */

static void
cmd_show_record_btrace (const char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}

/* The "show record btrace replay-memory-access" command.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
		    replay_memory_access);
}

/* The "set record btrace cpu none" command.  */

static void
cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_NONE;
}

/* The "set record btrace cpu auto" command.  */

static void
cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  record_btrace_cpu_state = CS_AUTO;
}

/* The "set record btrace cpu" command.  */

static void
cmd_set_record_btrace_cpu (const char *args, int from_tty)
{
  if (args == nullptr)
    args = "";

  /* We use a hard-coded vendor string for now.  */
  unsigned int family, model, stepping;
  int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
				&model, &l1, &stepping, &l2);
  if (matches == 3)
    {
      if (strlen (args) != l2)
	error (_("Trailing junk: '%s'."), args + l2);
    }
  else if (matches == 2)
    {
      if (strlen (args) != l1)
	error (_("Trailing junk: '%s'."), args + l1);

      stepping = 0;
    }
  else
    error (_("Bad format.  See \"help set record btrace cpu\"."));

  if (USHRT_MAX < family)
    error (_("Cpu family too big."));

  if (UCHAR_MAX < model)
    error (_("Cpu model too big."));

  if (UCHAR_MAX < stepping)
    error (_("Cpu stepping too big."));

  record_btrace_cpu.vendor = CV_INTEL;
  record_btrace_cpu.family = family;
  record_btrace_cpu.model = model;
  record_btrace_cpu.stepping = stepping;

  record_btrace_cpu_state = CS_CPU;
}
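
/* Parsing examples (cpu identifiers are made up): "intel: 6/62" gives
   matches == 2, so the stepping defaults to 0; "intel: 6/62/4" gives
   matches == 3 with stepping 4; "intel: 6/62 foo" fails the strlen check
   and is rejected as trailing junk.  */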

/* The "show record btrace cpu" command.  */

static void
cmd_show_record_btrace_cpu (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      printf_unfiltered (_("btrace cpu is 'auto'.\n"));
      return;

    case CS_NONE:
      printf_unfiltered (_("btrace cpu is 'none'.\n"));
      return;

    case CS_CPU:
      switch (record_btrace_cpu.vendor)
	{
	case CV_INTEL:
	  if (record_btrace_cpu.stepping == 0)
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model);
	  else
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model,
			       record_btrace_cpu.stepping);
	  return;
	}
    }

  error (_("Internal error: bad cpu state."));
}

/* The "set record btrace bts" command.  */

static void
cmd_set_record_btrace_bts (const char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}

/* The "show record btrace bts" command.  */

static void
cmd_show_record_btrace_bts (const char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}

/* The "set record btrace pt" command.  */

static void
cmd_set_record_btrace_pt (const char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}

/* The "show record btrace pt" command.  */

static void
cmd_show_record_btrace_pt (const char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}

/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}

/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			_("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			NULL, cmd_show_replay_memory_access,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);
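
  /* Illustrative usage of the option registered above:

       (gdb) set record btrace replay-memory-access read-write
       (gdb) show record btrace replay-memory-access
       Replay memory access is read-write.  */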

  add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
		  _("\
Set the cpu to be used for trace decode.\n\n\
The format is \"<vendor>:<identifier>\" or \"none\" or \"auto\" (default).\n\
For vendor \"intel\" the format is \"<family>/<model>[/<stepping>]\".\n\n\
When decoding branch trace, enable errata workarounds for the specified cpu.\n\
The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
When GDB does not support that cpu, this option can be used to enable\n\
workarounds for a similar cpu that GDB supports.\n\n\
When set to \"none\", errata workarounds are disabled."),
		  &set_record_btrace_cpu_cmdlist,
		  _("set record btrace cpu "), 1,
		  &set_record_btrace_cmdlist);

  add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
Automatically determine the cpu to be used for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
Do not enable errata workarounds for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
Show the cpu to be used for trace decode."),
	   &show_record_btrace_cmdlist);
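
  /* Illustrative usage of the cpu commands registered above (the cpu
     identifier is made up):

       (gdb) set record btrace cpu intel: 6/62/4
       (gdb) show record btrace cpu
       btrace cpu is 'intel: 6/62/4'.
       (gdb) set record btrace cpu auto  */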

  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);
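
  /* Illustrative usage (size in bytes; must be set before recording is
     started):

       (gdb) set record btrace bts buffer-size 131072
       (gdb) record btrace bts
       (gdb) info record  */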

  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}