1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   NULL while automatic tracing of new threads is disabled.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
/* The enumeration of valid values for the replay-memory-access setting.  */
static const char *const replay_memory_access_types[] =
  replay_memory_access_read_only,
  replay_memory_access_read_write,

/* The currently allowed replay memory access type.  Defaults to read-only;
   compared by pointer identity against the constants above.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.
   While set, replay restrictions on memory/register access are lifted.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.

   FIX: the macro body lacked the do/while (0) wrapper its own comment
   promises; without it, a DEBUG (...) inside an if/else can bind the
   else to the wrong if.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
/* Update the branch trace for the current thread and return a pointer to its
   thread_info struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  /* No thread is selected, e.g. before the inferior has been started.  */
  error (_("No thread."));

  /* An empty trace means there is nothing to browse or replay.  */
  if (btrace_is_empty (tp))
    error (_("No trace."));
/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
  struct thread_info *tp;

  /* Delegate the thread/trace validation; this errors out on failure.  */
  tp = require_btrace_thread ();
/* Enable branch tracing for one thread.  Warn on errors.  */

record_btrace_enable_warn (struct thread_info *tp)
  /* Start recording for TP using the current branch trace configuration.  */
  btrace_enable (tp, &record_btrace_conf);
  /* Downgrade btrace errors to warnings; failing to trace one new thread
     should not abort the debug session.  */
  CATCH (error, RETURN_MASK_ERROR)
    warning ("%s", error.message);
/* Callback function to disable branch tracing for one thread.
   ARG is the thread_info pointer; registered as a cleanup by
   record_btrace_open.  */

record_btrace_disable_callback (void *arg)
  struct thread_info *tp;
/* Enable automatic tracing of new threads.  */

record_btrace_auto_enable (void)
  DEBUG ("attach thread observer");

  /* From now on, every newly added thread gets btrace enabled (with
     warnings on failure) via the observer.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
/* Disable automatic tracing of new threads.  Safe to call twice.  */

record_btrace_auto_disable (void)
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  /* Clear the handle so a subsequent call becomes a no-op.  */
  record_btrace_thread_observer = NULL;
/* The record-btrace async event handler function.  Dispatches a regular
   inferior event to the event loop.  */

record_btrace_handle_async_inferior_event (gdb_client_data data)
  inferior_event_handler (INF_REG_EVENT, NULL);
/* The to_open method of target record-btrace.
   ARGS optionally lists the threads to trace; with no argument, all
   non-exited threads are traced.  */

record_btrace_open (const char *args, int from_tty)
  struct cleanup *disable_chain;
  struct thread_info *tp;

  if (!target_has_execution)
    error (_("The program is not being run."));

  error (_("Record btrace can't debug inferior in non-stop mode."));

  /* Recording must not already be active.  */
  gdb_assert (record_btrace_thread_observer == NULL);

  /* On error, undo tracing for all threads enabled so far.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    /* Without arguments, enable tracing for every thread; otherwise only
       for the threads listed in ARGS.  */
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* Everything succeeded; keep tracing enabled.  */
  discard_cleanups (disable_chain);
/* The to_stop_recording method of target record-btrace.  */

record_btrace_stop_recording (struct target_ops *self)
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    /* Only disable threads that are actually being traced.  */
    if (tp->btrace.target != NULL)
/* The to_close method of target record-btrace.  Unregisters the async
   event handler and tears down tracing state for all live threads.  */

record_btrace_close (struct target_ops *self)
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
/* The to_async method of target record-btrace.  Marks or clears our
   async event handler according to ENABLE, then forwards the request.  */

record_btrace_async (struct target_ops *ops, int enable)
  mark_async_event_handler (record_btrace_async_inferior_event_handler);
  clear_async_event_handler (record_btrace_async_inferior_event_handler);

  /* Forward the request to the target beneath.  */
  ops->beneath->to_async (ops->beneath, enable);
/* Adjusts the size and returns a human-readable size suffix.
   Scales *SIZE down by the largest binary unit (GB, MB, KB) that divides
   it evenly, testing from largest to smallest.  */

record_btrace_adjust_size (unsigned int *size)
  if ((sz & ((1u << 30) - 1)) == 0)
  else if ((sz & ((1u << 20) - 1)) == 0)
  else if ((sz & ((1u << 10) - 1)) == 0)
/* Print a BTS configuration.  */

record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
  /* Scale the buffer size and print it together with its unit suffix.  */
  suffix = record_btrace_adjust_size (&size);
  printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
335 /* Print a branch tracing configuration. */
338 record_btrace_print_conf (const struct btrace_config *conf)
340 printf_unfiltered (_("Recording format: %s.\n"),
341 btrace_format_string (conf->format));
343 switch (conf->format)
345 case BTRACE_FORMAT_NONE:
348 case BTRACE_FORMAT_BTS:
349 record_btrace_print_bts_conf (&conf->bts);
353 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
/* The to_info_record method of target record-btrace.  Prints the recording
   configuration and per-thread trace statistics.  */

record_btrace_info (struct target_ops *self)
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  tp = find_thread_ptid (inferior_ptid);
  error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  record_btrace_print_conf (conf);

  if (!btrace_is_empty (tp))
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count calls and instructions from the end of the trace.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      /* The last instruction does not really belong to the trace. */

      /* Skip gaps at the end. */
	  steps = btrace_insn_prev (&insn, 1);

	  insns = btrace_insn_number (&insn);

      gaps = btinfo->ngaps;

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
/* Print a decode error.  ERRCODE is the format-specific error code;
   FORMAT selects the table used to translate it to a message.  */

btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
  /* Fallback for error codes we have no description for.  */
  errstr = _("unknown");

    case BTRACE_FORMAT_BTS:
	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");

  /* Print "[decode error (<code>): <description>]".  */
  ui_out_text (uiout, _("["));
  ui_out_text (uiout, _("decode error ("));
  ui_out_field_int (uiout, "errcode", errcode);
  ui_out_text (uiout, _("): "));
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
/* Print an unsigned int.  ui-out has no native unsigned field type, so
   format it ourselves.  */

ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
  ui_out_field_fmt (uiout, fld, "%u", val);
/* Disassemble a section of the recorded instruction trace.
   Prints each instruction in [BEGIN; END) with its trace index; gaps in
   the trace are rendered as decode errors.  */

btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace. */
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration. */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,

	  /* Print the instruction index. */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Disassembly with '/m' flag may not produce the expected
	     result.  */
	  gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc,
/* The to_insn_history method of target record-btrace.
   SIZE's sign selects the direction; its magnitude is the number of
   instructions to print.  */

record_btrace_insn_history (struct target_ops *self, int size, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
  error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position. Otherwise, we
	 start at the tail of the trace. */
      replay = btinfo->replay;
	  btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction. Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
	  /* We want the current position covered, as well. */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
      begin = history->begin;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      covered = btrace_insn_prev (&begin, context);
      covered = btrace_insn_next (&end, context);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);

  printf_unfiltered (_("At the start of the branch trace record.\n"));
  printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the printed range for a subsequent +/- request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_insn_history_range method of target record-btrace.
   Prints the recorded instructions numbered FROM to TO, inclusive.  */

record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds. */
  if (low != from || high != to)
    error (_("Bad range."));

  error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
      /* Silently truncate the range. */
      btrace_insn_end (&end, btinfo);

      /* We want both begin and end to be inclusive. */
      btrace_insn_next (&end, 1);

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
/* The to_insn_history_from method of target record-btrace.
   Prints SIZE instructions around instruction number FROM; the sign of
   SIZE selects the direction of expansion.  */

record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
  ULONGEST begin, end, context;

  context = abs (size);
  error (_("Bad record instruction-history-size."));

  /* Expand backward from FROM for negative SIZE...  */
  begin = from - context + 1;
  /* ...or forward from FROM for positive SIZE.  */
  end = from + context - 1;

  /* Check for wrap-around. */

  record_btrace_insn_history_range (self, begin, end, flags);
/* Print the instruction number range for a function call history line.  */

btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  /* The printed range is inclusive: [insn_offset; insn_offset + size - 1].  */
  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion. */

btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
  struct btrace_insn *insn;
  struct symtab *symtab;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip instructions from a different symtab or without line info.  */
      if (sal.symtab != symtab || sal.line == 0)

      /* Widen the range to cover this instruction's line.  */
      begin = min (begin, sal.line);
      end = max (end, sal.line);
/* Print the source line information for a function call history line.
   Prints "file:line" or "file:line,line" when BFUN spans several lines.  */

btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  /* Only print the upper bound when the range spans more than one line.  */
  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
/* Get the name of a branch trace function.
   Prefers the full symbol's print name, falls back to the minimal
   symbol's.  */

btrace_get_bfun_name (const struct btrace_function *bfun)
  struct minimal_symbol *msym;

  return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
/* Disassemble a section of the recorded function trace.
   Prints one line per function segment in [BEGIN; END), honoring the
   RECORD_PRINT_* bits in FLAGS.  */

btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;

      bfun = btrace_call_get (&it);

      /* Print the function index. */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace. */
      if (bfun->errcode != 0)
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration. */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	  /* Indent the function name according to its call depth.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, " ");

      ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);

      ui_out_text (uiout, "\n");
/* The to_call_history method of target record-btrace.
   SIZE's sign selects the direction; its magnitude is the number of
   function segments to print.  */

record_btrace_call_history (struct target_ops *self, int size, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
  error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position. Otherwise, we
	 start at the tail of the trace. */
      replay = btinfo->replay;
	  begin.function = replay->function;
	  begin.btinfo = btinfo;

	  btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction. Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
	  /* We want the current position covered, as well. */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
      begin = history->begin;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      covered = btrace_call_prev (&begin, context);
      covered = btrace_call_next (&end, context);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);

  printf_unfiltered (_("At the start of the branch trace record.\n"));
  printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the printed range for a subsequent +/- request.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_call_history_range method of target record-btrace.
   Prints the recorded function segments numbered FROM to TO, inclusive.  */

record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds. */
  if (low != from || high != to)
    error (_("Bad range."));

  error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
      /* Silently truncate the range. */
      btrace_call_end (&end, btinfo);

      /* We want both begin and end to be inclusive. */
      btrace_call_next (&end, 1);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
/* The to_call_history_from method of target record-btrace.
   Prints SIZE function segments around segment number FROM; the sign of
   SIZE selects the direction of expansion.  */

record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
  ULONGEST begin, end, context;

  context = abs (size);
  error (_("Bad record function-call-history-size."));

  /* Expand backward from FROM for negative SIZE...  */
  begin = from - context + 1;
  /* ...or forward from FROM for positive SIZE.  */
  end = from + context - 1;

  /* Check for wrap-around. */

  record_btrace_call_history_range (self, begin, end, flags);
/* The to_record_is_replaying method of target record-btrace.
   We are replaying if any live thread is replaying.  */

record_btrace_is_replaying (struct target_ops *self)
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
/* The to_xfer_partial method of target record-btrace.
   While replaying with read-only access, memory writes are rejected and
   reads are restricted to read-only sections.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay. */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops))
	case TARGET_OBJECT_MEMORY:
	    struct target_section *section;

	    /* We do not allow writing memory in general. */
	    if (writebuf != NULL)
		return TARGET_XFER_UNAVAILABLE;

	    /* We allow reading readonly memory. */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
		/* Check if the section we found is readonly. */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		    /* Truncate the request to fit into this section. */
		    len = min (len, section->endaddr - offset);

	    return TARGET_XFER_UNAVAILABLE;

  /* Forward the request. */
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
/* The to_insert_breakpoint method of target record-btrace.  */

record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  /* Inserting breakpoints requires accessing memory. Allow it for the
     duration of this function. */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the previous access mode even when the insertion throws.  */
  CATCH (except, RETURN_MASK_ALL)
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The to_remove_breakpoint method of target record-btrace.  */

record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  /* Removing breakpoints requires accessing memory. Allow it for the
     duration of this function. */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the previous access mode even when the removal throws.  */
  CATCH (except, RETURN_MASK_ALL)
      replay_memory_access = old;
      throw_exception (except);

  replay_memory_access = old;
/* The to_fetch_registers method of target record-btrace.
   While replaying, only the PC can be supplied (from the trace); all
   other requests go to the target beneath.  */

record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);

      /* We can only provide the PC register. */
      if (regno >= 0 && regno != pcreg)

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      /* Supply the PC of the current replay position.  */
      regcache_raw_supply (regcache, regno, &insn->pc);
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
/* The to_store_registers method of target record-btrace.
   Registers are read-only while replaying.  */

record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  /* Forward the store to the target beneath.  */
  t->to_store_registers (t, regcache, regno);
/* The to_prepare_to_store method of target record-btrace.
   While replaying there is nothing to prepare.  */

record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))

  /* Forward the request to the target beneath.  */
  t->to_prepare_to_store (t, regcache);
/* The branch trace frame cache.  */

struct btrace_frame_cache
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info. */
  struct frame_info *frame;

  /* The branch trace function segment. */
  const struct btrace_function *bfun;

/* A struct btrace_frame_cache hash table indexed by NEXT. */
static htab_t bfcache;
/* hash_f for htab_create_alloc of bfcache.
   Entries are identified by their frame pointer.  */

bfcache_hash (const void *arg)
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
/* eq_f for htab_create_alloc of bfcache.
   Two entries are equal if they describe the same frame.  */

bfcache_eq (const void *arg1, const void *arg2)
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
/* Create a new btrace frame cache for FRAME and register it in bfcache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
  struct btrace_frame_cache *cache;

  /* Allocate on the frame obstack so the cache lives as long as FRAME.  */
  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  /* FRAME must not already be in the table.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
/* Extract the branch trace function from a branch trace frame.
   Returns the cached function segment for FRAME, looked up in bfcache.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;

  /* Probe the table with a stack-allocated key; do not insert.  */
  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;

  gdb_assert (bfun != NULL);

  /* Without a caller segment, we cannot unwind further.  */
  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
/* Implement this_id method for record_btrace_frame_unwind.  */

record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  gdb_assert (bfun != NULL);

  /* Use the first segment of this function for the id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  /* The stack is unavailable; build an id from the function's start
     address and the segment number.  */
  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
/* Implement prev_register method for record_btrace_frame_unwind.
   The trace lets us reconstruct only the caller's PC; all other
   registers are reported unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  /* Only the PC register can be reconstructed from the trace.  */
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;

  gdb_assert (bfun != NULL);

  throw_error (NOT_AVAILABLE_ERROR,
	       _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
      /* For a return link, use the first instruction of the caller
	 segment directly...  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      /* ...otherwise, take the caller's last instruction and step past
	 the call.  */
      insn = VEC_last (btrace_insn_s, caller->insn);

      pc += gdb_insn_length (gdbarch, pc);

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
/* Implement sniffer method for record_btrace_frame_unwind.
   Claims a frame while replaying and attaches a btrace frame cache.  */

record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread. */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  next = get_next_frame (this_frame);
      const struct btrace_insn_iterator *replay;

      /* For the innermost frame, use the current replay position.  */
      replay = tp->btrace.replay;

      bfun = replay->function;
      const struct btrace_function *callee;

      /* Otherwise, derive our segment from the next (inner) frame's.  */
      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame. Initialize the frame cache. */
  cache = bfcache_new (this_frame);

  *this_cache = cache;
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims only frames whose callee segment was entered via a tail call.  */

record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);

  callee = btrace_get_frame_function (next);

  /* Only tail-call links are handled by this unwinder.  */
  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame. Initialize the frame cache. */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);

  *this_cache = cache;
/* Remove the frame cache entry for SELF from bfcache when the frame is
   destroyed.  The cache memory itself lives on the frame obstack.  */

record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
  struct btrace_frame_cache *cache;

  /* The entry must be present; it was inserted by bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
/* btrace recording does not store previous memory content, nor the stack
 frames' content.  Any unwinding would return erroneous results as the stack
 contents no longer match the changed PC value restored from history.
 Therefore this unwinder reports any possibly unwound registers as
1552 const struct frame_unwind record_btrace_frame_unwind =
1555 record_btrace_frame_unwind_stop_reason,
1556 record_btrace_frame_this_id,
1557 record_btrace_frame_prev_register,
1559 record_btrace_frame_sniffer,
1560 record_btrace_frame_dealloc_cache
1563 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1566 record_btrace_frame_unwind_stop_reason,
1567 record_btrace_frame_this_id,
1568 record_btrace_frame_prev_register,
1570 record_btrace_tailcall_frame_sniffer,
1571 record_btrace_frame_dealloc_cache
1574 /* Implement the to_get_unwinder method. */
1576 static const struct frame_unwind *
1577 record_btrace_to_get_unwinder (struct target_ops *self)
1579 return &record_btrace_frame_unwind;
1582 /* Implement the to_get_tailcall_unwinder method. */
1584 static const struct frame_unwind *
1585 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1587 return &record_btrace_tailcall_frame_unwind;
1590 /* Indicate that TP should be resumed according to FLAG. */
1593 record_btrace_resume_thread (struct thread_info *tp,
1594 enum btrace_thread_flag flag)
1596 struct btrace_thread_info *btinfo;
1598 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1600 btinfo = &tp->btrace;
1602 if ((btinfo->flags & BTHR_MOVE) != 0)
1603 error (_("Thread already moving."));
1605 /* Fetch the latest branch trace. */
1608 btinfo->flags |= flag;
1611 /* Find the thread to resume given a PTID. */
1613 static struct thread_info *
1614 record_btrace_find_resume_thread (ptid_t ptid)
1616 struct thread_info *tp;
1618 /* When asked to resume everything, we pick the current thread. */
1619 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1620 ptid = inferior_ptid;
1622 return find_thread_ptid (ptid);
1625 /* Start replaying a thread. */
/* NOTE(review): this excerpt elides the function's braces, the declarations
   of EXECUTING and STEPS, the TRY keyword that opens the exception-guarded
   region, END_CATCH, and the final return.  Comments refer to visible code
   only.  */
1627 static struct btrace_insn_iterator *
1628 record_btrace_start_replaying (struct thread_info *tp)
1630 struct btrace_insn_iterator *replay;
1631 struct btrace_thread_info *btinfo;
1634 btinfo = &tp->btrace;
1637 /* We can't start replaying without trace. */
1638 if (btinfo->begin == NULL)
1641 /* Clear the executing flag to allow changes to the current frame.
1642 We are not actually running, yet. We just started a reverse execution
1643 command or a record goto command.
1644 For the latter, EXECUTING is false and this has no effect.
1645 For the former, EXECUTING is true and we're in to_wait, about to
1646 move the thread. Since we need to recompute the stack, we temporarily
1647 set EXECUTING to false. */
1648 executing = is_executing (tp->ptid);
1649 set_executing (tp->ptid, 0);
1651 /* GDB stores the current frame_id when stepping in order to detect steps
1653 Since frames are computed differently when we're replaying, we need to
1654 recompute those stored frames and fix them up so we can still detect
1655 subroutines after we started replaying. */
1658 struct frame_info *frame;
1659 struct frame_id frame_id;
1660 int upd_step_frame_id, upd_step_stack_frame_id;
1662 /* The current frame without replaying - computed via normal unwind. */
1663 frame = get_current_frame ();
1664 frame_id = get_frame_id (frame);
1666 /* Check if we need to update any stepping-related frame id's. */
1667 upd_step_frame_id = frame_id_eq (frame_id,
1668 tp->control.step_frame_id);
1669 upd_step_stack_frame_id = frame_id_eq (frame_id,
1670 tp->control.step_stack_frame_id);
1672 /* We start replaying at the end of the branch trace. This corresponds
1673 to the current instruction. */
1674 replay = xmalloc (sizeof (*replay));
1675 btrace_insn_end (replay, btinfo);
1677 /* Skip gaps at the end of the trace. */
1678 while (btrace_insn_get (replay) == NULL)
/* NOTE(review): the STEPS == 0 check between these two lines is elided;
   presumably the error triggers when no previous instruction exists.  */
1682 steps = btrace_insn_prev (replay, 1);
1684 error (_("No trace."));
1687 /* We're not replaying, yet. */
1688 gdb_assert (btinfo->replay == NULL);
1689 btinfo->replay = replay;
1691 /* Make sure we're not using any stale registers. */
1692 registers_changed_ptid (tp->ptid);
1694 /* The current frame with replaying - computed via btrace unwind. */
1695 frame = get_current_frame ();
1696 frame_id = get_frame_id (frame);
1698 /* Replace stepping related frames where necessary. */
1699 if (upd_step_frame_id)
1700 tp->control.step_frame_id = frame_id;
1701 if (upd_step_stack_frame_id)
1702 tp->control.step_stack_frame_id = frame_id;
/* Exception path: undo everything done above so the thread is left in a
   consistent non-replaying state, then re-throw.  */
1704 CATCH (except, RETURN_MASK_ALL)
1706 /* Restore the previous execution state. */
1707 set_executing (tp->ptid, executing)
1709 xfree (btinfo->replay);
1710 btinfo->replay = NULL;
1712 registers_changed_ptid (tp->ptid);
1714 throw_exception (except);
1718 /* Restore the previous execution state. */
1719 set_executing (tp->ptid, executing);
1724 /* Stop replaying a thread. */
1727 record_btrace_stop_replaying (struct thread_info *tp)
1729 struct btrace_thread_info *btinfo;
1731 btinfo = &tp->btrace;
1733 xfree (btinfo->replay);
1734 btinfo->replay = NULL;
1736 /* Make sure we're not leaving any stale registers. */
1737 registers_changed_ptid (tp->ptid);
1740 /* The to_resume method of target record-btrace. */
/* NOTE(review): this excerpt elides the return type, braces, the NULL check
   guarding the "Cannot find thread" error, the step/continue condition
   around the two FLAG assignments, and the beneath-target delegation line.
   Comments refer to visible code only.  */
1743 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1744 enum gdb_signal signal)
1746 struct thread_info *tp, *other;
1747 enum btrace_thread_flag flag;
1749 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1751 /* Store the execution direction of the last resume. */
1752 record_btrace_resume_exec_dir = execution_direction;
1754 tp = record_btrace_find_resume_thread (ptid);
/* Presumably reached only when TP is NULL — guard elided in excerpt.  */
1756 error (_("Cannot find thread to resume."));
1758 /* Stop replaying other threads if the thread to resume is not replaying. */
1759 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1760 ALL_NON_EXITED_THREADS (other)
1761 record_btrace_stop_replaying (other);
1763 /* As long as we're not replaying, just forward the request. */
1764 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1767 return ops->to_resume (ops, ptid, step, signal);
1770 /* Compute the btrace thread flag for the requested move. */
/* First arm: continue; second arm: single-step.  The selecting condition
   (STEP == 0 vs. STEP != 0) is elided in this excerpt.  */
1772 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1774 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1776 /* At the moment, we only move a single thread. We could also move
1777 all threads in parallel by single-stepping each resumed thread
1778 until the first runs into an event.
1779 When we do that, we would want to continue all other threads.
1780 For now, just resume one thread to not confuse to_wait. */
1781 record_btrace_resume_thread (tp, flag);
1783 /* We just indicate the resume intent here. The actual stepping happens in
1784 record_btrace_wait below. */
1786 /* Async support. */
1787 if (target_can_async_p ())
/* Schedule the async event handler so the wait is processed from the
   event loop.  */
1790 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1794 /* Find a thread to move. */
1796 static struct thread_info *
1797 record_btrace_find_thread_to_move (ptid_t ptid)
1799 struct thread_info *tp;
1801 /* First check the parameter thread. */
1802 tp = find_thread_ptid (ptid);
1803 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1806 /* Otherwise, find one other thread that has been resumed. */
1807 ALL_NON_EXITED_THREADS (tp)
1808 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1814 /* Return a target_waitstatus indicating that we ran out of history. */
1816 static struct target_waitstatus
1817 btrace_step_no_history (void)
1819 struct target_waitstatus status;
1821 status.kind = TARGET_WAITKIND_NO_HISTORY;
1826 /* Return a target_waitstatus indicating that a step finished. */
1828 static struct target_waitstatus
1829 btrace_step_stopped (void)
1831 struct target_waitstatus status;
1833 status.kind = TARGET_WAITKIND_STOPPED;
1834 status.value.sig = GDB_SIGNAL_TRAP;
1839 /* Clear the record histories. */
1842 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1844 xfree (btinfo->insn_history);
1845 xfree (btinfo->call_history);
1847 btinfo->insn_history = NULL;
1848 btinfo->call_history = NULL;
1851 /* Step a single thread. */
/* NOTE(review): this excerpt elides the braces, the `switch (flags)`
   skeleton and its case labels (the sections below are, in order: invalid
   flag, forward step, reverse step, forward continue, reverse continue),
   the do-loop openings, the STEPS == 0 checks after each
   btrace_insn_next/prev call, and the local declarations of STEPS.
   Comments refer to visible code only.  */
1853 static struct target_waitstatus
1854 record_btrace_step_thread (struct thread_info *tp)
1856 struct btrace_insn_iterator *replay, end;
1857 struct btrace_thread_info *btinfo;
1858 struct address_space *aspace;
1859 struct inferior *inf;
1860 enum btrace_thread_flag flags;
1863 /* We can't step without an execution history. */
1864 if (btrace_is_empty (tp))
1865 return btrace_step_no_history ();
1867 btinfo = &tp->btrace;
1868 replay = btinfo->replay;
/* Consume the move request: take the flag and clear it so the thread is
   not moved again on the next wait.  */
1870 flags = btinfo->flags & BTHR_MOVE;
1871 btinfo->flags &= ~BTHR_MOVE;
1873 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
/* Default case of the (elided) switch on FLAGS.  */
1878 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
/* --- forward single-step --- */
1881 /* We're done if we're not replaying. */
1883 return btrace_step_no_history ();
1885 /* Skip gaps during replay. */
1888 steps = btrace_insn_next (replay, 1);
1891 record_btrace_stop_replaying (tp);
1892 return btrace_step_no_history ();
1895 while (btrace_insn_get (replay) == NULL);
1897 /* Determine the end of the instruction trace. */
1898 btrace_insn_end (&end, btinfo);
1900 /* We stop replaying if we reached the end of the trace. */
1901 if (btrace_insn_cmp (replay, &end) == 0)
1902 record_btrace_stop_replaying (tp);
1904 return btrace_step_stopped ();
/* --- reverse single-step --- */
1907 /* Start replaying if we're not already doing so. */
1909 replay = record_btrace_start_replaying (tp);
1911 /* If we can't step any further, we reached the end of the history.
1912 Skip gaps during replay. */
1915 steps = btrace_insn_prev (replay, 1);
1917 return btrace_step_no_history ();
1920 while (btrace_insn_get (replay) == NULL);
1922 return btrace_step_stopped ();
/* --- forward continue: step until end of trace or a breakpoint --- */
1925 /* We're done if we're not replaying. */
1927 return btrace_step_no_history ();
1929 inf = find_inferior_ptid (tp->ptid);
1930 aspace = inf->aspace;
1932 /* Determine the end of the instruction trace. */
1933 btrace_insn_end (&end, btinfo);
1937 const struct btrace_insn *insn;
1939 /* Skip gaps during replay. */
1942 steps = btrace_insn_next (replay, 1);
1945 record_btrace_stop_replaying (tp);
1946 return btrace_step_no_history ();
1949 insn = btrace_insn_get (replay);
1951 while (insn == NULL);
1953 /* We stop replaying if we reached the end of the trace. */
1954 if (btrace_insn_cmp (replay, &end) == 0)
1956 record_btrace_stop_replaying (tp);
1957 return btrace_step_no_history ();
1960 DEBUG ("stepping %d (%s) ... %s", tp->num,
1961 target_pid_to_str (tp->ptid),
1962 core_addr_to_string_nz (insn->pc));
/* Honor breakpoints at the replayed PC.  */
1964 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
1965 &btinfo->stop_reason))
1966 return btrace_step_stopped ();
/* --- reverse continue: step back until start of trace or a breakpoint --- */
1970 /* Start replaying if we're not already doing so. */
1972 replay = record_btrace_start_replaying (tp);
1974 inf = find_inferior_ptid (tp->ptid);
1975 aspace = inf->aspace;
1979 const struct btrace_insn *insn;
1981 /* If we can't step any further, we reached the end of the history.
1982 Skip gaps during replay. */
1985 steps = btrace_insn_prev (replay, 1);
1987 return btrace_step_no_history ();
1989 insn = btrace_insn_get (replay);
1991 while (insn == NULL);
1993 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1994 target_pid_to_str (tp->ptid),
1995 core_addr_to_string_nz (insn->pc));
1997 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
1998 &btinfo->stop_reason))
1999 return btrace_step_stopped ();
2004 /* The to_wait method of target record-btrace. */
/* NOTE(review): this excerpt elides the return type, braces, the
   `ops = ops->beneath;` delegation line, the NULL check on TP, and the
   final `return tp->ptid;`.  Comments refer to visible code only.  */
2007 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2008 struct target_waitstatus *status, int options)
2010 struct thread_info *tp, *other;
2012 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2014 /* As long as we're not replaying, just forward the request. */
2015 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2018 return ops->to_wait (ops, ptid, status, options);
2021 /* Let's find a thread to move. */
2022 tp = record_btrace_find_thread_to_move (ptid);
/* Presumably reached only when TP is NULL — guard elided in excerpt.  */
2025 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
2027 status->kind = TARGET_WAITKIND_IGNORE;
2028 return minus_one_ptid;
2031 /* We only move a single thread. We're not able to correlate threads. */
2032 *status = record_btrace_step_thread (tp);
2034 /* Stop all other threads. */
2036 ALL_NON_EXITED_THREADS (other)
2037 other->btrace.flags &= ~BTHR_MOVE;
2039 /* Start record histories anew from the current position. */
2040 record_btrace_clear_histories (&tp->btrace);
2042 /* We moved the replay position but did not update registers. */
2043 registers_changed_ptid (tp->ptid);
/* The to_can_execute_reverse method of target record-btrace.  Branch trace
   recording always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2056 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2059 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2061 if (record_btrace_is_replaying (ops))
2063 struct thread_info *tp = inferior_thread ();
2065 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2068 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2071 /* The to_supports_stopped_by_sw_breakpoint method of target
2075 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2077 if (record_btrace_is_replaying (ops))
2080 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2083 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2086 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2088 if (record_btrace_is_replaying (ops))
2090 struct thread_info *tp = inferior_thread ();
2092 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2095 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2098 /* The to_supports_stopped_by_hw_breakpoint method of target
2102 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2104 if (record_btrace_is_replaying (ops))
2107 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2110 /* The to_update_thread_list method of target record-btrace. */
2113 record_btrace_update_thread_list (struct target_ops *ops)
2115 /* We don't add or remove threads during replay. */
2116 if (record_btrace_is_replaying (ops))
2119 /* Forward the request. */
2121 ops->to_update_thread_list (ops);
2124 /* The to_thread_alive method of target record-btrace. */
2127 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2129 /* We don't add or remove threads during replay. */
2130 if (record_btrace_is_replaying (ops))
2131 return find_thread_ptid (ptid) != NULL;
2133 /* Forward the request. */
2135 return ops->to_thread_alive (ops, ptid);
2138 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2142 record_btrace_set_replay (struct thread_info *tp,
2143 const struct btrace_insn_iterator *it)
2145 struct btrace_thread_info *btinfo;
2147 btinfo = &tp->btrace;
2149 if (it == NULL || it->function == NULL)
2150 record_btrace_stop_replaying (tp);
2153 if (btinfo->replay == NULL)
2154 record_btrace_start_replaying (tp);
2155 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2158 *btinfo->replay = *it;
2159 registers_changed_ptid (tp->ptid);
2162 /* Start anew from the new replay position. */
2163 record_btrace_clear_histories (btinfo);
2166 /* The to_goto_record_begin method of target record-btrace. */
2169 record_btrace_goto_begin (struct target_ops *self)
2171 struct thread_info *tp;
2172 struct btrace_insn_iterator begin;
2174 tp = require_btrace_thread ();
2176 btrace_insn_begin (&begin, &tp->btrace);
2177 record_btrace_set_replay (tp, &begin);
2179 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2182 /* The to_goto_record_end method of target record-btrace. */
2185 record_btrace_goto_end (struct target_ops *ops)
2187 struct thread_info *tp;
2189 tp = require_btrace_thread ();
2191 record_btrace_set_replay (tp, NULL);
2193 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2196 /* The to_goto_record method of target record-btrace. */
2199 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2201 struct thread_info *tp;
2202 struct btrace_insn_iterator it;
2203 unsigned int number;
2208 /* Check for wrap-arounds. */
2210 error (_("Instruction number out of range."));
2212 tp = require_btrace_thread ();
2214 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2216 error (_("No such instruction."));
2218 record_btrace_set_replay (tp, &it);
2220 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2223 /* The to_execution_direction target method. */
2225 static enum exec_direction_kind
2226 record_btrace_execution_direction (struct target_ops *self)
2228 return record_btrace_resume_exec_dir;
2231 /* The to_prepare_to_generate_core target method. */
2234 record_btrace_prepare_to_generate_core (struct target_ops *self)
2236 record_btrace_generating_corefile = 1;
2239 /* The to_done_generating_core target method. */
2242 record_btrace_done_generating_core (struct target_ops *self)
2244 record_btrace_generating_corefile = 0;
2247 /* Initialize the record-btrace target ops. */
2250 init_record_btrace_ops (void)
2252 struct target_ops *ops;
2254 ops = &record_btrace_ops;
2255 ops->to_shortname = "record-btrace";
2256 ops->to_longname = "Branch tracing target";
2257 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2258 ops->to_open = record_btrace_open;
2259 ops->to_close = record_btrace_close;
2260 ops->to_async = record_btrace_async;
2261 ops->to_detach = record_detach;
2262 ops->to_disconnect = record_disconnect;
2263 ops->to_mourn_inferior = record_mourn_inferior;
2264 ops->to_kill = record_kill;
2265 ops->to_stop_recording = record_btrace_stop_recording;
2266 ops->to_info_record = record_btrace_info;
2267 ops->to_insn_history = record_btrace_insn_history;
2268 ops->to_insn_history_from = record_btrace_insn_history_from;
2269 ops->to_insn_history_range = record_btrace_insn_history_range;
2270 ops->to_call_history = record_btrace_call_history;
2271 ops->to_call_history_from = record_btrace_call_history_from;
2272 ops->to_call_history_range = record_btrace_call_history_range;
2273 ops->to_record_is_replaying = record_btrace_is_replaying;
2274 ops->to_xfer_partial = record_btrace_xfer_partial;
2275 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2276 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2277 ops->to_fetch_registers = record_btrace_fetch_registers;
2278 ops->to_store_registers = record_btrace_store_registers;
2279 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2280 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2281 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2282 ops->to_resume = record_btrace_resume;
2283 ops->to_wait = record_btrace_wait;
2284 ops->to_update_thread_list = record_btrace_update_thread_list;
2285 ops->to_thread_alive = record_btrace_thread_alive;
2286 ops->to_goto_record_begin = record_btrace_goto_begin;
2287 ops->to_goto_record_end = record_btrace_goto_end;
2288 ops->to_goto_record = record_btrace_goto;
2289 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2290 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2291 ops->to_supports_stopped_by_sw_breakpoint
2292 = record_btrace_supports_stopped_by_sw_breakpoint;
2293 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2294 ops->to_supports_stopped_by_hw_breakpoint
2295 = record_btrace_supports_stopped_by_hw_breakpoint;
2296 ops->to_execution_direction = record_btrace_execution_direction;
2297 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2298 ops->to_done_generating_core = record_btrace_done_generating_core;
2299 ops->to_stratum = record_stratum;
2300 ops->to_magic = OPS_MAGIC;
2303 /* Start recording in BTS format. */
2306 cmd_record_btrace_bts_start (char *args, int from_tty)
2309 if (args != NULL && *args != 0)
2310 error (_("Invalid argument."));
2312 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2316 execute_command ("target record-btrace", from_tty);
2318 CATCH (exception, RETURN_MASK_ALL)
2320 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2321 throw_exception (exception);
2326 /* Alias for "target record". */
2329 cmd_record_btrace_start (char *args, int from_tty)
2332 if (args != NULL && *args != 0)
2333 error (_("Invalid argument."));
2335 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2339 execute_command ("target record-btrace", from_tty);
2341 CATCH (exception, RETURN_MASK_ALL)
2343 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2344 throw_exception (exception);
2349 /* The "set record btrace" command. */
2352 cmd_set_record_btrace (char *args, int from_tty)
2354 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2357 /* The "show record btrace" command. */
2360 cmd_show_record_btrace (char *args, int from_tty)
2362 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2365 /* The "show record btrace replay-memory-access" command. */
2368 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2369 struct cmd_list_element *c, const char *value)
2371 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2372 replay_memory_access);
2375 /* The "set record btrace bts" command. */
2378 cmd_set_record_btrace_bts (char *args, int from_tty)
2380 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2381 "by an apporpriate subcommand.\n"));
2382 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2383 all_commands, gdb_stdout);
2386 /* The "show record btrace bts" command. */
2389 cmd_show_record_btrace_bts (char *args, int from_tty)
2391 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2394 void _initialize_record_btrace (void);
2396 /* Initialize btrace commands. */
2399 _initialize_record_btrace (void)
2401 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2402 _("Start branch trace recording."), &record_btrace_cmdlist,
2403 "record btrace ", 0, &record_cmdlist);
2404 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2406 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2408 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2409 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2410 This format may not be available on all processors."),
2411 &record_btrace_cmdlist);
2412 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2414 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2415 _("Set record options"), &set_record_btrace_cmdlist,
2416 "set record btrace ", 0, &set_record_cmdlist);
2418 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2419 _("Show record options"), &show_record_btrace_cmdlist,
2420 "show record btrace ", 0, &show_record_cmdlist);
2422 add_setshow_enum_cmd ("replay-memory-access", no_class,
2423 replay_memory_access_types, &replay_memory_access, _("\
2424 Set what memory accesses are allowed during replay."), _("\
2425 Show what memory accesses are allowed during replay."),
2426 _("Default is READ-ONLY.\n\n\
2427 The btrace record target does not trace data.\n\
2428 The memory therefore corresponds to the live target and not \
2429 to the current replay position.\n\n\
2430 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2431 When READ-WRITE, allow accesses to read-only and read-write memory during \
2433 NULL, cmd_show_replay_memory_access,
2434 &set_record_btrace_cmdlist,
2435 &show_record_btrace_cmdlist);
2437 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2438 _("Set record btrace bts options"),
2439 &set_record_btrace_bts_cmdlist,
2440 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2442 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2443 _("Show record btrace bts options"),
2444 &show_record_btrace_bts_cmdlist,
2445 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2447 add_setshow_uinteger_cmd ("buffer-size", no_class,
2448 &record_btrace_conf.bts.size,
2449 _("Set the record/replay bts buffer size."),
2450 _("Show the record/replay bts buffer size."), _("\
2451 When starting recording request a trace buffer of this size. \
2452 The actual buffer size may differ from the requested size. \
2453 Use \"info record\" to see the actual buffer size.\n\n\
2454 Bigger buffers allow longer recording but also take more time to process \
2455 the recorded execution trace.\n\n\
2456 The trace buffer size may not be changed while recording."), NULL, NULL,
2457 &set_record_btrace_bts_cmdlist,
2458 &show_record_btrace_bts_cmdlist);
2460 init_record_btrace_ops ();
2461 add_target (&record_btrace_ops);
2463 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2466 record_btrace_conf.bts.size = 64 * 1024;