1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer *record_btrace_thread_observer;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access = replay_memory_access_read_only;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element *set_record_btrace_cmdlist;
62 static struct cmd_list_element *show_record_btrace_cmdlist;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile;
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf;
76 /* Command list for "record btrace". */
77 static struct cmd_list_element *record_btrace_cmdlist;
79 /* Command lists for "set/show record btrace bts". */
80 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
81 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83 /* Command lists for "set/show record btrace pt". */
84 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
85 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87 /* Print a record-btrace debug message. Use do ... while (0) to avoid
88 ambiguities when used in if statements.  */
90 #define DEBUG(msg, args...) \
/* NOTE(review): the "do {" / "} while (0)" wrapper lines referred to by the
   comment above are not visible in this excerpt.  */
93 if (record_debug != 0) \
94 fprintf_unfiltered (gdb_stdlog, \
95 "[record-btrace] " msg "\n", ##args); \
100 /* Update the branch trace for the current thread and return a pointer to its
103 Throws an error if there is no thread or no trace. This function never
106 static struct thread_info *
107 require_btrace_thread (void)
109 struct thread_info *tp;
/* Look up the thread selected via INFERIOR_PTID.  */
113 tp = find_thread_ptid (inferior_ptid);
/* NOTE(review): the NULL check guarding this error is missing from this
   excerpt -- presumably "if (tp == NULL)".  */
115 error (_("No thread."));
/* An empty trace is treated the same as having no trace at all.  */
119 if (btrace_is_empty (tp))
120 error (_("No trace."));
125 /* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
128 Throws an error if there is no thread or no trace. This function never
131 static struct btrace_thread_info *
132 require_btrace (void)
134 struct thread_info *tp;
/* Delegate lookup and validation; on return TP is known to have a
   non-empty branch trace.  */
136 tp = require_btrace_thread ();
141 /* Enable branch tracing for one thread. Warn on errors.  */
144 record_btrace_enable_warn (struct thread_info *tp)
/* Enable tracing with the current recording configuration; downgrade any
   error thrown by btrace_enable to a warning so new-thread handling can
   continue.  */
148 btrace_enable (tp, &record_btrace_conf);
150 CATCH (error, RETURN_MASK_ERROR)
152 warning ("%s", error.message);
157 /* Callback function to disable branch tracing for one thread. */
160 record_btrace_disable_callback (void *arg)
162 struct thread_info *tp;
169 /* Enable automatic tracing of new threads. */
172 record_btrace_auto_enable (void)
174 DEBUG ("attach thread observer");
176 record_btrace_thread_observer
177 = observer_attach_new_thread (record_btrace_enable_warn);
180 /* Disable automatic tracing of new threads.  */
183 record_btrace_auto_disable (void)
185 /* The observer may have been detached, already.  */
186 if (record_btrace_thread_observer == NULL)
189 DEBUG ("detach thread observer");
/* Detach the new-thread observer and clear the handle so a later call
   becomes a no-op.  */
191 observer_detach_new_thread (record_btrace_thread_observer);
192 record_btrace_thread_observer = NULL;
195 /* The record-btrace async event handler function. */
198 record_btrace_handle_async_inferior_event (gdb_client_data data)
200 inferior_event_handler (INF_REG_EVENT, NULL);
203 /* The to_open method of target record-btrace.  */
206 record_btrace_open (const char *args, int from_tty)
208 struct cleanup *disable_chain;
209 struct thread_info *tp;
/* Recording requires a running inferior.  */
215 if (!target_has_execution)
216 error (_("The program is not being run."));
219 error (_("Record btrace can't debug inferior in non-stop mode."))
221 gdb_assert (record_btrace_thread_observer == NULL);
/* Enable tracing on each selected thread (all threads when ARGS is
   empty); register a cleanup per thread so tracing is disabled again if
   enabling a later thread throws.  */
223 disable_chain = make_cleanup (null_cleanup, NULL);
224 ALL_NON_EXITED_THREADS (tp)
225 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
227 btrace_enable (tp, &record_btrace_conf);
229 make_cleanup (record_btrace_disable_callback, tp);
232 record_btrace_auto_enable ();
234 push_target (&record_btrace_ops);
236 record_btrace_async_inferior_event_handler
237 = create_async_event_handler (record_btrace_handle_async_inferior_event,
239 record_btrace_generating_corefile = 0;
241 observer_notify_record_changed (current_inferior (), 1);
/* Everything succeeded -- keep tracing enabled.  */
243 discard_cleanups (disable_chain);
246 /* The to_stop_recording method of target record-btrace. */
249 record_btrace_stop_recording (struct target_ops *self)
251 struct thread_info *tp;
253 DEBUG ("stop recording");
255 record_btrace_auto_disable ();
257 ALL_NON_EXITED_THREADS (tp)
258 if (tp->btrace.target != NULL)
262 /* The to_close method of target record-btrace.  */
265 record_btrace_close (struct target_ops *self)
267 struct thread_info *tp;
/* Drop the async event handler installed by to_open, if any.  */
269 if (record_btrace_async_inferior_event_handler != NULL)
270 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
272 /* Make sure automatic recording gets disabled even if we did not stop
273 recording before closing the record-btrace target.  */
274 record_btrace_auto_disable ();
276 /* We should have already stopped recording.
277 Tear down btrace in case we have not.  */
278 ALL_NON_EXITED_THREADS (tp)
279 btrace_teardown (tp);
282 /* The to_async method of target record-btrace. */
285 record_btrace_async (struct target_ops *ops, int enable)
288 mark_async_event_handler (record_btrace_async_inferior_event_handler);
290 clear_async_event_handler (record_btrace_async_inferior_event_handler);
292 ops->beneath->to_async (ops->beneath, enable);
295 /* Adjusts the size and returns a human readable size suffix.  */
298 record_btrace_adjust_size (unsigned int *size)
/* Test whether SZ is an exact multiple of 1 GB, 1 MB, or 1 KB (in that
   order) by masking the low bits.  NOTE(review): the lines that shift
   *SIZE down and return the corresponding suffix string are missing from
   this excerpt.  */
304 if ((sz & ((1u << 30) - 1)) == 0)
309 else if ((sz & ((1u << 20) - 1)) == 0)
314 else if ((sz & ((1u << 10) - 1)) == 0)
323 /* Print a BTS configuration. */
326 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
334 suffix = record_btrace_adjust_size (&size);
335 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
339 /* Print an Intel(R) Processor Trace configuration. */
342 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
350 suffix = record_btrace_adjust_size (&size);
351 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
355 /* Print a branch tracing configuration. */
358 record_btrace_print_conf (const struct btrace_config *conf)
360 printf_unfiltered (_("Recording format: %s.\n"),
361 btrace_format_string (conf->format));
363 switch (conf->format)
365 case BTRACE_FORMAT_NONE:
368 case BTRACE_FORMAT_BTS:
369 record_btrace_print_bts_conf (&conf->bts);
372 case BTRACE_FORMAT_PT:
373 record_btrace_print_pt_conf (&conf->pt);
377 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
380 /* The to_info_record method of target record-btrace.  */
383 record_btrace_info (struct target_ops *self)
385 struct btrace_thread_info *btinfo;
386 const struct btrace_config *conf;
387 struct thread_info *tp;
388 unsigned int insns, calls, gaps;
/* Report on the current thread only.  */
392 tp = find_thread_ptid (inferior_ptid);
/* NOTE(review): the NULL check guarding this error is missing from this
   excerpt.  */
394 error (_("No thread."));
396 btinfo = &tp->btrace;
398 conf = btrace_conf (btinfo);
400 record_btrace_print_conf (conf);
/* Count functions and instructions by walking to the end of the trace.  */
408 if (!btrace_is_empty (tp))
410 struct btrace_call_iterator call;
411 struct btrace_insn_iterator insn;
413 btrace_call_end (&call, btinfo);
414 btrace_call_prev (&call, 1);
415 calls = btrace_call_number (&call);
417 btrace_insn_end (&insn, btinfo);
419 insns = btrace_insn_number (&insn);
422 /* The last instruction does not really belong to the trace.  */
429 /* Skip gaps at the end.  */
432 steps = btrace_insn_prev (&insn, 1);
436 insns = btrace_insn_number (&insn);
441 gaps = btinfo->ngaps;
444 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
445 "for thread %d (%s).\n"), insns, calls, gaps,
446 tp->num, target_pid_to_str (tp->ptid));
/* When replaying, also show the current replay position.  */
448 if (btrace_is_replaying (tp))
449 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
450 btrace_insn_number (btinfo->replay));
453 /* Print a decode error.  */
456 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
457 enum btrace_format format)
/* Fall back to a generic message for codes we do not recognize.  */
462 errstr = _("unknown");
/* Map format-specific error codes to human-readable strings.  */
470 case BTRACE_FORMAT_BTS:
476 case BDE_BTS_OVERFLOW:
477 errstr = _("instruction overflow");
480 case BDE_BTS_INSN_SIZE:
481 errstr = _("unknown instruction");
486 #if defined (HAVE_LIBIPT)
487 case BTRACE_FORMAT_PT:
490 case BDE_PT_USER_QUIT:
492 errstr = _("trace decode cancelled");
495 case BDE_PT_DISABLED:
497 errstr = _("disabled");
500 case BDE_PT_OVERFLOW:
502 errstr = _("overflow");
/* Other PT codes are translated via libipt's error-string API.  */
507 errstr = pt_errstr (pt_errcode (errcode));
511 #endif /* defined (HAVE_LIBIPT) */
/* Emit "[decode error (<code>): <string>]" through the ui_out.  */
514 ui_out_text (uiout, _("["));
517 ui_out_text (uiout, _("decode error ("));
518 ui_out_field_int (uiout, "errcode", errcode);
519 ui_out_text (uiout, _("): "));
521 ui_out_text (uiout, errstr);
522 ui_out_text (uiout, _("]\n"));
525 /* Print an unsigned int. */
528 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
530 ui_out_field_fmt (uiout, fld, "%u", val);
533 /* Disassemble a section of the recorded instruction trace.  */
536 btrace_insn_history (struct ui_out *uiout,
537 const struct btrace_thread_info *btinfo,
538 const struct btrace_insn_iterator *begin,
539 const struct btrace_insn_iterator *end, int flags)
541 struct gdbarch *gdbarch;
542 struct btrace_insn_iterator it;
544 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
545 btrace_insn_number (end));
547 gdbarch = target_gdbarch ();
/* Walk [BEGIN; END) one recorded instruction at a time.  */
549 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
551 const struct btrace_insn *insn;
553 insn = btrace_insn_get (&it);
555 /* A NULL instruction indicates a gap in the trace.  */
558 const struct btrace_config *conf;
560 conf = btrace_conf (btinfo);
562 /* We have trace so we must have a configuration.  */
563 gdb_assert (conf != NULL);
565 btrace_ui_out_decode_error (uiout, it.function->errcode,
572 /* We may add a speculation prefix later. We use the same space
573 that is used for the pc prefix.  */
574 if ((flags & DISASSEMBLY_OMIT_PC) == 0)
575 strncpy (prefix, pc_prefix (insn->pc), 3)
584 /* Print the instruction index.  */
585 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
586 ui_out_text (uiout, "\t");
588 /* Indicate speculative execution by a leading '?'.  */
589 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
592 /* Print the prefix; we tell gdb_disassembly below to omit it.  */
593 ui_out_field_fmt (uiout, "prefix", "%s", prefix);
595 /* Disassembly with '/m' flag may not produce the expected result.
/* Disassemble exactly one instruction: count 1 over [pc, pc + 1).  */
597 gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
598 1, insn->pc, insn->pc + 1);
603 /* The to_insn_history method of target record-btrace.  */
606 record_btrace_insn_history (struct target_ops *self, int size, int flags)
608 struct btrace_thread_info *btinfo;
609 struct btrace_insn_history *history;
610 struct btrace_insn_iterator begin, end;
611 struct cleanup *uiout_cleanup;
612 struct ui_out *uiout;
613 unsigned int context, covered;
615 uiout = current_uiout;
616 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* SIZE's sign selects the direction; its magnitude is the number of
   instructions to show.  */
618 context = abs (size);
620 error (_("Bad record instruction-history-size."));
622 btinfo = require_btrace ();
623 history = btinfo->insn_history;
/* First request: establish the initial window.  */
626 struct btrace_insn_iterator *replay;
628 DEBUG ("insn-history (0x%x): %d", flags, size);
630 /* If we're replaying, we start at the replay position. Otherwise, we
631 start at the tail of the trace.  */
632 replay = btinfo->replay;
636 btrace_insn_end (&begin, btinfo);
638 /* We start from here and expand in the requested direction. Then we
639 expand in the other direction, as well, to fill up any remaining
644 /* We want the current position covered, as well.  */
645 covered = btrace_insn_next (&end, 1);
646 covered += btrace_insn_prev (&begin, context - covered);
647 covered += btrace_insn_next (&end, context - covered);
651 covered = btrace_insn_next (&end, context);
652 covered += btrace_insn_prev (&begin, context - covered);
/* Subsequent request: continue from the previous window.  */
657 begin = history->begin;
660 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
661 btrace_insn_number (&begin), btrace_insn_number (&end));
/* Move the window in the requested direction.  */
666 covered = btrace_insn_prev (&begin, context);
671 covered = btrace_insn_next (&end, context);
676 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
/* Report when we hit either end of the recorded trace.  */
680 printf_unfiltered (_("At the start of the branch trace record.\n"));
682 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the window for the next history request.  */
685 btrace_set_insn_history (btinfo, &begin, &end);
686 do_cleanups (uiout_cleanup);
689 /* The to_insn_history_range method of target record-btrace.  */
692 record_btrace_insn_history_range (struct target_ops *self,
693 ULONGEST from, ULONGEST to, int flags)
695 struct btrace_thread_info *btinfo;
696 struct btrace_insn_history *history;
697 struct btrace_insn_iterator begin, end;
698 struct cleanup *uiout_cleanup;
699 struct ui_out *uiout;
700 unsigned int low, high;
703 uiout = current_uiout;
704 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
709 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
711 /* Check for wrap-arounds.  */
/* LOW/HIGH are unsigned int; reject FROM/TO values that were truncated
   by the narrowing conversion.  */
712 if (low != from || high != to)
713 error (_("Bad range."));
716 error (_("Bad range."));
718 btinfo = require_btrace ();
/* LOW must denote an existing instruction.  */
720 found = btrace_find_insn_by_number (&begin, btinfo, low);
722 error (_("Range out of bounds."));
724 found = btrace_find_insn_by_number (&end, btinfo, high);
727 /* Silently truncate the range.  */
728 btrace_insn_end (&end, btinfo);
732 /* We want both begin and end to be inclusive.  */
733 btrace_insn_next (&end, 1);
736 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
737 btrace_set_insn_history (btinfo, &begin, &end);
739 do_cleanups (uiout_cleanup);
742 /* The to_insn_history_from method of target record-btrace.  */
745 record_btrace_insn_history_from (struct target_ops *self,
746 ULONGEST from, int size, int flags)
748 ULONGEST begin, end, context;
750 context = abs (size);
752 error (_("Bad record instruction-history-size."));
/* Compute an inclusive window of CONTEXT instructions ending (negative
   SIZE) or starting (positive SIZE) at FROM.  */
761 begin = from - context + 1;
766 end = from + context - 1;
768 /* Check for wrap-around.  */
/* Delegate the actual printing to the range method.  */
773 record_btrace_insn_history_range (self, begin, end, flags);
776 /* Print the instruction number range for a function call history line. */
779 btrace_call_history_insn_range (struct ui_out *uiout,
780 const struct btrace_function *bfun)
782 unsigned int begin, end, size;
784 size = VEC_length (btrace_insn_s, bfun->insn);
785 gdb_assert (size > 0);
787 begin = bfun->insn_offset;
788 end = begin + size - 1;
790 ui_out_field_uint (uiout, "insn begin", begin);
791 ui_out_text (uiout, ",");
792 ui_out_field_uint (uiout, "insn end", end);
795 /* Compute the lowest and highest source line for the instructions in BFUN
796 and return them in PBEGIN and PEND.
797 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
798 result from inlining or macro expansion. */
801 btrace_compute_src_line_range (const struct btrace_function *bfun,
802 int *pbegin, int *pend)
804 struct btrace_insn *insn;
805 struct symtab *symtab;
817 symtab = symbol_symtab (sym);
819 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
821 struct symtab_and_line sal;
823 sal = find_pc_line (insn->pc, 0);
824 if (sal.symtab != symtab || sal.line == 0)
827 begin = min (begin, sal.line);
828 end = max (end, sal.line);
836 /* Print the source line information for a function call history line. */
839 btrace_call_history_src_line (struct ui_out *uiout,
840 const struct btrace_function *bfun)
849 ui_out_field_string (uiout, "file",
850 symtab_to_filename_for_display (symbol_symtab (sym)));
852 btrace_compute_src_line_range (bfun, &begin, &end);
856 ui_out_text (uiout, ":");
857 ui_out_field_int (uiout, "min line", begin);
862 ui_out_text (uiout, ",");
863 ui_out_field_int (uiout, "max line", end);
866 /* Get the name of a branch trace function. */
869 btrace_get_bfun_name (const struct btrace_function *bfun)
871 struct minimal_symbol *msym;
881 return SYMBOL_PRINT_NAME (sym);
882 else if (msym != NULL)
883 return MSYMBOL_PRINT_NAME (msym);
888 /* Disassemble a section of the recorded function trace.  */
891 btrace_call_history (struct ui_out *uiout,
892 const struct btrace_thread_info *btinfo,
893 const struct btrace_call_iterator *begin,
894 const struct btrace_call_iterator *end,
895 enum record_print_flag flags)
897 struct btrace_call_iterator it;
899 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
900 btrace_call_number (end));
/* Walk [BEGIN; END) one function segment at a time.  */
902 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
904 const struct btrace_function *bfun;
905 struct minimal_symbol *msym;
908 bfun = btrace_call_get (&it);
912 /* Print the function index.  */
913 ui_out_field_uint (uiout, "index", bfun->number);
914 ui_out_text (uiout, "\t");
916 /* Indicate gaps in the trace.  */
917 if (bfun->errcode != 0)
919 const struct btrace_config *conf;
921 conf = btrace_conf (btinfo);
923 /* We have trace so we must have a configuration.  */
924 gdb_assert (conf != NULL);
926 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
/* Optionally indent by call depth (btinfo->level normalizes the
   segment's relative level to a non-negative indent).  */
931 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
933 int level = bfun->level + btinfo->level, i;
935 for (i = 0; i < level; ++i)
936 ui_out_text (uiout, "  ");
/* Prefer the full symbol, fall back to the minimal symbol, then "??"
   (CLI only).  */
940 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
941 else if (msym != NULL)
942 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
943 else if (!ui_out_is_mi_like_p (uiout))
944 ui_out_field_string (uiout, "function", "??");
946 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
948 ui_out_text (uiout, _("\tinst "));
949 btrace_call_history_insn_range (uiout, bfun);
952 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
954 ui_out_text (uiout, _("\tat "));
955 btrace_call_history_src_line (uiout, bfun);
958 ui_out_text (uiout, "\n");
962 /* The to_call_history method of target record-btrace.  */
965 record_btrace_call_history (struct target_ops *self, int size, int flags)
967 struct btrace_thread_info *btinfo;
968 struct btrace_call_history *history;
969 struct btrace_call_iterator begin, end;
970 struct cleanup *uiout_cleanup;
971 struct ui_out *uiout;
972 unsigned int context, covered;
974 uiout = current_uiout;
975 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* SIZE's sign selects the direction; its magnitude is the number of
   function segments to show.  */
977 context = abs (size);
979 error (_("Bad record function-call-history-size."));
981 btinfo = require_btrace ();
982 history = btinfo->call_history;
/* First request: establish the initial window.  */
985 struct btrace_insn_iterator *replay;
987 DEBUG ("call-history (0x%x): %d", flags, size);
989 /* If we're replaying, we start at the replay position. Otherwise, we
990 start at the tail of the trace.  */
991 replay = btinfo->replay;
994 begin.function = replay->function;
995 begin.btinfo = btinfo;
998 btrace_call_end (&begin, btinfo);
1000 /* We start from here and expand in the requested direction. Then we
1001 expand in the other direction, as well, to fill up any remaining
1006 /* We want the current position covered, as well.  */
1007 covered = btrace_call_next (&end, 1);
1008 covered += btrace_call_prev (&begin, context - covered);
1009 covered += btrace_call_next (&end, context - covered);
1013 covered = btrace_call_next (&end, context);
1014 covered += btrace_call_prev (&begin, context- covered);
/* Subsequent request: continue from the previous window.  */
1019 begin = history->begin;
1022 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1023 btrace_call_number (&begin), btrace_call_number (&end));
/* Move the window in the requested direction.  */
1028 covered = btrace_call_prev (&begin, context);
1033 covered = btrace_call_next (&end, context);
1038 btrace_call_history (uiout, btinfo, &begin, &end, flags);
/* Report when we hit either end of the recorded trace.  */
1042 printf_unfiltered (_("At the start of the branch trace record.\n"));
1044 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the window for the next history request.  */
1047 btrace_set_call_history (btinfo, &begin, &end);
1048 do_cleanups (uiout_cleanup);
1051 /* The to_call_history_range method of target record-btrace. */
1054 record_btrace_call_history_range (struct target_ops *self,
1055 ULONGEST from, ULONGEST to, int flags)
1057 struct btrace_thread_info *btinfo;
1058 struct btrace_call_history *history;
1059 struct btrace_call_iterator begin, end;
1060 struct cleanup *uiout_cleanup;
1061 struct ui_out *uiout;
1062 unsigned int low, high;
1065 uiout = current_uiout;
1066 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1071 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1073 /* Check for wrap-arounds. */
1074 if (low != from || high != to)
1075 error (_("Bad range."));
1078 error (_("Bad range."));
1080 btinfo = require_btrace ();
1082 found = btrace_find_call_by_number (&begin, btinfo, low);
1084 error (_("Range out of bounds."));
1086 found = btrace_find_call_by_number (&end, btinfo, high);
1089 /* Silently truncate the range. */
1090 btrace_call_end (&end, btinfo);
1094 /* We want both begin and end to be inclusive. */
1095 btrace_call_next (&end, 1);
1098 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1099 btrace_set_call_history (btinfo, &begin, &end);
1101 do_cleanups (uiout_cleanup);
1104 /* The to_call_history_from method of target record-btrace. */
1107 record_btrace_call_history_from (struct target_ops *self,
1108 ULONGEST from, int size, int flags)
1110 ULONGEST begin, end, context;
1112 context = abs (size);
1114 error (_("Bad record function-call-history-size."));
1123 begin = from - context + 1;
1128 end = from + context - 1;
1130 /* Check for wrap-around. */
1135 record_btrace_call_history_range (self, begin, end, flags);
1138 /* The to_record_is_replaying method of target record-btrace. */
1141 record_btrace_is_replaying (struct target_ops *self)
1143 struct thread_info *tp;
1145 ALL_NON_EXITED_THREADS (tp)
1146 if (btrace_is_replaying (tp))
1152 /* The to_xfer_partial method of target record-btrace.  */
1154 static enum target_xfer_status
1155 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1156 const char *annex, gdb_byte *readbuf,
1157 const gdb_byte *writebuf, ULONGEST offset,
1158 ULONGEST len, ULONGEST *xfered_len)
1160 struct target_ops *t;
1162 /* Filter out requests that don't make sense during replay.  */
1163 if (replay_memory_access == replay_memory_access_read_only
1164 && !record_btrace_generating_corefile
1165 && record_btrace_is_replaying (ops))
1169 case TARGET_OBJECT_MEMORY:
1171 struct target_section *section;
1173 /* We do not allow writing memory in general.  */
1174 if (writebuf != NULL)
1177 return TARGET_XFER_UNAVAILABLE;
1180 /* We allow reading readonly memory.  */
1181 section = target_section_by_addr (ops, offset);
1182 if (section != NULL)
1184 /* Check if the section we found is readonly.  */
1185 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1186 section->the_bfd_section)
1187 & SEC_READONLY) != 0)
1189 /* Truncate the request to fit into this section.  */
1190 len = min (len, section->endaddr - offset);
/* Not a read-only section: the data may have changed since it was
   recorded, so report it as unavailable.  */
1196 return TARGET_XFER_UNAVAILABLE;
1201 /* Forward the request.  */
/* NOTE(review): an "ops = ops->beneath;" step appears to be missing from
   this excerpt; as written this call would recurse into this method.  */
1203 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1204 offset, len, xfered_len);
1207 /* The to_insert_breakpoint method of target record-btrace.  */
1210 record_btrace_insert_breakpoint (struct target_ops *ops,
1211 struct gdbarch *gdbarch,
1212 struct bp_target_info *bp_tgt)
1217 /* Inserting breakpoints requires accessing memory. Allow it for the
1218 duration of this function.  */
1219 old = replay_memory_access;
1220 replay_memory_access = replay_memory_access_read_write;
/* Delegate to the target beneath; restore the saved access mode even if
   the insertion throws.  */
1225 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1227 CATCH (except, RETURN_MASK_ALL)
1229 replay_memory_access = old;
1230 throw_exception (except);
1233 replay_memory_access = old;
1238 /* The to_remove_breakpoint method of target record-btrace. */
1241 record_btrace_remove_breakpoint (struct target_ops *ops,
1242 struct gdbarch *gdbarch,
1243 struct bp_target_info *bp_tgt)
1248 /* Removing breakpoints requires accessing memory. Allow it for the
1249 duration of this function. */
1250 old = replay_memory_access;
1251 replay_memory_access = replay_memory_access_read_write;
1256 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1258 CATCH (except, RETURN_MASK_ALL)
1260 replay_memory_access = old;
1261 throw_exception (except);
1264 replay_memory_access = old;
1269 /* The to_fetch_registers method of target record-btrace.  */
1272 record_btrace_fetch_registers (struct target_ops *ops,
1273 struct regcache *regcache, int regno)
1275 struct btrace_insn_iterator *replay;
1276 struct thread_info *tp;
1278 tp = find_thread_ptid (inferior_ptid);
1279 gdb_assert (tp != NULL);
1281 replay = tp->btrace.replay;
/* When replaying (and not writing a core file), registers come from the
   recorded trace rather than from the target beneath.  */
1282 if (replay != NULL && !record_btrace_generating_corefile)
1284 const struct btrace_insn *insn;
1285 struct gdbarch *gdbarch;
1288 gdbarch = get_regcache_arch (regcache);
1289 pcreg = gdbarch_pc_regnum (gdbarch);
1293 /* We can only provide the PC register.  */
1294 if (regno >= 0 && regno != pcreg)
/* Supply the PC of the instruction at the current replay position.  */
1297 insn = btrace_insn_get (replay);
1298 gdb_assert (insn != NULL);
1300 regcache_raw_supply (regcache, regno, &insn->pc);
/* Not replaying: forward the request to the target beneath.  */
1304 struct target_ops *t = ops->beneath;
1306 t->to_fetch_registers (t, regcache, regno);
1310 /* The to_store_registers method of target record-btrace. */
1313 record_btrace_store_registers (struct target_ops *ops,
1314 struct regcache *regcache, int regno)
1316 struct target_ops *t;
1318 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1319 error (_("This record target does not allow writing registers."));
1321 gdb_assert (may_write_registers != 0);
1324 t->to_store_registers (t, regcache, regno);
1327 /* The to_prepare_to_store method of target record-btrace. */
1330 record_btrace_prepare_to_store (struct target_ops *ops,
1331 struct regcache *regcache)
1333 struct target_ops *t;
1335 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1339 t->to_prepare_to_store (t, regcache);
1342 /* The branch trace frame cache. */
1344 struct btrace_frame_cache
1347 struct thread_info *tp;
1349 /* The frame info. */
1350 struct frame_info *frame;
1352 /* The branch trace function segment. */
1353 const struct btrace_function *bfun;
1356 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1358 static htab_t bfcache;
1360 /* hash_f for htab_create_alloc of bfcache. */
1363 bfcache_hash (const void *arg)
1365 const struct btrace_frame_cache *cache = arg;
1367 return htab_hash_pointer (cache->frame);
1370 /* eq_f for htab_create_alloc of bfcache. */
1373 bfcache_eq (const void *arg1, const void *arg2)
1375 const struct btrace_frame_cache *cache1 = arg1;
1376 const struct btrace_frame_cache *cache2 = arg2;
1378 return cache1->frame == cache2->frame;
1381 /* Create a new btrace frame cache. */
1383 static struct btrace_frame_cache *
1384 bfcache_new (struct frame_info *frame)
1386 struct btrace_frame_cache *cache;
1389 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1390 cache->frame = frame;
1392 slot = htab_find_slot (bfcache, cache, INSERT);
1393 gdb_assert (*slot == NULL);
1399 /* Extract the branch trace function from a branch trace frame. */
1401 static const struct btrace_function *
1402 btrace_get_frame_function (struct frame_info *frame)
1404 const struct btrace_frame_cache *cache;
1405 const struct btrace_function *bfun;
1406 struct btrace_frame_cache pattern;
1409 pattern.frame = frame;
1411 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1419 /* Implement stop_reason method for record_btrace_frame_unwind. */
1421 static enum unwind_stop_reason
1422 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1425 const struct btrace_frame_cache *cache;
1426 const struct btrace_function *bfun;
1428 cache = *this_cache;
1430 gdb_assert (bfun != NULL);
1432 if (bfun->up == NULL)
1433 return UNWIND_UNAVAILABLE;
1435 return UNWIND_NO_REASON;
1438 /* Implement this_id method for record_btrace_frame_unwind. */
1441 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1442 struct frame_id *this_id)
1444 const struct btrace_frame_cache *cache;
1445 const struct btrace_function *bfun;
1446 CORE_ADDR code, special;
1448 cache = *this_cache;
1451 gdb_assert (bfun != NULL);
1453 while (bfun->segment.prev != NULL)
1454 bfun = bfun->segment.prev;
1456 code = get_frame_func (this_frame);
1457 special = bfun->number;
1459 *this_id = frame_id_build_unavailable_stack_special (code, special);
1461 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1462 btrace_get_bfun_name (cache->bfun),
1463 core_addr_to_string_nz (this_id->code_addr),
1464 core_addr_to_string_nz (this_id->special_addr));
1467 /* Implement prev_register method for record_btrace_frame_unwind.  */
1469 static struct value *
1470 record_btrace_frame_prev_register (struct frame_info *this_frame,
1474 const struct btrace_frame_cache *cache;
1475 const struct btrace_function *bfun, *caller;
1476 const struct btrace_insn *insn;
1477 struct gdbarch *gdbarch;
/* The PC is the only register this unwinder can provide.  */
1481 gdbarch = get_frame_arch (this_frame);
1482 pcreg = gdbarch_pc_regnum (gdbarch);
1483 if (pcreg < 0 || regnum != pcreg)
1484 throw_error (NOT_AVAILABLE_ERROR,
1485 _("Registers are not available in btrace record history"));
1487 cache = *this_cache;
1489 gdb_assert (bfun != NULL);
/* NOTE(review): the assignment of CALLER (presumably from bfun's up link)
   and the NULL check guarding this error are missing from this excerpt.  */
1493 throw_error (NOT_AVAILABLE_ERROR,
1494 _("No caller in btrace record history"));
/* For an up link established by a return, use the first instruction of
   the caller segment; otherwise use the instruction after the caller's
   last (call site) instruction.  */
1496 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1498 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1503 insn = VEC_last (btrace_insn_s, caller->insn);
1506 pc += gdb_insn_length (gdbarch, pc);
1509 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1510 btrace_get_bfun_name (bfun), bfun->level,
1511 core_addr_to_string_nz (pc));
1513 return frame_unwind_got_address (this_frame, regnum, pc);
1516 /* Implement sniffer method for record_btrace_frame_unwind.  */
1519 record_btrace_frame_sniffer (const struct frame_unwind *self,
1520 struct frame_info *this_frame,
1523 const struct btrace_function *bfun;
1524 struct btrace_frame_cache *cache;
1525 struct thread_info *tp;
1526 struct frame_info *next;
1528 /* THIS_FRAME does not contain a reference to its thread.  */
1529 tp = find_thread_ptid (inferior_ptid);
1530 gdb_assert (tp != NULL);
1533 next = get_next_frame (this_frame);
/* For the sentinel (no NEXT frame), use the replay position.  */
1536 const struct btrace_insn_iterator *replay;
1538 replay = tp->btrace.replay;
1540 bfun = replay->function;
/* Otherwise, derive this frame's function segment from the callee frame
   via its up link (tail calls are handled by a separate sniffer).  */
1544 const struct btrace_function *callee;
1546 callee = btrace_get_frame_function (next);
1547 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1554 DEBUG ("[frame] sniffed frame for %s on level %d",
1555 btrace_get_bfun_name (bfun), bfun->level);
1557 /* This is our frame. Initialize the frame cache.  */
1558 cache = bfcache_new (this_frame);
1562 *this_cache = cache;
1566 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
/* Claims only frames whose callee was entered via a tail call
   (BFUN_UP_LINKS_TO_TAILCALL); everything else is left to the
   regular btrace frame sniffer above.  */
1569 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1570 struct frame_info *this_frame,
1573 const struct btrace_function *bfun, *callee;
1574 struct btrace_frame_cache *cache;
1575 struct frame_info *next;
1577 next = get_next_frame (this_frame);
1581 callee = btrace_get_frame_function (next);
1585 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1592 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1593 btrace_get_bfun_name (bfun), bfun->level);
1595 /* This is our frame. Initialize the frame cache. */
1596 cache = bfcache_new (this_frame);
1597 cache->tp = find_thread_ptid (inferior_ptid);
1600 *this_cache = cache;
/* Dealloc_cache method shared by both btrace unwinders: drop the
   per-frame cache entry from the global BFCACHE hash table.  */
1605 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1607 struct btrace_frame_cache *cache;
/* The entry must exist; it was inserted by the sniffer.  */
1612 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1613 gdb_assert (slot != NULL);
1615 htab_remove_elt (bfcache, cache);
1618 /* btrace recording does not store previous memory content, nor the stack
1619 frames' content. Any unwinding would return erroneous results as the stack
1620 contents no longer match the changed PC value restored from history.
1621 Therefore this unwinder reports any possibly unwound registers as
/* The unwinder for normal (call-linked) frames during replay.  */
1624 const struct frame_unwind record_btrace_frame_unwind =
1627 record_btrace_frame_unwind_stop_reason,
1628 record_btrace_frame_this_id,
1629 record_btrace_frame_prev_register,
1631 record_btrace_frame_sniffer,
1632 record_btrace_frame_dealloc_cache
/* The unwinder for tail-call frames; differs from the above only in
   its sniffer.  */
1635 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1638 record_btrace_frame_unwind_stop_reason,
1639 record_btrace_frame_this_id,
1640 record_btrace_frame_prev_register,
1642 record_btrace_tailcall_frame_sniffer,
1643 record_btrace_frame_dealloc_cache
1646 /* Implement the to_get_unwinder method. */
1648 static const struct frame_unwind *
1649 record_btrace_to_get_unwinder (struct target_ops *self)
1651 return &record_btrace_frame_unwind;
1654 /* Implement the to_get_tailcall_unwinder method. */
1656 static const struct frame_unwind *
1657 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1659 return &record_btrace_tailcall_frame_unwind;
1662 /* Indicate that TP should be resumed according to FLAG. */
/* Only records the intent in TP's btrace flags; the actual stepping
   happens later in record_btrace_wait.  Errors if TP is already
   scheduled to move.  */
1665 record_btrace_resume_thread (struct thread_info *tp,
1666 enum btrace_thread_flag flag)
1668 struct btrace_thread_info *btinfo;
1670 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1672 btinfo = &tp->btrace;
1674 if ((btinfo->flags & BTHR_MOVE) != 0)
1675 error (_("Thread already moving."));
1677 /* Fetch the latest branch trace. */
1680 /* A resume request overwrites a preceding stop request. */
1681 btinfo->flags &= ~BTHR_STOP;
1682 btinfo->flags |= flag;
1685 /* Find the thread to resume given a PTID. */
1687 static struct thread_info *
1688 record_btrace_find_resume_thread (ptid_t ptid)
1690 struct thread_info *tp;
1692 /* When asked to resume everything, we pick the current thread. */
1693 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1694 ptid = inferior_ptid;
1696 return find_thread_ptid (ptid);
1699 /* Start replaying a thread. */
/* Returns the new replay iterator (stored in TP's btrace info),
   positioned at the end of the recorded trace, i.e. the current
   instruction.  On error the previous state is restored and the
   exception is re-thrown.  */
1701 static struct btrace_insn_iterator *
1702 record_btrace_start_replaying (struct thread_info *tp)
1704 struct btrace_insn_iterator *replay;
1705 struct btrace_thread_info *btinfo;
1708 btinfo = &tp->btrace;
1711 /* We can't start replaying without trace. */
1712 if (btinfo->begin == NULL)
1715 /* Clear the executing flag to allow changes to the current frame.
1716 We are not actually running, yet. We just started a reverse execution
1717 command or a record goto command.
1718 For the latter, EXECUTING is false and this has no effect.
1719 For the former, EXECUTING is true and we're in to_wait, about to
1720 move the thread. Since we need to recompute the stack, we temporarily
1721 set EXECUTING to false. */
1722 executing = is_executing (tp->ptid);
1723 set_executing (tp->ptid, 0);
1725 /* GDB stores the current frame_id when stepping in order to detect steps
1727 Since frames are computed differently when we're replaying, we need to
1728 recompute those stored frames and fix them up so we can still detect
1729 subroutines after we started replaying. */
1732 struct frame_info *frame;
1733 struct frame_id frame_id;
1734 int upd_step_frame_id, upd_step_stack_frame_id;
1736 /* The current frame without replaying - computed via normal unwind. */
1737 frame = get_current_frame ();
1738 frame_id = get_frame_id (frame);
1740 /* Check if we need to update any stepping-related frame id's. */
1741 upd_step_frame_id = frame_id_eq (frame_id,
1742 tp->control.step_frame_id);
1743 upd_step_stack_frame_id = frame_id_eq (frame_id,
1744 tp->control.step_stack_frame_id);
1746 /* We start replaying at the end of the branch trace. This corresponds
1747 to the current instruction. */
1748 replay = XNEW (struct btrace_insn_iterator);
1749 btrace_insn_end (replay, btinfo);
1751 /* Skip gaps at the end of the trace. */
1752 while (btrace_insn_get (replay) == NULL)
1756 steps = btrace_insn_prev (replay, 1);
/* The whole trace consisted of gaps.  */
1758 error (_("No trace."));
1761 /* We're not replaying, yet. */
1762 gdb_assert (btinfo->replay == NULL);
1763 btinfo->replay = replay;
1765 /* Make sure we're not using any stale registers. */
1766 registers_changed_ptid (tp->ptid);
1768 /* The current frame with replaying - computed via btrace unwind. */
1769 frame = get_current_frame ();
1770 frame_id = get_frame_id (frame);
1772 /* Replace stepping related frames where necessary. */
1773 if (upd_step_frame_id)
1774 tp->control.step_frame_id = frame_id;
1775 if (upd_step_stack_frame_id)
1776 tp->control.step_stack_frame_id = frame_id;
/* On any error, undo everything done above before re-throwing.  */
1778 CATCH (except, RETURN_MASK_ALL)
1780 /* Restore the previous execution state. */
1781 set_executing (tp->ptid, executing);
1783 xfree (btinfo->replay);
1784 btinfo->replay = NULL;
1786 registers_changed_ptid (tp->ptid);
1788 throw_exception (except);
1792 /* Restore the previous execution state. */
1793 set_executing (tp->ptid, executing);
1798 /* Stop replaying a thread. */
/* Frees and clears the replay iterator; safe to call when TP is not
   replaying (xfree (NULL) is a no-op).  */
1801 record_btrace_stop_replaying (struct thread_info *tp)
1803 struct btrace_thread_info *btinfo;
1805 btinfo = &tp->btrace;
1807 xfree (btinfo->replay);
1808 btinfo->replay = NULL;
1810 /* Make sure we're not leaving any stale registers. */
1811 registers_changed_ptid (tp->ptid);
1814 /* The to_resume method of target record-btrace. */
/* Forwards to the target beneath unless we are replaying or resuming
   in reverse; otherwise records a move request for one thread and
   defers the actual stepping to record_btrace_wait.  */
1817 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1818 enum gdb_signal signal)
1820 struct thread_info *tp, *other;
1821 enum btrace_thread_flag flag;
1823 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1825 /* Store the execution direction of the last resume. */
1826 record_btrace_resume_exec_dir = execution_direction;
1828 tp = record_btrace_find_resume_thread (ptid);
1830 error (_("Cannot find thread to resume."));
1832 /* Stop replaying other threads if the thread to resume is not replaying. */
1833 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1834 ALL_NON_EXITED_THREADS (other)
1835 record_btrace_stop_replaying (other);
1837 /* As long as we're not replaying, just forward the request. */
1838 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1841 return ops->to_resume (ops, ptid, step, signal);
1844 /* Compute the btrace thread flag for the requested move. */
1846 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1848 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1850 /* At the moment, we only move a single thread. We could also move
1851 all threads in parallel by single-stepping each resumed thread
1852 until the first runs into an event.
1853 When we do that, we would want to continue all other threads.
1854 For now, just resume one thread to not confuse to_wait. */
1855 record_btrace_resume_thread (tp, flag);
1857 /* We just indicate the resume intent here. The actual stepping happens in
1858 record_btrace_wait below. */
1860 /* Async support. */
1861 if (target_can_async_p ())
1864 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1868 /* Find a thread to move. */
/* Prefers the thread identified by PTID if it has a pending move or
   stop request; otherwise picks any non-exited thread with one.  */
1870 static struct thread_info *
1871 record_btrace_find_thread_to_move (ptid_t ptid)
1873 struct thread_info *tp;
1875 /* First check the parameter thread. */
1876 tp = find_thread_ptid (ptid);
1877 if (tp != NULL && (tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
1880 /* Otherwise, find one other thread that has been resumed. */
1881 ALL_NON_EXITED_THREADS (tp)
1882 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
1888 /* Return a target_waitstatus indicating that we ran out of history. */
1890 static struct target_waitstatus
1891 btrace_step_no_history (void)
1893 struct target_waitstatus status;
1895 status.kind = TARGET_WAITKIND_NO_HISTORY;
1900 /* Return a target_waitstatus indicating that a step finished. */
1902 static struct target_waitstatus
1903 btrace_step_stopped (void)
1905 struct target_waitstatus status;
1907 status.kind = TARGET_WAITKIND_STOPPED;
/* SIGTRAP mimics a completed single-step on a live target.  */
1908 status.value.sig = GDB_SIGNAL_TRAP;
1913 /* Return a target_waitstatus indicating that a thread was stopped as
1916 static struct target_waitstatus
1917 btrace_step_stopped_on_request (void)
1919 struct target_waitstatus status;
1921 status.kind = TARGET_WAITKIND_STOPPED;
/* GDB_SIGNAL_0: stopped on request, not because of a signal.  */
1922 status.value.sig = GDB_SIGNAL_0;
1927 /* Clear the record histories. */
/* Frees the insn and call history browsing state so the next history
   command starts anew from the current replay position.  */
1930 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1932 xfree (btinfo->insn_history);
1933 xfree (btinfo->call_history);
1935 btinfo->insn_history = NULL;
1936 btinfo->call_history = NULL;
1939 /* Step a single thread. */
/* Consumes TP's pending move/stop flags and performs one step,
   reverse-step, continue, or reverse-continue over the recorded
   trace, returning the resulting wait status.  NOTE(review): the
   dispatch over the individual BTHR_* flags is elided in this
   excerpt; the branches below appear in flag order — confirm
   against the full source.  */
1941 static struct target_waitstatus
1942 record_btrace_step_thread (struct thread_info *tp)
1944 struct btrace_insn_iterator *replay, end;
1945 struct btrace_thread_info *btinfo;
1946 struct address_space *aspace;
1947 struct inferior *inf;
1948 enum btrace_thread_flag flags;
1952 btinfo = &tp->btrace;
1953 replay = btinfo->replay;
/* Atomically fetch and clear the pending request.  */
1955 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
1956 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1958 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1960 /* We can't step without an execution history. */
1961 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
1962 return btrace_step_no_history ();
1967 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
/* BTHR_STOP: report the requested stop.  */
1970 return btrace_step_stopped_on_request ();
1973 /* We're done if we're not replaying. */
1975 return btrace_step_no_history ();
1977 /* Skip gaps during replay. */
1980 steps = btrace_insn_next (replay, 1);
1983 record_btrace_stop_replaying (tp);
1984 return btrace_step_no_history ();
1987 while (btrace_insn_get (replay) == NULL);
1989 /* Determine the end of the instruction trace. */
1990 btrace_insn_end (&end, btinfo);
1992 /* We stop replaying if we reached the end of the trace. */
1993 if (btrace_insn_cmp (replay, &end) == 0)
1994 record_btrace_stop_replaying (tp);
1996 return btrace_step_stopped ();
1999 /* Start replaying if we're not already doing so. */
2001 replay = record_btrace_start_replaying (tp);
2003 /* If we can't step any further, we reached the end of the history.
2004 Skip gaps during replay. */
2007 steps = btrace_insn_prev (replay, 1);
2009 return btrace_step_no_history ();
2012 while (btrace_insn_get (replay) == NULL);
2014 return btrace_step_stopped ();
2017 /* We're done if we're not replaying. */
2019 return btrace_step_no_history ();
2021 inf = find_inferior_ptid (tp->ptid);
2022 aspace = inf->aspace;
2024 /* Determine the end of the instruction trace. */
2025 btrace_insn_end (&end, btinfo);
/* Forward continue: step instruction by instruction, checking for
   breakpoints at each replayed PC.  */
2029 const struct btrace_insn *insn;
2031 /* Skip gaps during replay. */
2034 steps = btrace_insn_next (replay, 1);
2037 record_btrace_stop_replaying (tp);
2038 return btrace_step_no_history ();
2041 insn = btrace_insn_get (replay);
2043 while (insn == NULL);
2045 /* We stop replaying if we reached the end of the trace. */
2046 if (btrace_insn_cmp (replay, &end) == 0)
2048 record_btrace_stop_replaying (tp);
2049 return btrace_step_no_history ();
2052 DEBUG ("stepping %d (%s) ... %s", tp->num,
2053 target_pid_to_str (tp->ptid),
2054 core_addr_to_string_nz (insn->pc));
2056 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
2057 &btinfo->stop_reason))
2058 return btrace_step_stopped ();
2062 /* Start replaying if we're not already doing so. */
2064 replay = record_btrace_start_replaying (tp);
2066 inf = find_inferior_ptid (tp->ptid);
2067 aspace = inf->aspace;
/* Reverse continue: walk backwards until a breakpoint is hit or the
   beginning of the history is reached.  */
2071 const struct btrace_insn *insn;
2073 /* If we can't step any further, we reached the end of the history.
2074 Skip gaps during replay. */
2077 steps = btrace_insn_prev (replay, 1);
2079 return btrace_step_no_history ();
2081 insn = btrace_insn_get (replay);
2083 while (insn == NULL);
2085 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
2086 target_pid_to_str (tp->ptid),
2087 core_addr_to_string_nz (insn->pc));
2089 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
2090 &btinfo->stop_reason))
2091 return btrace_step_stopped ();
2096 /* The to_wait method of target record-btrace. */
/* When replaying (or moving in reverse), performs the actual stepping
   recorded by to_resume and reports the result; otherwise forwards to
   the target beneath.  */
2099 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2100 struct target_waitstatus *status, int options)
2102 struct thread_info *tp, *other;
2104 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2106 /* As long as we're not replaying, just forward the request. */
2107 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2110 return ops->to_wait (ops, ptid, status, options);
2113 /* Let's find a thread to move. */
2114 tp = record_btrace_find_thread_to_move (ptid);
/* Nothing to do: report IGNORE so the caller keeps waiting.  */
2117 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
2119 status->kind = TARGET_WAITKIND_IGNORE;
2120 return minus_one_ptid;
2123 /* We only move a single thread. We're not able to correlate threads. */
2124 *status = record_btrace_step_thread (tp);
2126 /* Stop all other threads. */
2127 if (!target_is_non_stop_p ())
2128 ALL_NON_EXITED_THREADS (other)
2129 other->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2131 /* Start record histories anew from the current position. */
2132 record_btrace_clear_histories (&tp->btrace);
2134 /* We moved the replay position but did not update registers. */
2135 registers_changed_ptid (tp->ptid);
2140 /* The to_stop method of target record-btrace. */
/* While replaying, a stop request simply replaces any pending move
   request on the matching threads; otherwise it is forwarded.  */
2143 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2145 DEBUG ("stop %s", target_pid_to_str (ptid));
2147 /* As long as we're not replaying, just forward the request. */
2148 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2151 ops->to_stop (ops, ptid);
2155 struct thread_info *tp;
2157 ALL_NON_EXITED_THREADS (tp)
2158 if (ptid_match (tp->ptid, ptid))
2160 tp->btrace.flags &= ~BTHR_MOVE;
2161 tp->btrace.flags |= BTHR_STOP;
2166 /* The to_can_execute_reverse method of target record-btrace. */
2169 record_btrace_can_execute_reverse (struct target_ops *self)
2174 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
/* While replaying, answer from the recorded stop reason; otherwise
   delegate to the target beneath.  The same pattern applies to the
   three methods that follow.  */
2177 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2179 if (record_btrace_is_replaying (ops))
2181 struct thread_info *tp = inferior_thread ();
2183 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2186 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2189 /* The to_supports_stopped_by_sw_breakpoint method of target
2193 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2195 if (record_btrace_is_replaying (ops))
2198 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2201 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2204 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2206 if (record_btrace_is_replaying (ops))
2208 struct thread_info *tp = inferior_thread ();
2210 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2213 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2216 /* The to_supports_stopped_by_hw_breakpoint method of target
2220 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2222 if (record_btrace_is_replaying (ops))
2225 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2228 /* The to_update_thread_list method of target record-btrace. */
2231 record_btrace_update_thread_list (struct target_ops *ops)
2233 /* We don't add or remove threads during replay. */
2234 if (record_btrace_is_replaying (ops))
2237 /* Forward the request. */
2239 ops->to_update_thread_list (ops);
2242 /* The to_thread_alive method of target record-btrace. */
/* During replay the thread set is frozen, so "alive" just means
   "known"; otherwise delegate to the target beneath.  */
2245 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2247 /* We don't add or remove threads during replay. */
2248 if (record_btrace_is_replaying (ops))
2249 return find_thread_ptid (ptid) != NULL;
2251 /* Forward the request. */
2253 return ops->to_thread_alive (ops, ptid);
2256 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
/* Moves TP's replay position to IT, starting or stopping replay as
   needed, then refreshes registers, histories, and the displayed
   frame.  */
2260 record_btrace_set_replay (struct thread_info *tp,
2261 const struct btrace_insn_iterator *it)
2263 struct btrace_thread_info *btinfo;
2265 btinfo = &tp->btrace;
2267 if (it == NULL || it->function == NULL)
2268 record_btrace_stop_replaying (tp);
2271 if (btinfo->replay == NULL)
2272 record_btrace_start_replaying (tp);
/* Already at IT: nothing to do.  */
2273 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2276 *btinfo->replay = *it;
2277 registers_changed_ptid (tp->ptid);
2280 /* Start anew from the new replay position. */
2281 record_btrace_clear_histories (btinfo);
2283 stop_pc = regcache_read_pc (get_current_regcache ());
2284 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2287 /* The to_goto_record_begin method of target record-btrace. */
2290 record_btrace_goto_begin (struct target_ops *self)
2292 struct thread_info *tp;
2293 struct btrace_insn_iterator begin;
2295 tp = require_btrace_thread ();
2297 btrace_insn_begin (&begin, &tp->btrace);
2298 record_btrace_set_replay (tp, &begin);
2301 /* The to_goto_record_end method of target record-btrace. */
/* A NULL iterator means "stop replaying", i.e. go to the end.  */
2304 record_btrace_goto_end (struct target_ops *ops)
2306 struct thread_info *tp;
2308 tp = require_btrace_thread ();
2310 record_btrace_set_replay (tp, NULL);
2313 /* The to_goto_record method of target record-btrace. */
/* Jump the replay position to instruction number INSN.  */
2316 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2318 struct thread_info *tp;
2319 struct btrace_insn_iterator it;
2320 unsigned int number;
2325 /* Check for wrap-arounds. */
2327 error (_("Instruction number out of range."));
2329 tp = require_btrace_thread ();
2331 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2333 error (_("No such instruction."));
2335 record_btrace_set_replay (tp, &it);
2338 /* The to_execution_direction target method. */
/* Reports the direction of the last resume; see record-full.c for
   the rationale.  */
2340 static enum exec_direction_kind
2341 record_btrace_execution_direction (struct target_ops *self)
2343 return record_btrace_resume_exec_dir;
2346 /* The to_prepare_to_generate_core target method. */
2349 record_btrace_prepare_to_generate_core (struct target_ops *self)
2351 record_btrace_generating_corefile = 1;
2354 /* The to_done_generating_core target method. */
2357 record_btrace_done_generating_core (struct target_ops *self)
2359 record_btrace_generating_corefile = 0;
2362 /* Initialize the record-btrace target ops. */
/* Fills in the static RECORD_BTRACE_OPS target vector; called once
   from _initialize_record_btrace below.  */
2365 init_record_btrace_ops (void)
2367 struct target_ops *ops;
2369 ops = &record_btrace_ops;
2370 ops->to_shortname = "record-btrace";
2371 ops->to_longname = "Branch tracing target";
2372 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2373 ops->to_open = record_btrace_open;
2374 ops->to_close = record_btrace_close;
2375 ops->to_async = record_btrace_async;
2376 ops->to_detach = record_detach;
2377 ops->to_disconnect = record_disconnect;
2378 ops->to_mourn_inferior = record_mourn_inferior;
2379 ops->to_kill = record_kill;
2380 ops->to_stop_recording = record_btrace_stop_recording;
2381 ops->to_info_record = record_btrace_info;
2382 ops->to_insn_history = record_btrace_insn_history;
2383 ops->to_insn_history_from = record_btrace_insn_history_from;
2384 ops->to_insn_history_range = record_btrace_insn_history_range;
2385 ops->to_call_history = record_btrace_call_history;
2386 ops->to_call_history_from = record_btrace_call_history_from;
2387 ops->to_call_history_range = record_btrace_call_history_range;
2388 ops->to_record_is_replaying = record_btrace_is_replaying;
2389 ops->to_xfer_partial = record_btrace_xfer_partial;
2390 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2391 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2392 ops->to_fetch_registers = record_btrace_fetch_registers;
2393 ops->to_store_registers = record_btrace_store_registers;
2394 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2395 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2396 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2397 ops->to_resume = record_btrace_resume;
2398 ops->to_wait = record_btrace_wait;
2399 ops->to_stop = record_btrace_stop;
2400 ops->to_update_thread_list = record_btrace_update_thread_list;
2401 ops->to_thread_alive = record_btrace_thread_alive;
2402 ops->to_goto_record_begin = record_btrace_goto_begin;
2403 ops->to_goto_record_end = record_btrace_goto_end;
2404 ops->to_goto_record = record_btrace_goto;
2405 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2406 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2407 ops->to_supports_stopped_by_sw_breakpoint
2408 = record_btrace_supports_stopped_by_sw_breakpoint;
2409 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2410 ops->to_supports_stopped_by_hw_breakpoint
2411 = record_btrace_supports_stopped_by_hw_breakpoint;
2412 ops->to_execution_direction = record_btrace_execution_direction;
2413 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2414 ops->to_done_generating_core = record_btrace_done_generating_core;
2415 ops->to_stratum = record_stratum;
2416 ops->to_magic = OPS_MAGIC;
2419 /* Start recording in BTS format. */
/* On failure the configured format is reset to NONE before the
   exception propagates, so a later attempt starts clean.  */
2422 cmd_record_btrace_bts_start (char *args, int from_tty)
2424 if (args != NULL && *args != 0)
2425 error (_("Invalid argument."));
2427 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2431 execute_command ("target record-btrace", from_tty);
2433 CATCH (exception, RETURN_MASK_ALL)
2435 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2436 throw_exception (exception);
2441 /* Start recording Intel(R) Processor Trace. */
2444 cmd_record_btrace_pt_start (char *args, int from_tty)
2446 if (args != NULL && *args != 0)
2447 error (_("Invalid argument."));
2449 record_btrace_conf.format = BTRACE_FORMAT_PT;
2453 execute_command ("target record-btrace", from_tty);
2455 CATCH (exception, RETURN_MASK_ALL)
2457 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2458 throw_exception (exception);
2463 /* Alias for "target record". */
/* Tries PT first; if that fails, falls back to BTS; if both fail,
   resets the format to NONE and re-throws.  */
2466 cmd_record_btrace_start (char *args, int from_tty)
2468 if (args != NULL && *args != 0)
2469 error (_("Invalid argument."));
2471 record_btrace_conf.format = BTRACE_FORMAT_PT;
2475 execute_command ("target record-btrace", from_tty);
2477 CATCH (exception, RETURN_MASK_ALL)
2479 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2483 execute_command ("target record-btrace", from_tty);
2485 CATCH (exception, RETURN_MASK_ALL)
2487 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2488 throw_exception (exception);
2495 /* The "set record btrace" command. */
2498 cmd_set_record_btrace (char *args, int from_tty)
2500 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2503 /* The "show record btrace" command. */
2506 cmd_show_record_btrace (char *args, int from_tty)
2508 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2511 /* The "show record btrace replay-memory-access" command. */
2514 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2515 struct cmd_list_element *c, const char *value)
2517 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2518 replay_memory_access)
2521 /* The "set record btrace bts" command. */
/* A bare "set record btrace bts" only prints the available
   subcommands.  */
2524 cmd_set_record_btrace_bts (char *args, int from_tty)
2526 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2527 "by an appropriate subcommand.\n"));
2528 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2529 all_commands, gdb_stdout);
2532 /* The "show record btrace bts" command. */
2535 cmd_show_record_btrace_bts (char *args, int from_tty)
2537 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2540 /* The "set record btrace pt" command. */
2543 cmd_set_record_btrace_pt (char *args, int from_tty)
2545 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2546 "by an appropriate subcommand.\n"));
2547 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2548 all_commands, gdb_stdout);
2551 /* The "show record btrace pt" command. */
2554 cmd_show_record_btrace_pt (char *args, int from_tty)
2556 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2559 /* The "record bts buffer-size" show value function. */
2562 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2563 struct cmd_list_element *c,
2566 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2570 /* The "record pt buffer-size" show value function. */
2573 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2574 struct cmd_list_element *c,
2577 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2581 void _initialize_record_btrace (void);
2583 /* Initialize btrace commands. */
/* Registers the "record btrace" command tree, the set/show options,
   the target itself, the frame-cache hash table, and the default
   trace buffer sizes.  */
2586 _initialize_record_btrace (void)
2588 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2589 _("Start branch trace recording."), &record_btrace_cmdlist,
2590 "record btrace ", 0, &record_cmdlist);
2591 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2593 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2595 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2596 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2597 This format may not be available on all processors."),
2598 &record_btrace_cmdlist);
2599 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2601 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2603 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2604 This format may not be available on all processors."),
2605 &record_btrace_cmdlist);
2606 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2608 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2609 _("Set record options"), &set_record_btrace_cmdlist,
2610 "set record btrace ", 0, &set_record_cmdlist);
2612 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2613 _("Show record options"), &show_record_btrace_cmdlist,
2614 "show record btrace ", 0, &show_record_cmdlist);
2616 add_setshow_enum_cmd ("replay-memory-access", no_class,
2617 replay_memory_access_types, &replay_memory_access, _("\
2618 Set what memory accesses are allowed during replay."), _("\
2619 Show what memory accesses are allowed during replay."),
2620 _("Default is READ-ONLY.\n\n\
2621 The btrace record target does not trace data.\n\
2622 The memory therefore corresponds to the live target and not \
2623 to the current replay position.\n\n\
2624 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2625 When READ-WRITE, allow accesses to read-only and read-write memory during \
2627 NULL, cmd_show_replay_memory_access,
2628 &set_record_btrace_cmdlist,
2629 &show_record_btrace_cmdlist);
2631 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2632 _("Set record btrace bts options"),
2633 &set_record_btrace_bts_cmdlist,
2634 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2636 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2637 _("Show record btrace bts options"),
2638 &show_record_btrace_bts_cmdlist,
2639 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2641 add_setshow_uinteger_cmd ("buffer-size", no_class,
2642 &record_btrace_conf.bts.size,
2643 _("Set the record/replay bts buffer size."),
2644 _("Show the record/replay bts buffer size."), _("\
2645 When starting recording request a trace buffer of this size. \
2646 The actual buffer size may differ from the requested size. \
2647 Use \"info record\" to see the actual buffer size.\n\n\
2648 Bigger buffers allow longer recording but also take more time to process \
2649 the recorded execution trace.\n\n\
2650 The trace buffer size may not be changed while recording."), NULL,
2651 show_record_bts_buffer_size_value,
2652 &set_record_btrace_bts_cmdlist,
2653 &show_record_btrace_bts_cmdlist);
2655 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2656 _("Set record btrace pt options"),
2657 &set_record_btrace_pt_cmdlist,
2658 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2660 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2661 _("Show record btrace pt options"),
2662 &show_record_btrace_pt_cmdlist,
2663 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2665 add_setshow_uinteger_cmd ("buffer-size", no_class,
2666 &record_btrace_conf.pt.size,
2667 _("Set the record/replay pt buffer size."),
2668 _("Show the record/replay pt buffer size."), _("\
2669 Bigger buffers allow longer recording but also take more time to process \
2670 the recorded execution.\n\
2671 The actual buffer size may differ from the requested size. Use \"info record\" \
2672 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2673 &set_record_btrace_pt_cmdlist,
2674 &show_record_btrace_pt_cmdlist);
2676 init_record_btrace_ops ();
2677 add_target (&record_btrace_ops);
2679 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
/* Default trace buffer sizes: 64 KiB for BTS, 16 KiB for PT.  */
2682 record_btrace_conf.bts.size = 64 * 1024;
2683 record_btrace_conf.pt.size = 16 * 1024;