1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer *record_btrace_thread_observer;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access = replay_memory_access_read_only;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element *set_record_btrace_cmdlist;
62 static struct cmd_list_element *show_record_btrace_cmdlist;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile;
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf;
76 /* Command list for "record btrace". */
77 static struct cmd_list_element *record_btrace_cmdlist;
79 /* Command lists for "set/show record btrace bts". */
80 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
81 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83 /* Command lists for "set/show record btrace pt". */
84 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
85 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
100 /* Update the branch trace for the current thread and return a pointer to its
103 Throws an error if there is no thread or no trace. This function never
106 static struct thread_info *
107 require_btrace_thread (void)
109 struct thread_info *tp;
113 tp = find_thread_ptid (inferior_ptid);
115 error (_("No thread."));
119 if (btrace_is_empty (tp))
120 error (_("No trace."));
125 /* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
128 Throws an error if there is no thread or no trace. This function never
131 static struct btrace_thread_info *
132 require_btrace (void)
134 struct thread_info *tp;
136 tp = require_btrace_thread ();
141 /* Enable branch tracing for one thread. Warn on errors. */
144 record_btrace_enable_warn (struct thread_info *tp)
148 btrace_enable (tp, &record_btrace_conf);
150 CATCH (error, RETURN_MASK_ERROR)
152 warning ("%s", error.message);
/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}
169 /* Enable automatic tracing of new threads. */
172 record_btrace_auto_enable (void)
174 DEBUG ("attach thread observer");
176 record_btrace_thread_observer
177 = observer_attach_new_thread (record_btrace_enable_warn);
180 /* Disable automatic tracing of new threads. */
183 record_btrace_auto_disable (void)
185 /* The observer may have been detached, already. */
186 if (record_btrace_thread_observer == NULL)
189 DEBUG ("detach thread observer");
191 observer_detach_new_thread (record_btrace_thread_observer);
192 record_btrace_thread_observer = NULL;
195 /* The record-btrace async event handler function. */
198 record_btrace_handle_async_inferior_event (gdb_client_data data)
200 inferior_event_handler (INF_REG_EVENT, NULL);
203 /* The to_open method of target record-btrace. */
206 record_btrace_open (const char *args, int from_tty)
208 struct cleanup *disable_chain;
209 struct thread_info *tp;
215 if (!target_has_execution)
216 error (_("The program is not being run."));
219 error (_("Record btrace can't debug inferior in non-stop mode."));
221 gdb_assert (record_btrace_thread_observer == NULL);
223 disable_chain = make_cleanup (null_cleanup, NULL);
224 ALL_NON_EXITED_THREADS (tp)
225 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
227 btrace_enable (tp, &record_btrace_conf);
229 make_cleanup (record_btrace_disable_callback, tp);
232 record_btrace_auto_enable ();
234 push_target (&record_btrace_ops);
236 record_btrace_async_inferior_event_handler
237 = create_async_event_handler (record_btrace_handle_async_inferior_event,
239 record_btrace_generating_corefile = 0;
241 observer_notify_record_changed (current_inferior (), 1);
243 discard_cleanups (disable_chain);
246 /* The to_stop_recording method of target record-btrace. */
249 record_btrace_stop_recording (struct target_ops *self)
251 struct thread_info *tp;
253 DEBUG ("stop recording");
255 record_btrace_auto_disable ();
257 ALL_NON_EXITED_THREADS (tp)
258 if (tp->btrace.target != NULL)
262 /* The to_close method of target record-btrace. */
265 record_btrace_close (struct target_ops *self)
267 struct thread_info *tp;
269 if (record_btrace_async_inferior_event_handler != NULL)
270 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
272 /* Make sure automatic recording gets disabled even if we did not stop
273 recording before closing the record-btrace target. */
274 record_btrace_auto_disable ();
276 /* We should have already stopped recording.
277 Tear down btrace in case we have not. */
278 ALL_NON_EXITED_THREADS (tp)
279 btrace_teardown (tp);
282 /* The to_async method of target record-btrace. */
285 record_btrace_async (struct target_ops *ops, int enable)
288 mark_async_event_handler (record_btrace_async_inferior_event_handler);
290 clear_async_event_handler (record_btrace_async_inferior_event_handler);
292 ops->beneath->to_async (ops->beneath, enable);
/* Adjusts the size and returns a human readable size suffix.

   SIZE is scaled down in place to the largest power-of-1024 unit that
   divides it exactly; the matching suffix ("GB", "MB", "kB", or "") is
   returned.  Sizes that are not an exact multiple of 1kB are returned
   unchanged with an empty suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
323 /* Print a BTS configuration. */
326 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
334 suffix = record_btrace_adjust_size (&size);
335 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
339 /* Print an Intel(R) Processor Trace configuration. */
342 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
350 suffix = record_btrace_adjust_size (&size);
351 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
355 /* Print a branch tracing configuration. */
358 record_btrace_print_conf (const struct btrace_config *conf)
360 printf_unfiltered (_("Recording format: %s.\n"),
361 btrace_format_string (conf->format));
363 switch (conf->format)
365 case BTRACE_FORMAT_NONE:
368 case BTRACE_FORMAT_BTS:
369 record_btrace_print_bts_conf (&conf->bts);
372 case BTRACE_FORMAT_PT:
373 record_btrace_print_pt_conf (&conf->pt);
377 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
380 /* The to_info_record method of target record-btrace. */
383 record_btrace_info (struct target_ops *self)
385 struct btrace_thread_info *btinfo;
386 const struct btrace_config *conf;
387 struct thread_info *tp;
388 unsigned int insns, calls, gaps;
392 tp = find_thread_ptid (inferior_ptid);
394 error (_("No thread."));
396 btinfo = &tp->btrace;
398 conf = btrace_conf (btinfo);
400 record_btrace_print_conf (conf);
408 if (!btrace_is_empty (tp))
410 struct btrace_call_iterator call;
411 struct btrace_insn_iterator insn;
413 btrace_call_end (&call, btinfo);
414 btrace_call_prev (&call, 1);
415 calls = btrace_call_number (&call);
417 btrace_insn_end (&insn, btinfo);
419 insns = btrace_insn_number (&insn);
422 /* The last instruction does not really belong to the trace. */
429 /* Skip gaps at the end. */
432 steps = btrace_insn_prev (&insn, 1);
436 insns = btrace_insn_number (&insn);
441 gaps = btinfo->ngaps;
444 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
445 "for thread %d (%s).\n"), insns, calls, gaps,
446 tp->num, target_pid_to_str (tp->ptid));
448 if (btrace_is_replaying (tp))
449 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
450 btrace_insn_number (btinfo->replay));
453 /* Print a decode error. */
456 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
457 enum btrace_format format)
462 errstr = _("unknown");
470 case BTRACE_FORMAT_BTS:
476 case BDE_BTS_OVERFLOW:
477 errstr = _("instruction overflow");
480 case BDE_BTS_INSN_SIZE:
481 errstr = _("unknown instruction");
486 #if defined (HAVE_LIBIPT)
487 case BTRACE_FORMAT_PT:
490 case BDE_PT_USER_QUIT:
492 errstr = _("trace decode cancelled");
495 case BDE_PT_DISABLED:
497 errstr = _("disabled");
500 case BDE_PT_OVERFLOW:
502 errstr = _("overflow");
507 errstr = pt_errstr (pt_errcode (errcode));
511 #endif /* defined (HAVE_LIBIPT) */
514 ui_out_text (uiout, _("["));
517 ui_out_text (uiout, _("decode error ("));
518 ui_out_field_int (uiout, "errcode", errcode);
519 ui_out_text (uiout, _("): "));
521 ui_out_text (uiout, errstr);
522 ui_out_text (uiout, _("]\n"));
/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
533 /* Disassemble a section of the recorded instruction trace. */
536 btrace_insn_history (struct ui_out *uiout,
537 const struct btrace_thread_info *btinfo,
538 const struct btrace_insn_iterator *begin,
539 const struct btrace_insn_iterator *end, int flags)
541 struct gdbarch *gdbarch;
542 struct btrace_insn_iterator it;
544 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
545 btrace_insn_number (end));
547 gdbarch = target_gdbarch ();
549 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
551 const struct btrace_insn *insn;
553 insn = btrace_insn_get (&it);
555 /* A NULL instruction indicates a gap in the trace. */
558 const struct btrace_config *conf;
560 conf = btrace_conf (btinfo);
562 /* We have trace so we must have a configuration. */
563 gdb_assert (conf != NULL);
565 btrace_ui_out_decode_error (uiout, it.function->errcode,
572 /* We may add a speculation prefix later. We use the same space
573 that is used for the pc prefix. */
574 if ((flags & DISASSEMBLY_OMIT_PC) == 0)
575 strncpy (prefix, pc_prefix (insn->pc), 3);
584 /* Print the instruction index. */
585 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
586 ui_out_text (uiout, "\t");
588 /* Indicate speculative execution by a leading '?'. */
589 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
592 /* Print the prefix; we tell gdb_disassembly below to omit it. */
593 ui_out_field_fmt (uiout, "prefix", "%s", prefix);
595 /* Disassembly with '/m' flag may not produce the expected result.
597 gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
598 1, insn->pc, insn->pc + 1);
603 /* The to_insn_history method of target record-btrace. */
606 record_btrace_insn_history (struct target_ops *self, int size, int flags)
608 struct btrace_thread_info *btinfo;
609 struct btrace_insn_history *history;
610 struct btrace_insn_iterator begin, end;
611 struct cleanup *uiout_cleanup;
612 struct ui_out *uiout;
613 unsigned int context, covered;
615 uiout = current_uiout;
616 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
618 context = abs (size);
620 error (_("Bad record instruction-history-size."));
622 btinfo = require_btrace ();
623 history = btinfo->insn_history;
626 struct btrace_insn_iterator *replay;
628 DEBUG ("insn-history (0x%x): %d", flags, size);
630 /* If we're replaying, we start at the replay position. Otherwise, we
631 start at the tail of the trace. */
632 replay = btinfo->replay;
636 btrace_insn_end (&begin, btinfo);
638 /* We start from here and expand in the requested direction. Then we
639 expand in the other direction, as well, to fill up any remaining
644 /* We want the current position covered, as well. */
645 covered = btrace_insn_next (&end, 1);
646 covered += btrace_insn_prev (&begin, context - covered);
647 covered += btrace_insn_next (&end, context - covered);
651 covered = btrace_insn_next (&end, context);
652 covered += btrace_insn_prev (&begin, context - covered);
657 begin = history->begin;
660 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
661 btrace_insn_number (&begin), btrace_insn_number (&end));
666 covered = btrace_insn_prev (&begin, context);
671 covered = btrace_insn_next (&end, context);
676 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
680 printf_unfiltered (_("At the start of the branch trace record.\n"));
682 printf_unfiltered (_("At the end of the branch trace record.\n"));
685 btrace_set_insn_history (btinfo, &begin, &end);
686 do_cleanups (uiout_cleanup);
689 /* The to_insn_history_range method of target record-btrace. */
692 record_btrace_insn_history_range (struct target_ops *self,
693 ULONGEST from, ULONGEST to, int flags)
695 struct btrace_thread_info *btinfo;
696 struct btrace_insn_history *history;
697 struct btrace_insn_iterator begin, end;
698 struct cleanup *uiout_cleanup;
699 struct ui_out *uiout;
700 unsigned int low, high;
703 uiout = current_uiout;
704 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
709 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
711 /* Check for wrap-arounds. */
712 if (low != from || high != to)
713 error (_("Bad range."));
716 error (_("Bad range."));
718 btinfo = require_btrace ();
720 found = btrace_find_insn_by_number (&begin, btinfo, low);
722 error (_("Range out of bounds."));
724 found = btrace_find_insn_by_number (&end, btinfo, high);
727 /* Silently truncate the range. */
728 btrace_insn_end (&end, btinfo);
732 /* We want both begin and end to be inclusive. */
733 btrace_insn_next (&end, 1);
736 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
737 btrace_set_insn_history (btinfo, &begin, &end);
739 do_cleanups (uiout_cleanup);
742 /* The to_insn_history_from method of target record-btrace. */
745 record_btrace_insn_history_from (struct target_ops *self,
746 ULONGEST from, int size, int flags)
748 ULONGEST begin, end, context;
750 context = abs (size);
752 error (_("Bad record instruction-history-size."));
761 begin = from - context + 1;
766 end = from + context - 1;
768 /* Check for wrap-around. */
773 record_btrace_insn_history_range (self, begin, end, flags);
776 /* Print the instruction number range for a function call history line. */
779 btrace_call_history_insn_range (struct ui_out *uiout,
780 const struct btrace_function *bfun)
782 unsigned int begin, end, size;
784 size = VEC_length (btrace_insn_s, bfun->insn);
785 gdb_assert (size > 0);
787 begin = bfun->insn_offset;
788 end = begin + size - 1;
790 ui_out_field_uint (uiout, "insn begin", begin);
791 ui_out_text (uiout, ",");
792 ui_out_field_uint (uiout, "insn end", end);
795 /* Compute the lowest and highest source line for the instructions in BFUN
796 and return them in PBEGIN and PEND.
797 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
798 result from inlining or macro expansion. */
801 btrace_compute_src_line_range (const struct btrace_function *bfun,
802 int *pbegin, int *pend)
804 struct btrace_insn *insn;
805 struct symtab *symtab;
817 symtab = symbol_symtab (sym);
819 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
821 struct symtab_and_line sal;
823 sal = find_pc_line (insn->pc, 0);
824 if (sal.symtab != symtab || sal.line == 0)
827 begin = min (begin, sal.line);
828 end = max (end, sal.line);
836 /* Print the source line information for a function call history line. */
839 btrace_call_history_src_line (struct ui_out *uiout,
840 const struct btrace_function *bfun)
849 ui_out_field_string (uiout, "file",
850 symtab_to_filename_for_display (symbol_symtab (sym)));
852 btrace_compute_src_line_range (bfun, &begin, &end);
856 ui_out_text (uiout, ":");
857 ui_out_field_int (uiout, "min line", begin);
862 ui_out_text (uiout, ",");
863 ui_out_field_int (uiout, "max line", end);
866 /* Get the name of a branch trace function. */
869 btrace_get_bfun_name (const struct btrace_function *bfun)
871 struct minimal_symbol *msym;
881 return SYMBOL_PRINT_NAME (sym);
882 else if (msym != NULL)
883 return MSYMBOL_PRINT_NAME (msym);
888 /* Disassemble a section of the recorded function trace. */
891 btrace_call_history (struct ui_out *uiout,
892 const struct btrace_thread_info *btinfo,
893 const struct btrace_call_iterator *begin,
894 const struct btrace_call_iterator *end,
895 enum record_print_flag flags)
897 struct btrace_call_iterator it;
899 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
900 btrace_call_number (end));
902 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
904 const struct btrace_function *bfun;
905 struct minimal_symbol *msym;
908 bfun = btrace_call_get (&it);
912 /* Print the function index. */
913 ui_out_field_uint (uiout, "index", bfun->number);
914 ui_out_text (uiout, "\t");
916 /* Indicate gaps in the trace. */
917 if (bfun->errcode != 0)
919 const struct btrace_config *conf;
921 conf = btrace_conf (btinfo);
923 /* We have trace so we must have a configuration. */
924 gdb_assert (conf != NULL);
926 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
931 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
933 int level = bfun->level + btinfo->level, i;
935 for (i = 0; i < level; ++i)
936 ui_out_text (uiout, " ");
940 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
941 else if (msym != NULL)
942 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
943 else if (!ui_out_is_mi_like_p (uiout))
944 ui_out_field_string (uiout, "function", "??");
946 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
948 ui_out_text (uiout, _("\tinst "));
949 btrace_call_history_insn_range (uiout, bfun);
952 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
954 ui_out_text (uiout, _("\tat "));
955 btrace_call_history_src_line (uiout, bfun);
958 ui_out_text (uiout, "\n");
962 /* The to_call_history method of target record-btrace. */
965 record_btrace_call_history (struct target_ops *self, int size, int flags)
967 struct btrace_thread_info *btinfo;
968 struct btrace_call_history *history;
969 struct btrace_call_iterator begin, end;
970 struct cleanup *uiout_cleanup;
971 struct ui_out *uiout;
972 unsigned int context, covered;
974 uiout = current_uiout;
975 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
977 context = abs (size);
979 error (_("Bad record function-call-history-size."));
981 btinfo = require_btrace ();
982 history = btinfo->call_history;
985 struct btrace_insn_iterator *replay;
987 DEBUG ("call-history (0x%x): %d", flags, size);
989 /* If we're replaying, we start at the replay position. Otherwise, we
990 start at the tail of the trace. */
991 replay = btinfo->replay;
994 begin.function = replay->function;
995 begin.btinfo = btinfo;
998 btrace_call_end (&begin, btinfo);
1000 /* We start from here and expand in the requested direction. Then we
1001 expand in the other direction, as well, to fill up any remaining
1006 /* We want the current position covered, as well. */
1007 covered = btrace_call_next (&end, 1);
1008 covered += btrace_call_prev (&begin, context - covered);
1009 covered += btrace_call_next (&end, context - covered);
1013 covered = btrace_call_next (&end, context);
1014 covered += btrace_call_prev (&begin, context- covered);
1019 begin = history->begin;
1022 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1023 btrace_call_number (&begin), btrace_call_number (&end));
1028 covered = btrace_call_prev (&begin, context);
1033 covered = btrace_call_next (&end, context);
1038 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1042 printf_unfiltered (_("At the start of the branch trace record.\n"));
1044 printf_unfiltered (_("At the end of the branch trace record.\n"));
1047 btrace_set_call_history (btinfo, &begin, &end);
1048 do_cleanups (uiout_cleanup);
1051 /* The to_call_history_range method of target record-btrace. */
1054 record_btrace_call_history_range (struct target_ops *self,
1055 ULONGEST from, ULONGEST to, int flags)
1057 struct btrace_thread_info *btinfo;
1058 struct btrace_call_history *history;
1059 struct btrace_call_iterator begin, end;
1060 struct cleanup *uiout_cleanup;
1061 struct ui_out *uiout;
1062 unsigned int low, high;
1065 uiout = current_uiout;
1066 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1071 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1073 /* Check for wrap-arounds. */
1074 if (low != from || high != to)
1075 error (_("Bad range."));
1078 error (_("Bad range."));
1080 btinfo = require_btrace ();
1082 found = btrace_find_call_by_number (&begin, btinfo, low);
1084 error (_("Range out of bounds."));
1086 found = btrace_find_call_by_number (&end, btinfo, high);
1089 /* Silently truncate the range. */
1090 btrace_call_end (&end, btinfo);
1094 /* We want both begin and end to be inclusive. */
1095 btrace_call_next (&end, 1);
1098 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1099 btrace_set_call_history (btinfo, &begin, &end);
1101 do_cleanups (uiout_cleanup);
1104 /* The to_call_history_from method of target record-btrace. */
1107 record_btrace_call_history_from (struct target_ops *self,
1108 ULONGEST from, int size, int flags)
1110 ULONGEST begin, end, context;
1112 context = abs (size);
1114 error (_("Bad record function-call-history-size."));
1123 begin = from - context + 1;
1128 end = from + context - 1;
1130 /* Check for wrap-around. */
1135 record_btrace_call_history_range (self, begin, end, flags);
1138 /* The to_record_is_replaying method of target record-btrace. */
1141 record_btrace_is_replaying (struct target_ops *self)
1143 struct thread_info *tp;
1145 ALL_NON_EXITED_THREADS (tp)
1146 if (btrace_is_replaying (tp))
1152 /* The to_xfer_partial method of target record-btrace. */
1154 static enum target_xfer_status
1155 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1156 const char *annex, gdb_byte *readbuf,
1157 const gdb_byte *writebuf, ULONGEST offset,
1158 ULONGEST len, ULONGEST *xfered_len)
1160 struct target_ops *t;
1162 /* Filter out requests that don't make sense during replay. */
1163 if (replay_memory_access == replay_memory_access_read_only
1164 && !record_btrace_generating_corefile
1165 && record_btrace_is_replaying (ops))
1169 case TARGET_OBJECT_MEMORY:
1171 struct target_section *section;
1173 /* We do not allow writing memory in general. */
1174 if (writebuf != NULL)
1177 return TARGET_XFER_UNAVAILABLE;
1180 /* We allow reading readonly memory. */
1181 section = target_section_by_addr (ops, offset);
1182 if (section != NULL)
1184 /* Check if the section we found is readonly. */
1185 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1186 section->the_bfd_section)
1187 & SEC_READONLY) != 0)
1189 /* Truncate the request to fit into this section. */
1190 len = min (len, section->endaddr - offset);
1196 return TARGET_XFER_UNAVAILABLE;
1201 /* Forward the request. */
1203 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1204 offset, len, xfered_len);
1207 /* The to_insert_breakpoint method of target record-btrace. */
1210 record_btrace_insert_breakpoint (struct target_ops *ops,
1211 struct gdbarch *gdbarch,
1212 struct bp_target_info *bp_tgt)
1217 /* Inserting breakpoints requires accessing memory. Allow it for the
1218 duration of this function. */
1219 old = replay_memory_access;
1220 replay_memory_access = replay_memory_access_read_write;
1225 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1227 CATCH (except, RETURN_MASK_ALL)
1229 replay_memory_access = old;
1230 throw_exception (except);
1233 replay_memory_access = old;
1238 /* The to_remove_breakpoint method of target record-btrace. */
1241 record_btrace_remove_breakpoint (struct target_ops *ops,
1242 struct gdbarch *gdbarch,
1243 struct bp_target_info *bp_tgt)
1248 /* Removing breakpoints requires accessing memory. Allow it for the
1249 duration of this function. */
1250 old = replay_memory_access;
1251 replay_memory_access = replay_memory_access_read_write;
1256 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1258 CATCH (except, RETURN_MASK_ALL)
1260 replay_memory_access = old;
1261 throw_exception (except);
1264 replay_memory_access = old;
1269 /* The to_fetch_registers method of target record-btrace. */
1272 record_btrace_fetch_registers (struct target_ops *ops,
1273 struct regcache *regcache, int regno)
1275 struct btrace_insn_iterator *replay;
1276 struct thread_info *tp;
1278 tp = find_thread_ptid (inferior_ptid);
1279 gdb_assert (tp != NULL);
1281 replay = tp->btrace.replay;
1282 if (replay != NULL && !record_btrace_generating_corefile)
1284 const struct btrace_insn *insn;
1285 struct gdbarch *gdbarch;
1288 gdbarch = get_regcache_arch (regcache);
1289 pcreg = gdbarch_pc_regnum (gdbarch);
1293 /* We can only provide the PC register. */
1294 if (regno >= 0 && regno != pcreg)
1297 insn = btrace_insn_get (replay);
1298 gdb_assert (insn != NULL);
1300 regcache_raw_supply (regcache, regno, &insn->pc);
1304 struct target_ops *t = ops->beneath;
1306 t->to_fetch_registers (t, regcache, regno);
1310 /* The to_store_registers method of target record-btrace. */
1313 record_btrace_store_registers (struct target_ops *ops,
1314 struct regcache *regcache, int regno)
1316 struct target_ops *t;
1318 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1319 error (_("This record target does not allow writing registers."));
1321 gdb_assert (may_write_registers != 0);
1324 t->to_store_registers (t, regcache, regno);
1327 /* The to_prepare_to_store method of target record-btrace. */
1330 record_btrace_prepare_to_store (struct target_ops *ops,
1331 struct regcache *regcache)
1333 struct target_ops *t;
1335 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1339 t->to_prepare_to_store (t, regcache);
1342 /* The branch trace frame cache. */
1344 struct btrace_frame_cache
1347 struct thread_info *tp;
1349 /* The frame info. */
1350 struct frame_info *frame;
1352 /* The branch trace function segment. */
1353 const struct btrace_function *bfun;
1356 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1358 static htab_t bfcache;
1360 /* hash_f for htab_create_alloc of bfcache. */
1363 bfcache_hash (const void *arg)
1365 const struct btrace_frame_cache *cache = arg;
1367 return htab_hash_pointer (cache->frame);
1370 /* eq_f for htab_create_alloc of bfcache. */
1373 bfcache_eq (const void *arg1, const void *arg2)
1375 const struct btrace_frame_cache *cache1 = arg1;
1376 const struct btrace_frame_cache *cache2 = arg2;
1378 return cache1->frame == cache2->frame;
1381 /* Create a new btrace frame cache. */
1383 static struct btrace_frame_cache *
1384 bfcache_new (struct frame_info *frame)
1386 struct btrace_frame_cache *cache;
1389 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1390 cache->frame = frame;
1392 slot = htab_find_slot (bfcache, cache, INSERT);
1393 gdb_assert (*slot == NULL);
1399 /* Extract the branch trace function from a branch trace frame. */
1401 static const struct btrace_function *
1402 btrace_get_frame_function (struct frame_info *frame)
1404 const struct btrace_frame_cache *cache;
1405 const struct btrace_function *bfun;
1406 struct btrace_frame_cache pattern;
1409 pattern.frame = frame;
1411 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1419 /* Implement stop_reason method for record_btrace_frame_unwind. */
1421 static enum unwind_stop_reason
1422 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1425 const struct btrace_frame_cache *cache;
1426 const struct btrace_function *bfun;
1428 cache = *this_cache;
1430 gdb_assert (bfun != NULL);
1432 if (bfun->up == NULL)
1433 return UNWIND_UNAVAILABLE;
1435 return UNWIND_NO_REASON;
1438 /* Implement this_id method for record_btrace_frame_unwind. */
1441 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1442 struct frame_id *this_id)
1444 const struct btrace_frame_cache *cache;
1445 const struct btrace_function *bfun;
1446 CORE_ADDR code, special;
1448 cache = *this_cache;
1451 gdb_assert (bfun != NULL);
1453 while (bfun->segment.prev != NULL)
1454 bfun = bfun->segment.prev;
1456 code = get_frame_func (this_frame);
1457 special = bfun->number;
1459 *this_id = frame_id_build_unavailable_stack_special (code, special);
1461 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1462 btrace_get_bfun_name (cache->bfun),
1463 core_addr_to_string_nz (this_id->code_addr),
1464 core_addr_to_string_nz (this_id->special_addr));
1467 /* Implement prev_register method for record_btrace_frame_unwind. */
1469 static struct value *
1470 record_btrace_frame_prev_register (struct frame_info *this_frame,
1474 const struct btrace_frame_cache *cache;
1475 const struct btrace_function *bfun, *caller;
1476 const struct btrace_insn *insn;
1477 struct gdbarch *gdbarch;
1481 gdbarch = get_frame_arch (this_frame);
1482 pcreg = gdbarch_pc_regnum (gdbarch);
1483 if (pcreg < 0 || regnum != pcreg)
1484 throw_error (NOT_AVAILABLE_ERROR,
1485 _("Registers are not available in btrace record history"));
1487 cache = *this_cache;
1489 gdb_assert (bfun != NULL);
1493 throw_error (NOT_AVAILABLE_ERROR,
1494 _("No caller in btrace record history"));
1496 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1498 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1503 insn = VEC_last (btrace_insn_s, caller->insn);
1506 pc += gdb_insn_length (gdbarch, pc);
1509 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1510 btrace_get_bfun_name (bfun), bfun->level,
1511 core_addr_to_string_nz (pc));
1513 return frame_unwind_got_address (this_frame, regnum, pc);
1516 /* Implement sniffer method for record_btrace_frame_unwind. */
/* Claims THIS_FRAME while the current thread is replaying: the innermost
   frame is identified via the replay iterator, outer frames via the
   callee's btrace function segment.  */
1519 record_btrace_frame_sniffer (const struct frame_unwind *self,
1520 struct frame_info *this_frame,
1523 const struct btrace_function *bfun;
1524 struct btrace_frame_cache *cache;
1525 struct thread_info *tp;
1526 struct frame_info *next;
1528 /* THIS_FRAME does not contain a reference to its thread. */
1529 tp = find_thread_ptid (inferior_ptid);
1530 gdb_assert (tp != NULL);
1533 next = get_next_frame (this_frame);
/* For the innermost frame, the replay position determines the btrace
   function segment.  */
1536 const struct btrace_insn_iterator *replay;
1538 replay = tp->btrace.replay;
1540 bfun = replay->function;
/* For other frames, only claim the frame if the callee links up via a
   regular call; tail calls belong to the tailcall unwinder below.  */
1544 const struct btrace_function *callee;
1546 callee = btrace_get_frame_function (next);
1547 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1554 DEBUG ("[frame] sniffed frame for %s on level %d",
1555 btrace_get_bfun_name (bfun), bfun->level);
1557 /* This is our frame. Initialize the frame cache. */
1558 cache = bfcache_new (this_frame);
1562 *this_cache = cache;
1566 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
/* Claims THIS_FRAME only when the callee's function segment links up to
   its caller via a tail call.  */
1569 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1570 struct frame_info *this_frame,
1573 const struct btrace_function *bfun, *callee;
1574 struct btrace_frame_cache *cache;
1575 struct frame_info *next;
1577 next = get_next_frame (this_frame);
1581 callee = btrace_get_frame_function (next);
/* A regular call is handled by record_btrace_frame_sniffer instead.  */
1585 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1592 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1593 btrace_get_bfun_name (bfun), bfun->level);
1595 /* This is our frame. Initialize the frame cache. */
1596 cache = bfcache_new (this_frame);
1597 cache->tp = find_thread_ptid (inferior_ptid);
1600 *this_cache = cache;
/* Implement the dealloc_cache method for both btrace frame unwinders:
   remove THIS_CACHE from the global bfcache hash table again.  */
1605 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1607 struct btrace_frame_cache *cache;
/* The cache must have been registered by the sniffer via bfcache_new.  */
1612 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1613 gdb_assert (slot != NULL);
1615 htab_remove_elt (bfcache, cache);
1618 /* btrace recording does not store previous memory content, nor the stack
1619 frame contents. Any unwinding would return erroneous results as the stack
1620 contents no longer match the changed PC value restored from history.
1621 Therefore this unwinder reports any possibly unwound registers as
/* Unwinder used for normal frames while replaying.  */
1624 const struct frame_unwind record_btrace_frame_unwind =
1627 record_btrace_frame_unwind_stop_reason,
1628 record_btrace_frame_this_id,
1629 record_btrace_frame_prev_register,
1631 record_btrace_frame_sniffer,
1632 record_btrace_frame_dealloc_cache
/* Unwinder used for tail call frames while replaying; shares all methods
   with record_btrace_frame_unwind except the sniffer.  */
1635 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1638 record_btrace_frame_unwind_stop_reason,
1639 record_btrace_frame_this_id,
1640 record_btrace_frame_prev_register,
1642 record_btrace_tailcall_frame_sniffer,
1643 record_btrace_frame_dealloc_cache
1646 /* Implement the to_get_unwinder method.  Return the unwinder used for
   normal frames while replaying. */
1648 static const struct frame_unwind *
1649 record_btrace_to_get_unwinder (struct target_ops *self)
1651 return &record_btrace_frame_unwind;
1654 /* Implement the to_get_tailcall_unwinder method.  Return the unwinder
   used for tail call frames while replaying. */
1656 static const struct frame_unwind *
1657 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1659 return &record_btrace_tailcall_frame_unwind;
1662 /* Indicate that TP should be resumed according to FLAG. */
/* Only records the intent in TP's btrace flags; the actual move happens
   in record_btrace_wait.  */
1665 record_btrace_resume_thread (struct thread_info *tp,
1666 enum btrace_thread_flag flag)
1668 struct btrace_thread_info *btinfo;
1670 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1672 btinfo = &tp->btrace;
/* A thread must not be resumed again before it was moved.  */
1674 if ((btinfo->flags & BTHR_MOVE) != 0)
1675 error (_("Thread already moving."));
1677 /* Fetch the latest branch trace. */
1680 btinfo->flags |= flag;
1683 /* Find the thread to resume given a PTID. */
1685 static struct thread_info *
1686 record_btrace_find_resume_thread (ptid_t ptid)
1688 struct thread_info *tp;
1690 /* When asked to resume everything, we pick the current thread. */
1691 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1692 ptid = inferior_ptid;
/* May return NULL if no thread with PTID exists.  */
1694 return find_thread_ptid (ptid);
1697 /* Start replaying a thread. */
/* Sets up TP's replay iterator at the end of its branch trace and fixes
   up the stepping-related frame ids.  Returns the new iterator, which is
   also stored in TP's btrace info.  */
1699 static struct btrace_insn_iterator *
1700 record_btrace_start_replaying (struct thread_info *tp)
1702 struct btrace_insn_iterator *replay;
1703 struct btrace_thread_info *btinfo;
1706 btinfo = &tp->btrace;
1709 /* We can't start replaying without trace. */
1710 if (btinfo->begin == NULL)
1713 /* Clear the executing flag to allow changes to the current frame.
1714 We are not actually running, yet. We just started a reverse execution
1715 command or a record goto command.
1716 For the latter, EXECUTING is false and this has no effect.
1717 For the former, EXECUTING is true and we're in to_wait, about to
1718 move the thread. Since we need to recompute the stack, we temporarily
1719 set EXECUTING to false. */
1720 executing = is_executing (tp->ptid);
1721 set_executing (tp->ptid, 0);
1723 /* GDB stores the current frame_id when stepping in order to detect steps
1725 Since frames are computed differently when we're replaying, we need to
1726 recompute those stored frames and fix them up so we can still detect
1727 subroutines after we started replaying. */
1730 struct frame_info *frame;
1731 struct frame_id frame_id;
1732 int upd_step_frame_id, upd_step_stack_frame_id;
1734 /* The current frame without replaying - computed via normal unwind. */
1735 frame = get_current_frame ();
1736 frame_id = get_frame_id (frame);
1738 /* Check if we need to update any stepping-related frame id's. */
1739 upd_step_frame_id = frame_id_eq (frame_id,
1740 tp->control.step_frame_id);
1741 upd_step_stack_frame_id = frame_id_eq (frame_id,
1742 tp->control.step_stack_frame_id);
1744 /* We start replaying at the end of the branch trace. This corresponds
1745 to the current instruction. */
1746 replay = XNEW (struct btrace_insn_iterator);
1747 btrace_insn_end (replay, btinfo);
1749 /* Skip gaps at the end of the trace. */
1750 while (btrace_insn_get (replay) == NULL)
1754 steps = btrace_insn_prev (replay, 1);
/* If the whole trace consists of gaps, there is nothing to replay.  */
1756 error (_("No trace."));
1759 /* We're not replaying, yet. */
1760 gdb_assert (btinfo->replay == NULL);
1761 btinfo->replay = replay;
1763 /* Make sure we're not using any stale registers. */
1764 registers_changed_ptid (tp->ptid);
1766 /* The current frame with replaying - computed via btrace unwind. */
1767 frame = get_current_frame ();
1768 frame_id = get_frame_id (frame);
1770 /* Replace stepping related frames where necessary. */
1771 if (upd_step_frame_id)
1772 tp->control.step_frame_id = frame_id;
1773 if (upd_step_stack_frame_id)
1774 tp->control.step_stack_frame_id = frame_id;
/* On error, undo all replay state changes before re-throwing.  */
1776 CATCH (except, RETURN_MASK_ALL)
1778 /* Restore the previous execution state. */
1779 set_executing (tp->ptid, executing);
1781 xfree (btinfo->replay);
1782 btinfo->replay = NULL;
1784 registers_changed_ptid (tp->ptid);
1786 throw_exception (except);
1790 /* Restore the previous execution state. */
1791 set_executing (tp->ptid, executing);
1796 /* Stop replaying a thread. */
1799 record_btrace_stop_replaying (struct thread_info *tp)
1801 struct btrace_thread_info *btinfo;
1803 btinfo = &tp->btrace;
/* Freeing a NULL replay iterator is a no-op, so this is safe even for
   threads that were not replaying.  */
1805 xfree (btinfo->replay);
1806 btinfo->replay = NULL;
1808 /* Make sure we're not leaving any stale registers. */
1809 registers_changed_ptid (tp->ptid);
1812 /* The to_resume method of target record-btrace. */
1815 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1816 enum gdb_signal signal)
1818 struct thread_info *tp, *other;
1819 enum btrace_thread_flag flag;
1821 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1823 /* Store the execution direction of the last resume. */
1824 record_btrace_resume_exec_dir = execution_direction;
1826 tp = record_btrace_find_resume_thread (ptid);
1828 error (_("Cannot find thread to resume."));
1830 /* Stop replaying other threads if the thread to resume is not replaying. */
1831 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1832 ALL_NON_EXITED_THREADS (other)
1833 record_btrace_stop_replaying (other);
1835 /* As long as we're not replaying, just forward the request. */
1836 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1839 return ops->to_resume (ops, ptid, step, signal);
1842 /* Compute the btrace thread flag for the requested move. */
/* Continue vs. single-step, forward vs. reverse.  */
1844 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1846 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1848 /* At the moment, we only move a single thread. We could also move
1849 all threads in parallel by single-stepping each resumed thread
1850 until the first runs into an event.
1851 When we do that, we would want to continue all other threads.
1852 For now, just resume one thread to not confuse to_wait. */
1853 record_btrace_resume_thread (tp, flag);
1855 /* We just indicate the resume intent here. The actual stepping happens in
1856 record_btrace_wait below. */
1858 /* Async support. */
1859 if (target_can_async_p ())
1862 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1866 /* Find a thread to move. */
/* Prefer PTID's thread; otherwise pick any thread with a pending move
   request (BTHR_MOVE).  May return NULL.  */
1868 static struct thread_info *
1869 record_btrace_find_thread_to_move (ptid_t ptid)
1871 struct thread_info *tp;
1873 /* First check the parameter thread. */
1874 tp = find_thread_ptid (ptid);
1875 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1878 /* Otherwise, find one other thread that has been resumed. */
1879 ALL_NON_EXITED_THREADS (tp)
1880 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1886 /* Return a target_waitstatus indicating that we ran out of history. */
1888 static struct target_waitstatus
1889 btrace_step_no_history (void)
1891 struct target_waitstatus status;
/* Only the kind member matters for NO_HISTORY; it carries no value.  */
1893 status.kind = TARGET_WAITKIND_NO_HISTORY;
1898 /* Return a target_waitstatus indicating that a step finished. */
1900 static struct target_waitstatus
1901 btrace_step_stopped (void)
1903 struct target_waitstatus status;
1905 status.kind = TARGET_WAITKIND_STOPPED;
/* SIGTRAP is the conventional "step finished" stop signal.  */
1906 status.value.sig = GDB_SIGNAL_TRAP;
1911 /* Clear the record histories. */
/* Discards cached instruction and call history so they are rebuilt from
   the current replay position on the next request.  */
1914 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1916 xfree (btinfo->insn_history);
1917 xfree (btinfo->call_history);
1919 btinfo->insn_history = NULL;
1920 btinfo->call_history = NULL;
1923 /* Step a single thread. */
/* Consumes TP's pending move request (BTHR_STEP/RSTEP/CONT/RCONT) and
   performs it on the replay iterator, returning the resulting wait
   status.  */
1925 static struct target_waitstatus
1926 record_btrace_step_thread (struct thread_info *tp)
1928 struct btrace_insn_iterator *replay, end;
1929 struct btrace_thread_info *btinfo;
1930 struct address_space *aspace;
1931 struct inferior *inf;
1932 enum btrace_thread_flag flags;
1935 /* We can't step without an execution history. */
1936 if (btrace_is_empty (tp))
1937 return btrace_step_no_history ();
1939 btinfo = &tp->btrace;
1940 replay = btinfo->replay;
/* Consume the move request so the thread is not moved twice.  */
1942 flags = btinfo->flags & BTHR_MOVE;
1943 btinfo->flags &= ~BTHR_MOVE;
1945 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
/* Dispatch on the requested move - presumably a switch on FLAGS.  */
1950 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
/* Forward single-step (BTHR_STEP), presumably.  */
1953 /* We're done if we're not replaying. */
1955 return btrace_step_no_history ();
1957 /* Skip gaps during replay. */
1960 steps = btrace_insn_next (replay, 1);
1963 record_btrace_stop_replaying (tp);
1964 return btrace_step_no_history ();
1967 while (btrace_insn_get (replay) == NULL);
1969 /* Determine the end of the instruction trace. */
1970 btrace_insn_end (&end, btinfo);
1972 /* We stop replaying if we reached the end of the trace. */
1973 if (btrace_insn_cmp (replay, &end) == 0)
1974 record_btrace_stop_replaying (tp);
1976 return btrace_step_stopped ();
/* Reverse single-step (BTHR_RSTEP), presumably.  */
1979 /* Start replaying if we're not already doing so. */
1981 replay = record_btrace_start_replaying (tp);
1983 /* If we can't step any further, we reached the end of the history.
1984 Skip gaps during replay. */
1987 steps = btrace_insn_prev (replay, 1);
1989 return btrace_step_no_history ();
1992 while (btrace_insn_get (replay) == NULL);
1994 return btrace_step_stopped ();
/* Forward continue (BTHR_CONT), presumably: step until a breakpoint is
   hit or the trace ends.  */
1997 /* We're done if we're not replaying. */
1999 return btrace_step_no_history ();
2001 inf = find_inferior_ptid (tp->ptid);
2002 aspace = inf->aspace;
2004 /* Determine the end of the instruction trace. */
2005 btrace_insn_end (&end, btinfo);
2009 const struct btrace_insn *insn;
2011 /* Skip gaps during replay. */
2014 steps = btrace_insn_next (replay, 1);
2017 record_btrace_stop_replaying (tp);
2018 return btrace_step_no_history ();
2021 insn = btrace_insn_get (replay);
2023 while (insn == NULL);
2025 /* We stop replaying if we reached the end of the trace. */
2026 if (btrace_insn_cmp (replay, &end) == 0)
2028 record_btrace_stop_replaying (tp);
2029 return btrace_step_no_history ();
2032 DEBUG ("stepping %d (%s) ... %s", tp->num,
2033 target_pid_to_str (tp->ptid),
2034 core_addr_to_string_nz (insn->pc));
/* Stop when the replayed instruction hits a breakpoint.  */
2036 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
2037 &btinfo->stop_reason))
2038 return btrace_step_stopped ();
/* Reverse continue (BTHR_RCONT), presumably.  */
2042 /* Start replaying if we're not already doing so. */
2044 replay = record_btrace_start_replaying (tp);
2046 inf = find_inferior_ptid (tp->ptid);
2047 aspace = inf->aspace;
2051 const struct btrace_insn *insn;
2053 /* If we can't step any further, we reached the end of the history.
2054 Skip gaps during replay. */
2057 steps = btrace_insn_prev (replay, 1);
2059 return btrace_step_no_history ();
2061 insn = btrace_insn_get (replay);
2063 while (insn == NULL);
2065 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
2066 target_pid_to_str (tp->ptid),
2067 core_addr_to_string_nz (insn->pc));
2069 if (record_check_stopped_by_breakpoint (aspace, insn->pc,
2070 &btinfo->stop_reason))
2071 return btrace_step_stopped ();
2076 /* The to_wait method of target record-btrace. */
2079 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2080 struct target_waitstatus *status, int options)
2082 struct thread_info *tp, *other;
2084 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2086 /* As long as we're not replaying, just forward the request. */
2087 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2090 return ops->to_wait (ops, ptid, status, options);
2093 /* Let's find a thread to move. */
2094 tp = record_btrace_find_thread_to_move (ptid);
/* Without a thread to move, report that nothing happened.  */
2097 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
2099 status->kind = TARGET_WAITKIND_IGNORE;
2100 return minus_one_ptid;
2103 /* We only move a single thread. We're not able to correlate threads. */
2104 *status = record_btrace_step_thread (tp);
2106 /* Stop all other threads. */
/* Clearing BTHR_MOVE cancels the other threads' pending move requests.  */
2107 if (!target_is_non_stop_p ())
2108 ALL_NON_EXITED_THREADS (other)
2109 other->btrace.flags &= ~BTHR_MOVE;
2111 /* Start record histories anew from the current position. */
2112 record_btrace_clear_histories (&tp->btrace);
2114 /* We moved the replay position but did not update registers. */
2115 registers_changed_ptid (tp->ptid);
2120 /* The to_can_execute_reverse method of target record-btrace. */
/* Branch tracing records the execution history, so reverse execution is
   possible while this target is pushed.  */
2123 record_btrace_can_execute_reverse (struct target_ops *self)
2128 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2131 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
/* While replaying, report the stop reason recorded during the move;
   otherwise defer to the target beneath.  */
2133 if (record_btrace_is_replaying (ops))
2135 struct thread_info *tp = inferior_thread ();
2137 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2140 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2143 /* The to_supports_stopped_by_sw_breakpoint method of target
2147 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
/* Software breakpoint stop reasons are supported while replaying;
   otherwise defer to the target beneath.  */
2149 if (record_btrace_is_replaying (ops))
2152 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2155 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2158 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
/* While replaying, report the stop reason recorded during the move;
   otherwise defer to the target beneath.  */
2160 if (record_btrace_is_replaying (ops))
2162 struct thread_info *tp = inferior_thread ();
2164 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2167 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2170 /* The to_supports_stopped_by_hw_breakpoint method of target
2174 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
/* Hardware breakpoint stop reasons are supported while replaying;
   otherwise defer to the target beneath.  */
2176 if (record_btrace_is_replaying (ops))
2179 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2182 /* The to_update_thread_list method of target record-btrace. */
2185 record_btrace_update_thread_list (struct target_ops *ops)
2187 /* We don't add or remove threads during replay. */
2188 if (record_btrace_is_replaying (ops))
2191 /* Forward the request. */
/* NOTE(review): OPS is presumably advanced to the target beneath before
   this call - confirm against the elided line above.  */
2193 ops->to_update_thread_list (ops);
2196 /* The to_thread_alive method of target record-btrace. */
2199 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2201 /* We don't add or remove threads during replay. */
/* During replay, a thread is alive iff GDB still knows about it.  */
2202 if (record_btrace_is_replaying (ops))
2203 return find_thread_ptid (ptid) != NULL;
2205 /* Forward the request. */
2207 return ops->to_thread_alive (ops, ptid);
2210 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2214 record_btrace_set_replay (struct thread_info *tp,
2215 const struct btrace_insn_iterator *it)
2217 struct btrace_thread_info *btinfo;
2219 btinfo = &tp->btrace;
/* A NULL (or end-of-trace) iterator means: stop replaying.  */
2221 if (it == NULL || it->function == NULL)
2222 record_btrace_stop_replaying (tp);
2225 if (btinfo->replay == NULL)
2226 record_btrace_start_replaying (tp);
/* Nothing to do if we are already at the requested position.  */
2227 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2230 *btinfo->replay = *it;
2231 registers_changed_ptid (tp->ptid);
2234 /* Start anew from the new replay position. */
2235 record_btrace_clear_histories (btinfo);
/* Update the cached stop PC and show the new position to the user.  */
2237 stop_pc = regcache_read_pc (get_current_regcache ());
2238 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2241 /* The to_goto_record_begin method of target record-btrace. */
2244 record_btrace_goto_begin (struct target_ops *self)
2246 struct thread_info *tp;
2247 struct btrace_insn_iterator begin;
2249 tp = require_btrace_thread ();
/* Replay from the first recorded instruction.  */
2251 btrace_insn_begin (&begin, &tp->btrace);
2252 record_btrace_set_replay (tp, &begin);
2255 /* The to_goto_record_end method of target record-btrace. */
2258 record_btrace_goto_end (struct target_ops *ops)
2260 struct thread_info *tp;
2262 tp = require_btrace_thread ();
/* A NULL iterator stops replaying, i.e. goes to the end of the trace.  */
2264 record_btrace_set_replay (tp, NULL);
2267 /* The to_goto_record method of target record-btrace. */
2270 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2272 struct thread_info *tp;
2273 struct btrace_insn_iterator it;
2274 unsigned int number;
2279 /* Check for wrap-arounds. */
/* INSN is truncated to an unsigned int NUMBER; reject values that do not
   survive the round-trip.  */
2281 error (_("Instruction number out of range."));
2283 tp = require_btrace_thread ();
2285 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2287 error (_("No such instruction."));
2289 record_btrace_set_replay (tp, &it);
2292 /* The to_execution_direction target method. */
2294 static enum exec_direction_kind
2295 record_btrace_execution_direction (struct target_ops *self)
/* Report the direction of the last resume; see record_btrace_resume.  */
2297 return record_btrace_resume_exec_dir;
2300 /* The to_prepare_to_generate_core target method. */
/* Flag presumably consulted elsewhere (e.g. memory access paths) to relax
   replay restrictions while a core file is generated.  */
2303 record_btrace_prepare_to_generate_core (struct target_ops *self)
2305 record_btrace_generating_corefile = 1;
2308 /* The to_done_generating_core target method. */
/* Clears the flag set by record_btrace_prepare_to_generate_core.  */
2311 record_btrace_done_generating_core (struct target_ops *self)
2313 record_btrace_generating_corefile = 0;
2316 /* Initialize the record-btrace target ops. */
2319 init_record_btrace_ops (void)
2321 struct target_ops *ops;
2323 ops = &record_btrace_ops;
/* Identification.  */
2324 ops->to_shortname = "record-btrace";
2325 ops->to_longname = "Branch tracing target";
2326 ops->to_doc = "Collect control-flow trace and provide the execution history.";
/* Lifecycle.  */
2327 ops->to_open = record_btrace_open;
2328 ops->to_close = record_btrace_close;
2329 ops->to_async = record_btrace_async;
2330 ops->to_detach = record_detach;
2331 ops->to_disconnect = record_disconnect;
2332 ops->to_mourn_inferior = record_mourn_inferior;
2333 ops->to_kill = record_kill;
/* Recording and history browsing.  */
2334 ops->to_stop_recording = record_btrace_stop_recording;
2335 ops->to_info_record = record_btrace_info;
2336 ops->to_insn_history = record_btrace_insn_history;
2337 ops->to_insn_history_from = record_btrace_insn_history_from;
2338 ops->to_insn_history_range = record_btrace_insn_history_range;
2339 ops->to_call_history = record_btrace_call_history;
2340 ops->to_call_history_from = record_btrace_call_history_from;
2341 ops->to_call_history_range = record_btrace_call_history_range;
2342 ops->to_record_is_replaying = record_btrace_is_replaying;
/* Memory, breakpoints, and registers during replay.  */
2343 ops->to_xfer_partial = record_btrace_xfer_partial;
2344 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2345 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2346 ops->to_fetch_registers = record_btrace_fetch_registers;
2347 ops->to_store_registers = record_btrace_store_registers;
2348 ops->to_prepare_to_store = record_btrace_prepare_to_store;
/* Unwinding and execution control.  */
2349 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2350 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2351 ops->to_resume = record_btrace_resume;
2352 ops->to_wait = record_btrace_wait;
2353 ops->to_update_thread_list = record_btrace_update_thread_list;
2354 ops->to_thread_alive = record_btrace_thread_alive;
2355 ops->to_goto_record_begin = record_btrace_goto_begin;
2356 ops->to_goto_record_end = record_btrace_goto_end;
2357 ops->to_goto_record = record_btrace_goto;
2358 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2359 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2360 ops->to_supports_stopped_by_sw_breakpoint
2361 = record_btrace_supports_stopped_by_sw_breakpoint;
2362 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2363 ops->to_supports_stopped_by_hw_breakpoint
2364 = record_btrace_supports_stopped_by_hw_breakpoint;
2365 ops->to_execution_direction = record_btrace_execution_direction;
2366 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2367 ops->to_done_generating_core = record_btrace_done_generating_core;
2368 ops->to_stratum = record_stratum;
2369 ops->to_magic = OPS_MAGIC;
2372 /* Start recording in BTS format. */
2375 cmd_record_btrace_bts_start (char *args, int from_tty)
2377 if (args != NULL && *args != 0)
2378 error (_("Invalid argument."));
2380 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2384 execute_command ("target record-btrace", from_tty);
/* Reset the format on failure so a later attempt starts clean.  */
2386 CATCH (exception, RETURN_MASK_ALL)
2388 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2389 throw_exception (exception);
2394 /* Start recording Intel(R) Processor Trace. */
2397 cmd_record_btrace_pt_start (char *args, int from_tty)
2399 if (args != NULL && *args != 0)
2400 error (_("Invalid argument."));
2402 record_btrace_conf.format = BTRACE_FORMAT_PT;
2406 execute_command ("target record-btrace", from_tty);
/* Reset the format on failure so a later attempt starts clean.  */
2408 CATCH (exception, RETURN_MASK_ALL)
2410 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2411 throw_exception (exception);
2416 /* Alias for "target record". */
/* Tries recording in PT format first and falls back to BTS if that
   fails; only if both fail is the exception propagated.  */
2419 cmd_record_btrace_start (char *args, int from_tty)
2421 if (args != NULL && *args != 0)
2422 error (_("Invalid argument."));
2424 record_btrace_conf.format = BTRACE_FORMAT_PT;
2428 execute_command ("target record-btrace", from_tty);
2430 CATCH (exception, RETURN_MASK_ALL)
/* PT failed; retry with BTS.  */
2432 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2436 execute_command ("target record-btrace", from_tty);
2438 CATCH (exception, RETURN_MASK_ALL)
2440 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2441 throw_exception (exception);
2448 /* The "set record btrace" command. */
/* Without a subcommand, display the current settings.  */
2451 cmd_set_record_btrace (char *args, int from_tty)
2453 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2456 /* The "show record btrace" command. */
/* Displays the whole "show record btrace" sub-list.  */
2459 cmd_show_record_btrace (char *args, int from_tty)
2461 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2464 /* The "show record btrace replay-memory-access" command. */
2467 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2468 struct cmd_list_element *c, const char *value)
/* NOTE(review): this prints to gdb_stdout rather than the FILE argument;
   confirm whether it should use FILE like the other show callbacks.  */
2470 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2471 replay_memory_access);
2474 /* The "set record btrace bts" command. */
/* Requires a subcommand; print usage help otherwise.  */
2477 cmd_set_record_btrace_bts (char *args, int from_tty)
2479 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2480 "by an appropriate subcommand.\n"));
2481 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2482 all_commands, gdb_stdout);
2485 /* The "show record btrace bts" command. */
/* Displays the whole "show record btrace bts" sub-list.  */
2488 cmd_show_record_btrace_bts (char *args, int from_tty)
2490 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2493 /* The "set record btrace pt" command. */
/* Requires a subcommand; print usage help otherwise.  */
2496 cmd_set_record_btrace_pt (char *args, int from_tty)
2498 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2499 "by an appropriate subcommand.\n"));
2500 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2501 all_commands, gdb_stdout);
2504 /* The "show record btrace pt" command. */
/* Displays the whole "show record btrace pt" sub-list.  */
2507 cmd_show_record_btrace_pt (char *args, int from_tty)
2509 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2512 /* The "record bts buffer-size" show value function. */
/* VALUE is the pretty-printed current buffer size, presumably.  */
2515 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2516 struct cmd_list_element *c,
2519 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2523 /* The "record pt buffer-size" show value function. */
/* VALUE is the pretty-printed current buffer size, presumably.  */
2526 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2527 struct cmd_list_element *c,
2530 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2534 void _initialize_record_btrace (void);
2536 /* Initialize btrace commands. */
2539 _initialize_record_btrace (void)
/* "record btrace" plus its "record b" alias.  */
2541 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2542 _("Start branch trace recording."), &record_btrace_cmdlist,
2543 "record btrace ", 0, &record_cmdlist);
2544 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
/* Format-specific start commands: "record btrace bts" and
   "record btrace pt", with "record bts"/"record pt" aliases.  */
2546 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2548 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2549 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2550 This format may not be available on all processors."),
2551 &record_btrace_cmdlist);
2552 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2554 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2556 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2557 This format may not be available on all processors."),
2558 &record_btrace_cmdlist);
2559 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
/* "set/show record btrace" option prefixes.  */
2561 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2562 _("Set record options"), &set_record_btrace_cmdlist,
2563 "set record btrace ", 0, &set_record_cmdlist);
2565 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2566 _("Show record options"), &show_record_btrace_cmdlist,
2567 "show record btrace ", 0, &show_record_cmdlist);
/* "set/show record btrace replay-memory-access".  */
2569 add_setshow_enum_cmd ("replay-memory-access", no_class,
2570 replay_memory_access_types, &replay_memory_access, _("\
2571 Set what memory accesses are allowed during replay."), _("\
2572 Show what memory accesses are allowed during replay."),
2573 _("Default is READ-ONLY.\n\n\
2574 The btrace record target does not trace data.\n\
2575 The memory therefore corresponds to the live target and not \
2576 to the current replay position.\n\n\
2577 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2578 When READ-WRITE, allow accesses to read-only and read-write memory during \
2580 NULL, cmd_show_replay_memory_access,
2581 &set_record_btrace_cmdlist,
2582 &show_record_btrace_cmdlist);
/* "set/show record btrace bts" and its buffer-size option.  */
2584 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2585 _("Set record btrace bts options"),
2586 &set_record_btrace_bts_cmdlist,
2587 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2589 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2590 _("Show record btrace bts options"),
2591 &show_record_btrace_bts_cmdlist,
2592 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2594 add_setshow_uinteger_cmd ("buffer-size", no_class,
2595 &record_btrace_conf.bts.size,
2596 _("Set the record/replay bts buffer size."),
2597 _("Show the record/replay bts buffer size."), _("\
2598 When starting recording request a trace buffer of this size. \
2599 The actual buffer size may differ from the requested size. \
2600 Use \"info record\" to see the actual buffer size.\n\n\
2601 Bigger buffers allow longer recording but also take more time to process \
2602 the recorded execution trace.\n\n\
2603 The trace buffer size may not be changed while recording."), NULL,
2604 show_record_bts_buffer_size_value,
2605 &set_record_btrace_bts_cmdlist,
2606 &show_record_btrace_bts_cmdlist);
/* "set/show record btrace pt" and its buffer-size option.  */
2608 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2609 _("Set record btrace pt options"),
2610 &set_record_btrace_pt_cmdlist,
2611 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2613 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2614 _("Show record btrace pt options"),
2615 &show_record_btrace_pt_cmdlist,
2616 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2618 add_setshow_uinteger_cmd ("buffer-size", no_class,
2619 &record_btrace_conf.pt.size,
2620 _("Set the record/replay pt buffer size."),
2621 _("Show the record/replay pt buffer size."), _("\
2622 Bigger buffers allow longer recording but also take more time to process \
2623 the recorded execution.\n\
2624 The actual buffer size may differ from the requested size. Use \"info record\" \
2625 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2626 &set_record_btrace_pt_cmdlist,
2627 &show_record_btrace_pt_cmdlist);
/* Register the target and the frame cache hash table.  */
2629 init_record_btrace_ops ();
2630 add_target (&record_btrace_ops);
2632 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
/* Default buffer sizes; may be overridden with the commands above.  */
2635 record_btrace_conf.bts.size = 64 * 1024;
2636 record_btrace_conf.pt.size = 16 * 1024;