1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";

/* Table of valid values for "set record btrace replay-memory-access".  */
static const char *const replay_memory_access_types[] =
  replay_memory_access_read_only,
  replay_memory_access_read_write,

/* The currently allowed replay memory access type.  Defaults to read-only;
   consulted by record_btrace_xfer_partial.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  Only prints when the "record"
   debug setting (record_debug) is non-zero.  */
#define DEBUG(msg, args...) \
  if (record_debug != 0) \
    fprintf_unfiltered (gdb_stdlog, \
			"[record-btrace] " msg "\n", ##args); \
/* Update the branch trace for the current thread and return a pointer to its
   thread_info struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
    error (_("No thread."));

  /* Make sure there actually is trace to look at.  */
  if (btrace_is_empty (tp))
    error (_("No trace."));
/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
  struct thread_info *tp;

  /* Delegate the thread and trace validation.  */
  tp = require_btrace_thread ();
/* Enable branch tracing for one thread.  Warn on errors.  */

record_btrace_enable_warn (struct thread_info *tp)
  volatile struct gdb_exception error;

  TRY_CATCH (error, RETURN_MASK_ERROR)

  /* Downgrade an enable failure to a warning so one untraceable thread
     does not abort the whole operation.  */
  if (error.message != NULL)
    warning ("%s", error.message);
/* Callback function to disable branch tracing for one thread.  ARG is the
   thread_info to disable; registered as a cleanup in record_btrace_open.  */

record_btrace_disable_callback (void *arg)
  struct thread_info *tp;
/* Enable automatic tracing of new threads.  */

record_btrace_auto_enable (void)
  DEBUG ("attach thread observer");

  /* Start tracing every thread that comes into existence from now on.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
/* Disable automatic tracing of new threads.  Safe to call repeatedly.  */

record_btrace_auto_disable (void)
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
/* The record-btrace async event handler function.  Dispatches a regular
   inferior event to the event loop.  */

record_btrace_handle_async_inferior_event (gdb_client_data data)
  inferior_event_handler (INF_REG_EVENT, NULL);
/* The to_open method of target record-btrace.  ARGS may hold a thread
   number list restricting which threads get traced (NULL/empty = all).  */

record_btrace_open (char *args, int from_tty)
  struct cleanup *disable_chain;
  struct thread_info *tp;

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Arrange to disable tracing again for every thread we enable below,
     in case anything throws before we are done.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
	make_cleanup (record_btrace_disable_callback, tp);

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* Success - keep tracing enabled.  */
  discard_cleanups (disable_chain);
/* The to_stop_recording method of target record-btrace.  */

record_btrace_stop_recording (struct target_ops *self)
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  /* Disable tracing on every thread that is currently being traced.  */
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
/* The to_close method of target record-btrace.  */

record_btrace_close (struct target_ops *self)
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
/* The to_info_record method of target record-btrace.  Print a summary of
   the recorded trace for the current thread.  */

record_btrace_info (struct target_ops *self)
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  tp = find_thread_ptid (inferior_ptid);
    error (_("No thread."));

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call/instruction gives the totals.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
/* Print an unsigned int VAL as ui-out field FLD.  */

ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
  ui_out_field_fmt (uiout, fld, "%u", val);
/* Disassemble a section of the recorded instruction trace.  The range is
   [BEGIN; END), i.e. END is exclusive.  */

btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
/* The to_insn_history method of target record-btrace.  SIZE is the number
   of instructions to print; its sign gives the direction.  */

record_btrace_insn_history (struct target_ops *self, int size, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position. Otherwise, we
	 start at the tail of the trace. */
      replay = btinfo->replay;
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction. Then we
	 expand in the other direction, as well, to fill up any remaining

	  /* We want the current position covered, as well. */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);

	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);

      begin = history->begin;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

	  covered = btrace_insn_prev (&begin, context);

	  covered = btrace_insn_next (&end, context);

  btrace_insn_history (uiout, &begin, &end, flags);

    printf_unfiltered (_("At the start of the branch trace record.\n"));
    printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the printed range so a repeated command continues from here.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_insn_history_range method of target record-btrace.  Print the
   recorded instructions between FROM and TO (both inclusive).  */

record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
/* The to_insn_history_from method of target record-btrace.  Print up to
   SIZE instructions around instruction number FROM; the sign of SIZE
   selects the direction.  */

record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
  ULONGEST begin, end, context;

  context = abs (size);
    error (_("Bad record instruction-history-size."));

      begin = from - context + 1;

      end = from + context - 1;

      /* Check for wrap-around.  */

  record_btrace_insn_history_range (self, begin, end, flags);
/* Print the instruction number range for a function call history line.
   BFUN must hold at least one instruction.  */

btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  /* The range printed is inclusive on both ends.  */
  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
/* Print the source line information for a function call history line.
   Output shape is FILE:MIN,MAX (parts conditional).  */

btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
/* Get the name of a branch trace function.  Prefers the full symbol's
   print name over the minimal symbol's.  */

btrace_get_bfun_name (const struct btrace_function *bfun)
  struct minimal_symbol *msym;

    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
/* Disassemble a section of the recorded function trace.  The range is
   [BEGIN; END), i.e. END is exclusive.  */

btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;

      bfun = btrace_call_get (&it);

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	  int level = bfun->level + btinfo->level, i;

	  /* Indent by call depth.  */
	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, " ");

	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);

      ui_out_text (uiout, "\n");
/* The to_call_history method of target record-btrace.  SIZE is the number
   of function segments to print; its sign gives the direction.  */

record_btrace_call_history (struct target_ops *self, int size, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
  context = abs (size);
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position. Otherwise, we
	 start at the tail of the trace. */
      replay = btinfo->replay;
	  begin.function = replay->function;
	  begin.btinfo = btinfo;

	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction. Then we
	 expand in the other direction, as well, to fill up any remaining

	  /* We want the current position covered, as well. */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);

	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);

      begin = history->begin;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

	  covered = btrace_call_prev (&begin, context);

	  covered = btrace_call_next (&end, context);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);

    printf_unfiltered (_("At the start of the branch trace record.\n"));
    printf_unfiltered (_("At the end of the branch trace record.\n"));

  /* Remember the printed range so a repeated command continues from here.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
/* The to_call_history_range method of target record-btrace.  Print the
   recorded function segments between FROM and TO (both inclusive).  */

record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);

      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
/* The to_call_history_from method of target record-btrace.  Print up to
   SIZE function segments around segment number FROM; the sign of SIZE
   selects the direction.  */

record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
  ULONGEST begin, end, context;

  context = abs (size);
    error (_("Bad record function-call-history-size."));

      begin = from - context + 1;

      end = from + context - 1;

      /* Check for wrap-around.  */

  record_btrace_call_history_range (self, begin, end, flags);
/* The to_record_is_replaying method of target record-btrace.  Checks
   whether any non-exited thread is currently replaying.  */

record_btrace_is_replaying (struct target_ops *self)
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
/* The to_xfer_partial method of target record-btrace.  While replaying in
   read-only mode, memory writes are refused and reads are limited to
   read-only sections; otherwise the request is forwarded beneath.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops))
	case TARGET_OBJECT_MEMORY:
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
		return TARGET_XFER_UNAVAILABLE;

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		    /* Truncate the request to fit into this section.  */
		    len = min (len, section->endaddr - offset);

	    return TARGET_XFER_UNAVAILABLE;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_xfer_partial != NULL)
      return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
				   offset, len, xfered_len);

  return TARGET_XFER_UNAVAILABLE;
/* The to_insert_breakpoint method of target record-btrace.  */

record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  volatile struct gdb_exception except;

  /* Inserting breakpoints requires accessing memory. Allow it for the
     duration of this function. */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the original access mode before possibly re-throwing.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);
/* The to_remove_breakpoint method of target record-btrace.  */

record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
  volatile struct gdb_exception except;

  /* Removing breakpoints requires accessing memory. Allow it for the
     duration of this function. */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the original access mode before possibly re-throwing.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);
/* The to_fetch_registers method of target record-btrace.  While replaying,
   only the PC register is supplied (from the replay position); otherwise
   the request is forwarded to the target beneath.  */

record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
      struct target_ops *t;

      /* Not replaying - forward to the first target beneath that
	 implements the method.  */
      for (t = ops->beneath; t != NULL; t = t->beneath)
	if (t->to_fetch_registers != NULL)
	    t->to_fetch_registers (t, regcache, regno);
/* The to_store_registers method of target record-btrace.  Writing
   registers is refused while replaying.  */

record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  /* Forward to the first target beneath that implements the method.  */
  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_store_registers != NULL)
	t->to_store_registers (t, regcache, regno);
/* The to_prepare_to_store method of target record-btrace.  */

record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))

  /* Forward to the first target beneath that implements the method.  */
  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_prepare_to_store != NULL)
	t->to_prepare_to_store (t, regcache);
/* The branch trace frame cache.  */

struct btrace_frame_cache
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
/* hash_f for htab_create_alloc of bfcache.  Hashes on the frame pointer.  */

bfcache_hash (const void *arg)
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
/* eq_f for htab_create_alloc of bfcache.  Two entries are equal when they
   describe the same frame.  */

bfcache_eq (const void *arg1, const void *arg2)
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
/* Create a new btrace frame cache for FRAME, allocated on the frame
   obstack, and register it in the bfcache hash table.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
  struct btrace_frame_cache *cache;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  /* FRAME must not already have a cache entry.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
/* Extract the branch trace function from a branch trace frame.  Looks up
   FRAME in the bfcache hash table.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;

  pattern.frame = frame;

  /* NO_INSERT: a miss must not create an entry.  */
  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
/* Implement stop_reason method for record_btrace_frame_unwind.  Unwinding
   stops (UNWIND_UNAVAILABLE) once there is no caller segment left.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
/* Implement this_id method for record_btrace_frame_unwind.  Builds an
   unavailable-stack frame id from the frame's function address and the
   first segment's number.  */

record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  gdb_assert (bfun != NULL);

  /* Use the first segment of this function.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
/* Implement prev_register method for record_btrace_frame_unwind.  Only the
   PC can be unwound; it is computed from the caller's segment.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  gdb_assert (bfun != NULL);

    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
      /* Linked via a return: the caller's first instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);

      /* Otherwise: just past the caller's last instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);

      pc += gdb_insn_length (gdbarch, pc);

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
/* Implement sniffer method for record_btrace_frame_unwind.  */

record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  next = get_next_frame (this_frame);
      const struct btrace_insn_iterator *replay;

      /* No next frame: use the thread's replay position, if any.  */
      replay = tp->btrace.replay;
	bfun = replay->function;
      const struct btrace_function *callee;

      /* Otherwise derive our segment from the callee's frame; tail-call
	 links are handled by the tailcall sniffer instead.  */
      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame. Initialize the frame cache. */
  cache = bfcache_new (this_frame);

  *this_cache = cache;
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims only frames whose callee is linked via a tail call.  */

record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);

  callee = btrace_get_frame_function (next);

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame. Initialize the frame cache. */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);

  *this_cache = cache;
/* Implement dealloc_cache method for the btrace frame unwinders.  Removes
   the cache entry for this frame from the bfcache hash table.  */
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
  struct btrace_frame_cache *cache;

  /* The entry must exist - it was created by the sniffer.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   unavailable.  */

const struct frame_unwind record_btrace_frame_unwind =
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
/* The btrace frame unwinder for tail-call frames; shares all methods with
   record_btrace_frame_unwind except the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
/* Implement the to_get_unwinder method.  Returns the btrace frame
   unwinder.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
  return &record_btrace_frame_unwind;
/* Implement the to_get_tailcall_unwinder method.  Returns the btrace
   tail-call frame unwinder.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
  return &record_btrace_tailcall_frame_unwind;
/* Indicate that TP should be resumed according to FLAG.  Errors out if the
   thread is already moving.  */

record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */

  btinfo->flags |= flag;
/* Find the thread to resume given a PTID.  Returns NULL if no such thread
   exists.  */

static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
  struct thread_info *tp;

  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;

  return find_thread_ptid (ptid);
/* Start replaying a thread.  Allocates the replay iterator, stores it in
   TP's btrace info, and fixes up the stepping-related frame ids.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet. We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread. Since we need to recompute the stack, we temporarily
     set EXECUTING to false. */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying. */
  TRY_CATCH (except, RETURN_MASK_ALL)
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind. */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's. */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace. This corresponds
	 to the current instruction. */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet. */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers. */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind. */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary. */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;

  /* Restore the previous execution state. */
  set_executing (tp->ptid, executing);

  /* On error, undo the replay setup and re-throw.  */
  if (except.reason < 0)
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
1494 /* Stop replaying a thread.  Releases TP's replay iterator and flushes
   its cached registers; harmless if TP was not replaying (freeing a NULL
   replay pointer is a no-op).  */
1497 record_btrace_stop_replaying (struct thread_info *tp)
1499 struct btrace_thread_info *btinfo;
1501 btinfo = &tp->btrace;
1503 xfree (btinfo->replay);
1504 btinfo->replay = NULL;
1506 /* Make sure we're not leaving any stale registers.  */
1507 registers_changed_ptid (tp->ptid);
1510 /* The to_resume method of target record-btrace.  Records the resume
   intent (the actual stepping happens in record_btrace_wait) or forwards
   the request to the target beneath when not replaying.  */
1513 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1514 enum gdb_signal signal)
1516 struct thread_info *tp, *other;
1517 enum btrace_thread_flag flag;
1519 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1521 /* Store the execution direction of the last resume.  */
1522 record_btrace_resume_exec_dir = execution_direction;
1524 tp = record_btrace_find_resume_thread (ptid);
1526 error (_("Cannot find thread to resume."));
1528 /* Stop replaying other threads if the thread to resume is not replaying.  */
1529 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1530 ALL_NON_EXITED_THREADS (other)
1531 record_btrace_stop_replaying (other);
1533 /* As long as we're not replaying, just forward the request.  */
1534 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1536 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1537 if (ops->to_resume != NULL)
1538 return ops->to_resume (ops, ptid, step, signal);
1540 error (_("Cannot find target for stepping."));
1543 /* Compute the btrace thread flag for the requested move.  */
1545 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1547 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1549 /* At the moment, we only move a single thread.  We could also move
1550 all threads in parallel by single-stepping each resumed thread
1551 until the first runs into an event.
1552 When we do that, we would want to continue all other threads.
1553 For now, just resume one thread to not confuse to_wait.  */
1554 record_btrace_resume_thread (tp, flag);
1556 /* We just indicate the resume intent here.  The actual stepping happens in
1557 record_btrace_wait below.  */
1559 /* Async support.  */
1560 if (target_can_async_p ())
1562 target_async (inferior_event_handler, 0);
1563 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1567 /* Find a thread to move.  Prefers the thread matching PTID if it has a
   pending BTHR_MOVE request; otherwise picks any non-exited thread with a
   pending move.  */
1569 static struct thread_info *
1570 record_btrace_find_thread_to_move (ptid_t ptid)
1572 struct thread_info *tp;
1574 /* First check the parameter thread.  */
1575 tp = find_thread_ptid (ptid);
1576 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1579 /* Otherwise, find one other thread that has been resumed.  */
1580 ALL_NON_EXITED_THREADS (tp)
1581 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1587 /* Return a target_waitstatus indicating that we ran out of history.
   Used when a (reverse) step or continue hits either end of the recorded
   trace.  */
1589 static struct target_waitstatus
1590 btrace_step_no_history (void)
1592 struct target_waitstatus status;
1594 status.kind = TARGET_WAITKIND_NO_HISTORY;
1599 /* Return a target_waitstatus indicating that a step finished.  Reports a
   normal stop with SIGTRAP, mirroring what a live single-step would
   deliver.  */
1601 static struct target_waitstatus
1602 btrace_step_stopped (void)
1604 struct target_waitstatus status;
1606 status.kind = TARGET_WAITKIND_STOPPED;
1607 status.value.sig = GDB_SIGNAL_TRAP;
1612 /* Clear the record histories.  Frees the cached instruction and call
   history iterators so browsing restarts from the current replay
   position.  */
1615 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1617 xfree (btinfo->insn_history);
1618 xfree (btinfo->call_history);
1620 btinfo->insn_history = NULL;
1621 btinfo->call_history = NULL;
1624 /* Step a single thread.  Consumes TP's pending BTHR_MOVE request and
   moves the replay position accordingly, returning the resulting wait
   status.  */
1626 static struct target_waitstatus
1627 record_btrace_step_thread (struct thread_info *tp)
1629 struct btrace_insn_iterator *replay, end;
1630 struct btrace_thread_info *btinfo;
1631 struct address_space *aspace;
1632 struct inferior *inf;
1633 enum btrace_thread_flag flags;
1636 /* We can't step without an execution history.  */
1637 if (btrace_is_empty (tp))
1638 return btrace_step_no_history ();
1640 btinfo = &tp->btrace;
1641 replay = btinfo->replay;
/* Consume the move request so a later wait doesn't re-run it.  */
1643 flags = btinfo->flags & BTHR_MOVE;
1644 btinfo->flags &= ~BTHR_MOVE;
1646 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
/* An unrecognized move flag is a programming error.  */
1651 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
/* Forward single-step (presumably the BTHR_STEP case; the case label is
   on a line not shown here).  */
1654 /* We're done if we're not replaying.  */
1656 return btrace_step_no_history ();
1658 /* We are always able to step at least once.  */
1659 steps = btrace_insn_next (replay, 1);
1660 gdb_assert (steps == 1);
1662 /* Determine the end of the instruction trace.  */
1663 btrace_insn_end (&end, btinfo);
1665 /* We stop replaying if we reached the end of the trace.  */
1666 if (btrace_insn_cmp (replay, &end) == 0)
1667 record_btrace_stop_replaying (tp);
1669 return btrace_step_stopped ();
/* Reverse single-step (presumably BTHR_RSTEP).  */
1672 /* Start replaying if we're not already doing so.  */
1674 replay = record_btrace_start_replaying (tp);
1676 /* If we can't step any further, we reached the end of the history.  */
1677 steps = btrace_insn_prev (replay, 1);
1679 return btrace_step_no_history ();
1681 return btrace_step_stopped ();
/* Forward continue (presumably BTHR_CONT): step until a breakpoint or
   the end of the trace.  */
1684 /* We're done if we're not replaying.  */
1686 return btrace_step_no_history ();
1688 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1689 aspace = inf->aspace;
1691 /* Determine the end of the instruction trace.  */
1692 btrace_insn_end (&end, btinfo);
1696 const struct btrace_insn *insn;
1698 /* We are always able to step at least once.  */
1699 steps = btrace_insn_next (replay, 1);
1700 gdb_assert (steps == 1);
1702 /* We stop replaying if we reached the end of the trace.  */
1703 if (btrace_insn_cmp (replay, &end) == 0)
1705 record_btrace_stop_replaying (tp);
1706 return btrace_step_no_history ();
1709 insn = btrace_insn_get (replay);
1712 DEBUG ("stepping %d (%s) ... %s", tp->num,
1713 target_pid_to_str (tp->ptid),
1714 core_addr_to_string_nz (insn->pc));
1716 if (breakpoint_here_p (aspace, insn->pc))
1717 return btrace_step_stopped ();
/* Reverse continue (presumably BTHR_RCONT): step backwards until a
   breakpoint or the start of the history.  */
1721 /* Start replaying if we're not already doing so.  */
1723 replay = record_btrace_start_replaying (tp);
1725 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1726 aspace = inf->aspace;
1730 const struct btrace_insn *insn;
1732 /* If we can't step any further, we're done.  */
1733 steps = btrace_insn_prev (replay, 1);
1735 return btrace_step_no_history ();
1737 insn = btrace_insn_get (replay);
1740 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1741 target_pid_to_str (tp->ptid),
1742 core_addr_to_string_nz (insn->pc));
1744 if (breakpoint_here_p (aspace, insn->pc))
1745 return btrace_step_stopped ();
1750 /* The to_wait method of target record-btrace.  When replaying (or
   executing in reverse), performs the pending move for one thread;
   otherwise forwards the wait to the target beneath.  */
1753 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1754 struct target_waitstatus *status, int options)
1756 struct thread_info *tp, *other;
1758 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1760 /* As long as we're not replaying, just forward the request.  */
1761 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1763 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1764 if (ops->to_wait != NULL)
1765 return ops->to_wait (ops, ptid, status, options);
1767 error (_("Cannot find target for waiting."));
1770 /* Let's find a thread to move.  */
1771 tp = record_btrace_find_thread_to_move (ptid);
/* No thread has a pending move: report nothing to the caller.  */
1774 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1776 status->kind = TARGET_WAITKIND_IGNORE;
1777 return minus_one_ptid;
1780 /* We only move a single thread.  We're not able to correlate threads.  */
1781 *status = record_btrace_step_thread (tp);
1783 /* Stop all other threads.  */
1785 ALL_NON_EXITED_THREADS (other)
1786 other->btrace.flags &= ~BTHR_MOVE;
1788 /* Start record histories anew from the current position.  */
1789 record_btrace_clear_histories (&tp->btrace);
1791 /* We moved the replay position but did not update registers.  */
1792 registers_changed_ptid (tp->ptid);
1797 /* The to_can_execute_reverse method of target record-btrace.  Branch
   tracing always supports reverse execution over the recorded history.  */
1800 record_btrace_can_execute_reverse (struct target_ops *self)
1805 /* The to_decr_pc_after_break method of target record-btrace.  */
1808 record_btrace_decr_pc_after_break (struct target_ops *ops,
1809 struct gdbarch *gdbarch)
1811 /* When replaying, we do not actually execute the breakpoint instruction
1812 so there is no need to adjust the PC after hitting a breakpoint.  */
1813 if (record_btrace_is_replaying (ops))
/* Live execution: defer to the target beneath.  */
1816 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1819 /* The to_find_new_threads method of target record-btrace.  */
1822 record_btrace_find_new_threads (struct target_ops *ops)
1824 /* Don't expect new threads if we're replaying.  */
1825 if (record_btrace_is_replaying (ops))
1828 /* Forward the request.  */
1829 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1830 if (ops->to_find_new_threads != NULL)
1832 ops->to_find_new_threads (ops);
1837 /* The to_thread_alive method of target record-btrace.  */
1840 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1842 /* We don't add or remove threads during replay.  */
1843 if (record_btrace_is_replaying (ops))
1844 return find_thread_ptid (ptid) != NULL;
1846 /* Forward the request.  */
1847 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1848 if (ops->to_thread_alive != NULL)
1849 return ops->to_thread_alive (ops, ptid);
1854 /* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped instead.  Flushes the register cache and restarts the record
   histories from the new position.  */
1858 record_btrace_set_replay (struct thread_info *tp,
1859 const struct btrace_insn_iterator *it)
1861 struct btrace_thread_info *btinfo;
1863 btinfo = &tp->btrace;
1865 if (it == NULL || it->function == NULL)
1866 record_btrace_stop_replaying (tp);
1869 if (btinfo->replay == NULL)
1870 record_btrace_start_replaying (tp);
/* Nothing to do if we're already at the requested position.  */
1871 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1874 *btinfo->replay = *it;
1875 registers_changed_ptid (tp->ptid);
1878 /* Start anew from the new replay position.  */
1879 record_btrace_clear_histories (btinfo);
1882 /* The to_goto_record_begin method of target record-btrace.  Moves the
   replay position to the start of the trace and prints the new frame.  */
1885 record_btrace_goto_begin (struct target_ops *self)
1887 struct thread_info *tp;
1888 struct btrace_insn_iterator begin;
1890 tp = require_btrace_thread ();
1892 btrace_insn_begin (&begin, &tp->btrace);
1893 record_btrace_set_replay (tp, &begin);
1895 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1898 /* The to_goto_record_end method of target record-btrace.  Passing NULL to
   record_btrace_set_replay stops replaying, i.e. returns to the current
   (live) position, then prints the frame.  */
1901 record_btrace_goto_end (struct target_ops *ops)
1903 struct thread_info *tp;
1905 tp = require_btrace_thread ();
1907 record_btrace_set_replay (tp, NULL);
1909 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1912 /* The to_goto_record method of target record-btrace.  Moves the replay
   position to instruction number INSN; errors if the number overflows the
   iterator's unsigned int range or no such instruction exists.  */
1915 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1917 struct thread_info *tp;
1918 struct btrace_insn_iterator it;
1919 unsigned int number;
1924 /* Check for wrap-arounds.  */
1926 error (_("Instruction number out of range."));
1928 tp = require_btrace_thread ();
1930 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1932 error (_("No such instruction."));
1934 record_btrace_set_replay (tp, &it);
1936 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1939 /* The to_execution_direction target method.  Reports the direction of
   the last resume (stored by record_btrace_resume).  */
1941 static enum exec_direction_kind
1942 record_btrace_execution_direction (struct target_ops *self)
1944 return record_btrace_resume_exec_dir;
1947 /* The to_prepare_to_generate_core target method.  Sets the flag that
   makes record-btrace behave like the live target while a core file is
   being generated.  */
1950 record_btrace_prepare_to_generate_core (struct target_ops *self)
1952 record_btrace_generating_corefile = 1;
1955 /* The to_done_generating_core target method.  Clears the flag set by
   record_btrace_prepare_to_generate_core.  */
1958 record_btrace_done_generating_core (struct target_ops *self)
1960 record_btrace_generating_corefile = 0;
1963 /* Initialize the record-btrace target ops.  Fills in the static
   record_btrace_ops target vector; called once from
   _initialize_record_btrace before add_target.  */
1966 init_record_btrace_ops (void)
1968 struct target_ops *ops;
1970 ops = &record_btrace_ops;
1971 ops->to_shortname = "record-btrace";
1972 ops->to_longname = "Branch tracing target";
1973 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1974 ops->to_open = record_btrace_open;
1975 ops->to_close = record_btrace_close;
1976 ops->to_detach = record_detach;
1977 ops->to_disconnect = record_disconnect;
1978 ops->to_mourn_inferior = record_mourn_inferior;
1979 ops->to_kill = record_kill;
1980 ops->to_stop_recording = record_btrace_stop_recording;
1981 ops->to_info_record = record_btrace_info;
1982 ops->to_insn_history = record_btrace_insn_history;
1983 ops->to_insn_history_from = record_btrace_insn_history_from;
1984 ops->to_insn_history_range = record_btrace_insn_history_range;
1985 ops->to_call_history = record_btrace_call_history;
1986 ops->to_call_history_from = record_btrace_call_history_from;
1987 ops->to_call_history_range = record_btrace_call_history_range;
1988 ops->to_record_is_replaying = record_btrace_is_replaying;
1989 ops->to_xfer_partial = record_btrace_xfer_partial;
1990 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1991 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1992 ops->to_fetch_registers = record_btrace_fetch_registers;
1993 ops->to_store_registers = record_btrace_store_registers;
1994 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1995 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
1996 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
1997 ops->to_resume = record_btrace_resume;
1998 ops->to_wait = record_btrace_wait;
1999 ops->to_find_new_threads = record_btrace_find_new_threads;
2000 ops->to_thread_alive = record_btrace_thread_alive;
2001 ops->to_goto_record_begin = record_btrace_goto_begin;
2002 ops->to_goto_record_end = record_btrace_goto_end;
2003 ops->to_goto_record = record_btrace_goto;
2004 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2005 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
2006 ops->to_execution_direction = record_btrace_execution_direction;
2007 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2008 ops->to_done_generating_core = record_btrace_done_generating_core;
2009 ops->to_stratum = record_stratum;
2010 ops->to_magic = OPS_MAGIC;
2013 /* Alias for "target record".  Implements "record btrace"; takes no
   arguments.  */
2016 cmd_record_btrace_start (char *args, int from_tty)
2018 if (args != NULL && *args != 0)
2019 error (_("Invalid argument."));
2021 execute_command ("target record-btrace", from_tty);
2024 /* The "set record btrace" command.  With no subcommand, lists the
   available settings.  */
2027 cmd_set_record_btrace (char *args, int from_tty)
2029 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2032 /* The "show record btrace" command.  Shows all "record btrace"
   settings.  */
2035 cmd_show_record_btrace (char *args, int from_tty)
2037 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2040 /* The "show record btrace replay-memory-access" command.  Show hook
   installed via add_setshow_enum_cmd; prints the current setting.  */
2043 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2044 struct cmd_list_element *c, const char *value)
/* Print to FILE (the stream the CLI hands us) instead of gdb_stdout so
   that output redirection of the "show" command works.  */
2046 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2047 replay_memory_access);
2050 void _initialize_record_btrace (void);
2052 /* Initialize btrace commands. */
2055 _initialize_record_btrace (void)
2057 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2058 _("Start branch trace recording."),
2060 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2062 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2063 _("Set record options"), &set_record_btrace_cmdlist,
2064 "set record btrace ", 0, &set_record_cmdlist);
2066 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2067 _("Show record options"), &show_record_btrace_cmdlist,
2068 "show record btrace ", 0, &show_record_cmdlist);
2070 add_setshow_enum_cmd ("replay-memory-access", no_class,
2071 replay_memory_access_types, &replay_memory_access, _("\
2072 Set what memory accesses are allowed during replay."), _("\
2073 Show what memory accesses are allowed during replay."),
2074 _("Default is READ-ONLY.\n\n\
2075 The btrace record target does not trace data.\n\
2076 The memory therefore corresponds to the live target and not \
2077 to the current replay position.\n\n\
2078 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2079 When READ-WRITE, allow accesses to read-only and read-write memory during \
2081 NULL, cmd_show_replay_memory_access,
2082 &set_record_btrace_cmdlist,
2083 &show_record_btrace_cmdlist);
2085 init_record_btrace_ops ();
2086 add_target (&record_btrace_ops);
2088 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,