1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
/* File-scope state for the record-btrace target.  NOTE(review): this
   excerpt elides lines of the original file; array/brace delimiters may
   be missing from view.  */
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer *record_btrace_thread_observer;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
57 /* The currently allowed replay memory access type.  Defaults to
   read-only; temporarily switched to read-write while inserting or
   removing breakpoints (see the breakpoint methods below). */
58 static const char *replay_memory_access = replay_memory_access_read_only;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element *set_record_btrace_cmdlist;
62 static struct cmd_list_element *show_record_btrace_cmdlist;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70 /* A flag indicating that we are currently generating a core file.
   While set, replay-mode memory/register filtering is bypassed. */
71 static int record_btrace_generating_corefile;
73 /* Print a record-btrace debug message. Use do ... while (0) to avoid
74 ambiguities when used in if statements. */
76 #define DEBUG(msg, args...) \
79 if (record_debug != 0) \
80 fprintf_unfiltered (gdb_stdlog, \
81 "[record-btrace] " msg "\n", ##args); \
86 /* Update the branch trace for the current thread and return a pointer to its
89 Throws an error if there is no thread or no trace. This function never
/* NOTE(review): the excerpt elides lines here (e.g. the NULL guard that
   must precede the "No thread." error and the final return).  */
92 static struct thread_info *
93 require_btrace_thread (void)
95 struct thread_info *tp;
99 tp = find_thread_ptid (inferior_ptid);
/* Presumably reached only when TP is NULL — guard elided from view.  */
101 error (_("No thread."));
105 if (btrace_is_empty (tp))
106 error (_("No trace."));
111 /* Update the branch trace for the current thread and return a pointer to its
112 branch trace information struct.
114 Throws an error if there is no thread or no trace. This function never
/* Thin wrapper over require_btrace_thread; the return of &tp->btrace is
   elided from this excerpt.  */
117 static struct btrace_thread_info *
118 require_btrace (void)
120 struct thread_info *tp;
122 tp = require_btrace_thread ();
127 /* Enable branch tracing for one thread. Warn on errors.
   Used as a new-thread observer callback; must not throw, hence the
   TRY_CATCH that downgrades errors to warnings.  */
130 record_btrace_enable_warn (struct thread_info *tp)
132 volatile struct gdb_exception error;
134 TRY_CATCH (error, RETURN_MASK_ERROR)
/* The btrace_enable call inside the TRY_CATCH is elided from view.  */
137 if (error.message != NULL)
138 warning ("%s", error.message);
141 /* Callback function to disable branch tracing for one thread.
   ARG is the thread_info to disable; used as a cleanup callback from
   record_btrace_open.  Body largely elided from this excerpt.  */
144 record_btrace_disable_callback (void *arg)
146 struct thread_info *tp;
153 /* Enable automatic tracing of new threads by attaching a new-thread
   observer that calls record_btrace_enable_warn.  */
156 record_btrace_auto_enable (void)
158 DEBUG ("attach thread observer");
160 record_btrace_thread_observer
161 = observer_attach_new_thread (record_btrace_enable_warn);
164 /* Disable automatic tracing of new threads.  Safe to call more than
   once: a NULL observer means it was already detached.  */
167 record_btrace_auto_disable (void)
169 /* The observer may have been detached, already. */
170 if (record_btrace_thread_observer == NULL)
173 DEBUG ("detach thread observer");
175 observer_detach_new_thread (record_btrace_thread_observer);
176 record_btrace_thread_observer = NULL;
179 /* The record-btrace async event handler function.
   Forwards the async event to the regular inferior event handler.  */
182 record_btrace_handle_async_inferior_event (gdb_client_data data)
184 inferior_event_handler (INF_REG_EVENT, NULL);
187 /* The to_open method of target record-btrace.
   ARGS is an optional thread-number list restricting which threads get
   tracing enabled; enables tracing, pushes the target, and installs the
   async event handler.  Uses a cleanup chain so that tracing is disabled
   again for already-enabled threads if enabling a later one throws.  */
190 record_btrace_open (const char *args, int from_tty)
192 struct cleanup *disable_chain;
193 struct thread_info *tp;
199 if (!target_has_execution)
200 error (_("The program is not being run."));
202 if (!target_supports_btrace ())
203 error (_("Target does not support branch tracing."));
/* The non-stop check guarding this error is elided from view.  */
206 error (_("Record btrace can't debug inferior in non-stop mode."));
208 gdb_assert (record_btrace_thread_observer == NULL);
210 disable_chain = make_cleanup (null_cleanup, NULL);
211 ALL_NON_EXITED_THREADS (tp)
212 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
/* btrace_enable for TP is elided; on success, register the undo.  */
216 make_cleanup (record_btrace_disable_callback, tp);
219 record_btrace_auto_enable ();
221 push_target (&record_btrace_ops);
223 record_btrace_async_inferior_event_handler
224 = create_async_event_handler (record_btrace_handle_async_inferior_event,
226 record_btrace_generating_corefile = 0;
228 observer_notify_record_changed (current_inferior (), 1);
/* All threads enabled successfully — drop the undo cleanups.  */
230 discard_cleanups (disable_chain);
233 /* The to_stop_recording method of target record-btrace.
   Stops auto-enabling of new threads, then disables tracing for every
   thread that currently has it (the btrace_disable call is elided).  */
236 record_btrace_stop_recording (struct target_ops *self)
238 struct thread_info *tp;
240 DEBUG ("stop recording");
242 record_btrace_auto_disable ();
244 ALL_NON_EXITED_THREADS (tp)
245 if (tp->btrace.target != NULL)
249 /* The to_close method of target record-btrace.
   Tears down the async handler, the new-thread observer, and any btrace
   state still attached to live threads.  */
252 record_btrace_close (struct target_ops *self)
254 struct thread_info *tp;
256 if (record_btrace_async_inferior_event_handler != NULL)
257 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
259 /* Make sure automatic recording gets disabled even if we did not stop
260 recording before closing the record-btrace target. */
261 record_btrace_auto_disable ();
263 /* We should have already stopped recording.
264 Tear down btrace in case we have not. */
265 ALL_NON_EXITED_THREADS (tp)
266 btrace_teardown (tp);
269 /* The to_info_record method of target record-btrace.
   Prints the number of recorded instructions and function segments for
   the current thread, plus the replay position if replaying.  */
272 record_btrace_info (struct target_ops *self)
274 struct btrace_thread_info *btinfo;
275 struct thread_info *tp;
276 unsigned int insns, calls;
280 tp = find_thread_ptid (inferior_ptid);
/* Presumably reached only when TP is NULL — guard elided from view.  */
282 error (_("No thread."));
289 btinfo = &tp->btrace;
/* insns/calls presumably initialized to 0 on elided lines — TODO confirm.  */
291 if (!btrace_is_empty (tp))
293 struct btrace_call_iterator call;
294 struct btrace_insn_iterator insn;
/* The one-past-the-end iterator minus one gives the last element, whose
   number is the total count.  */
296 btrace_call_end (&call, btinfo);
297 btrace_call_prev (&call, 1);
298 calls = btrace_call_number (&call);
300 btrace_insn_end (&insn, btinfo);
301 btrace_insn_prev (&insn, 1);
302 insns = btrace_insn_number (&insn);
305 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
306 "%d (%s).\n"), insns, calls, tp->num,
307 target_pid_to_str (tp->ptid));
309 if (btrace_is_replaying (tp))
310 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
311 btrace_insn_number (btinfo->replay));
314 /* Print an unsigned int.  Convenience wrapper: ui_out has no native
   unsigned-int field helper, so format VAL with "%u" into field FLD.  */
317 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
319 ui_out_field_fmt (uiout, fld, "%u", val);
322 /* Disassemble a section of the recorded instruction trace.
   Prints each instruction in [BEGIN; END) as "<index>\t<disassembly>"
   on UIOUT; FLAGS are gdb_disassembly flags.  */
325 btrace_insn_history (struct ui_out *uiout,
326 const struct btrace_insn_iterator *begin,
327 const struct btrace_insn_iterator *end, int flags)
329 struct gdbarch *gdbarch;
330 struct btrace_insn_iterator it;
332 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
333 btrace_insn_number (end));
335 gdbarch = target_gdbarch ();
337 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
339 const struct btrace_insn *insn;
341 insn = btrace_insn_get (&it);
343 /* Print the instruction index. */
344 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
345 ui_out_text (uiout, "\t");
347 /* Disassembly with '/m' flag may not produce the expected result.
   Disassemble exactly one instruction at INSN->pc (the [pc, pc+1)
   range selects a single instruction).  */
349 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
353 /* The to_insn_history method of target record-btrace.
   Shows abs(SIZE) instructions around the current position: on the
   first request, centered on the replay position (or at the trace
   tail); on repeated requests, continuing backward (SIZE < 0) or
   forward (SIZE > 0) from the previously shown range.  */
356 record_btrace_insn_history (struct target_ops *self, int size, int flags)
358 struct btrace_thread_info *btinfo;
359 struct btrace_insn_history *history;
360 struct btrace_insn_iterator begin, end;
361 struct cleanup *uiout_cleanup;
362 struct ui_out *uiout;
363 unsigned int context, covered;
365 uiout = current_uiout;
366 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
368 context = abs (size);
/* Guard (context == 0) elided from view.  */
370 error (_("Bad record instruction-history-size."));
372 btinfo = require_btrace ();
373 history = btinfo->insn_history;
/* First request (history == NULL): establish the initial window.  */
376 struct btrace_insn_iterator *replay;
378 DEBUG ("insn-history (0x%x): %d", flags, size);
380 /* If we're replaying, we start at the replay position. Otherwise, we
381 start at the tail of the trace. */
382 replay = btinfo->replay;
/* Branch using REPLAY elided; fall back to the trace end.  */
386 btrace_insn_end (&begin, btinfo);
388 /* We start from here and expand in the requested direction. Then we
389 expand in the other direction, as well, to fill up any remaining
394 /* We want the current position covered, as well. */
395 covered = btrace_insn_next (&end, 1);
396 covered += btrace_insn_prev (&begin, context - covered);
397 covered += btrace_insn_next (&end, context - covered);
/* Forward case of the initial window.  */
401 covered = btrace_insn_next (&end, context);
402 covered += btrace_insn_prev (&begin, context - covered);
/* Repeated request: continue from the stored window.  */
407 begin = history->begin;
410 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
411 btrace_insn_number (&begin), btrace_insn_number (&end));
/* Backward continuation.  */
416 covered = btrace_insn_prev (&begin, context);
/* Forward continuation.  */
421 covered = btrace_insn_next (&end, context);
/* Print what we covered, or say why nothing was printed.  */
426 btrace_insn_history (uiout, &begin, &end, flags);
430 printf_unfiltered (_("At the start of the branch trace record.\n"));
432 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the window for the next repeated request.  */
435 btrace_set_insn_history (btinfo, &begin, &end);
436 do_cleanups (uiout_cleanup);
439 /* The to_insn_history_range method of target record-btrace.
   Shows instructions [FROM; TO] (inclusive).  Errors on empty or
   wrapped ranges; silently truncates TO past the end of the trace.  */
442 record_btrace_insn_history_range (struct target_ops *self,
443 ULONGEST from, ULONGEST to, int flags)
445 struct btrace_thread_info *btinfo;
446 struct btrace_insn_history *history;
447 struct btrace_insn_iterator begin, end;
448 struct cleanup *uiout_cleanup;
449 struct ui_out *uiout;
450 unsigned int low, high;
453 uiout = current_uiout;
454 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* low/high are FROM/TO narrowed to unsigned int (assignment elided).  */
459 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
461 /* Check for wrap-arounds. */
462 if (low != from || high != to)
463 error (_("Bad range."));
/* Guard (high < low) elided from view.  */
466 error (_("Bad range."));
468 btinfo = require_btrace ();
470 found = btrace_find_insn_by_number (&begin, btinfo, low);
/* Presumably reached only when !found — guard elided from view.  */
472 error (_("Range out of bounds."));
474 found = btrace_find_insn_by_number (&end, btinfo, high);
/* !found branch: clamp END to the end of the trace.  */
477 /* Silently truncate the range. */
478 btrace_insn_end (&end, btinfo);
482 /* We want both begin and end to be inclusive.
   Internally [begin; end) is half-open, so advance END by one.  */
483 btrace_insn_next (&end, 1);
486 btrace_insn_history (uiout, &begin, &end, flags);
487 btrace_set_insn_history (btinfo, &begin, &end);
489 do_cleanups (uiout_cleanup);
492 /* The to_insn_history_from method of target record-btrace.
   Shows abs(SIZE) instructions ending (SIZE < 0) or starting
   (SIZE > 0) at instruction FROM, by delegating to the range method.
   Underflow/overflow clamping logic is partly elided from view.  */
495 record_btrace_insn_history_from (struct target_ops *self,
496 ULONGEST from, int size, int flags)
498 ULONGEST begin, end, context;
500 context = abs (size);
/* Guard (context == 0) elided from view.  */
502 error (_("Bad record instruction-history-size."))
511 begin = from - context + 1;
516 end = from + context - 1;
518 /* Check for wrap-around. */
523 record_btrace_insn_history_range (self, begin, end, flags);
526 /* Print the instruction number range for a function call history line.
   Emits "<first>,<last>" (inclusive) for the instructions belonging to
   function segment BFUN.  */
529 btrace_call_history_insn_range (struct ui_out *uiout,
530 const struct btrace_function *bfun)
532 unsigned int begin, end, size;
534 size = VEC_length (btrace_insn_s, bfun->insn);
535 gdb_assert (size > 0);
537 begin = bfun->insn_offset;
538 end = begin + size - 1;
540 ui_out_field_uint (uiout, "insn begin", begin);
541 ui_out_text (uiout, ",");
542 ui_out_field_uint (uiout, "insn end", end);
545 /* Print the source line information for a function call history line.
   Emits "<file>:<min line>[,<max line>]" for BFUN; the symbol lookup
   and the begin/end comparison are partly elided from this excerpt.  */
548 btrace_call_history_src_line (struct ui_out *uiout,
549 const struct btrace_function *bfun)
558 ui_out_field_string (uiout, "file",
559 symtab_to_filename_for_display (symbol_symtab (sym)));
561 begin = bfun->lbegin;
567 ui_out_text (uiout, ":");
568 ui_out_field_int (uiout, "min line", begin);
/* Only printed when end differs from begin (check elided).  */
573 ui_out_text (uiout, ",");
574 ui_out_field_int (uiout, "max line", end);
577 /* Get the name of a branch trace function.
   Prefers the full symbol's print name, falls back to the minimal
   symbol's, and (on elided lines) presumably to "??" when neither is
   available — TODO confirm against the full source.  */
580 btrace_get_bfun_name (const struct btrace_function *bfun)
582 struct minimal_symbol *msym;
592 return SYMBOL_PRINT_NAME (sym);
593 else if (msym != NULL)
594 return MSYMBOL_PRINT_NAME (msym);
599 /* Disassemble a section of the recorded function trace.
   Prints one line per function segment in [BEGIN; END): index,
   optional call-depth indentation, function name, and — depending on
   FLAGS — the instruction range and source line info.  */
602 btrace_call_history (struct ui_out *uiout,
603 const struct btrace_thread_info *btinfo,
604 const struct btrace_call_iterator *begin,
605 const struct btrace_call_iterator *end,
606 enum record_print_flag flags)
608 struct btrace_call_iterator it;
610 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
611 btrace_call_number (end));
613 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
615 const struct btrace_function *bfun;
616 struct minimal_symbol *msym;
619 bfun = btrace_call_get (&it);
/* sym/msym lookups for BFUN elided from view.  */
623 /* Print the function index. */
624 ui_out_field_uint (uiout, "index", bfun->number);
625 ui_out_text (uiout, "\t");
627 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
/* Indent by the absolute call depth (bfun->level is relative to the
   thread's base level).  */
629 int level = bfun->level + btinfo->level, i;
631 for (i = 0; i < level; ++i)
632 ui_out_text (uiout, " ");
/* Condition (sym != NULL) elided from view.  */
636 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
637 else if (msym != NULL)
638 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
639 else if (!ui_out_is_mi_like_p (uiout))
640 ui_out_field_string (uiout, "function", "??");
642 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
644 ui_out_text (uiout, _("\tinst "));
645 btrace_call_history_insn_range (uiout, bfun);
648 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
650 ui_out_text (uiout, _("\tat "));
651 btrace_call_history_src_line (uiout, bfun);
654 ui_out_text (uiout, "\n");
658 /* The to_call_history method of target record-btrace.
   Function-level counterpart of record_btrace_insn_history: shows
   abs(SIZE) function segments around the current position on the first
   request, or continues the previously shown window on repeats.  */
661 record_btrace_call_history (struct target_ops *self, int size, int flags)
663 struct btrace_thread_info *btinfo;
664 struct btrace_call_history *history;
665 struct btrace_call_iterator begin, end;
666 struct cleanup *uiout_cleanup;
667 struct ui_out *uiout;
668 unsigned int context, covered;
670 uiout = current_uiout;
671 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
673 context = abs (size);
/* Guard (context == 0) elided from view.  */
675 error (_("Bad record function-call-history-size."));
677 btinfo = require_btrace ();
678 history = btinfo->call_history;
/* First request (history == NULL): establish the initial window.  */
681 struct btrace_insn_iterator *replay;
683 DEBUG ("call-history (0x%x): %d", flags, size);
685 /* If we're replaying, we start at the replay position. Otherwise, we
686 start at the tail of the trace. */
687 replay = btinfo->replay;
/* Replaying: seed the call iterator from the replay position.  */
690 begin.function = replay->function;
691 begin.btinfo = btinfo;
/* Not replaying: start at the trace end.  */
694 btrace_call_end (&begin, btinfo);
696 /* We start from here and expand in the requested direction. Then we
697 expand in the other direction, as well, to fill up any remaining
702 /* We want the current position covered, as well. */
703 covered = btrace_call_next (&end, 1);
704 covered += btrace_call_prev (&begin, context - covered);
705 covered += btrace_call_next (&end, context - covered);
/* Forward case of the initial window.  */
709 covered = btrace_call_next (&end, context);
710 covered += btrace_call_prev (&begin, context- covered);
/* Repeated request: continue from the stored window.  */
715 begin = history->begin;
718 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
719 btrace_call_number (&begin), btrace_call_number (&end));
/* Backward continuation.  */
724 covered = btrace_call_prev (&begin, context);
/* Forward continuation.  */
729 covered = btrace_call_next (&end, context);
/* Print what we covered, or say why nothing was printed.  */
734 btrace_call_history (uiout, btinfo, &begin, &end, flags);
738 printf_unfiltered (_("At the start of the branch trace record.\n"));
740 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the window for the next repeated request.  */
743 btrace_set_call_history (btinfo, &begin, &end);
744 do_cleanups (uiout_cleanup);
747 /* The to_call_history_range method of target record-btrace.
   Shows function segments [FROM; TO] (inclusive).  Errors on empty or
   wrapped ranges; silently truncates TO past the end of the trace.  */
750 record_btrace_call_history_range (struct target_ops *self,
751 ULONGEST from, ULONGEST to, int flags)
753 struct btrace_thread_info *btinfo;
754 struct btrace_call_history *history;
755 struct btrace_call_iterator begin, end;
756 struct cleanup *uiout_cleanup;
757 struct ui_out *uiout;
758 unsigned int low, high;
761 uiout = current_uiout;
762 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* low/high are FROM/TO narrowed to unsigned int (assignment elided).  */
767 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
769 /* Check for wrap-arounds. */
770 if (low != from || high != to)
771 error (_("Bad range."));
/* Guard (high < low) elided from view.  */
774 error (_("Bad range."));
776 btinfo = require_btrace ();
778 found = btrace_find_call_by_number (&begin, btinfo, low);
/* Presumably reached only when !found — guard elided from view.  */
780 error (_("Range out of bounds."));
782 found = btrace_find_call_by_number (&end, btinfo, high);
/* !found branch: clamp END to the end of the trace.  */
785 /* Silently truncate the range. */
786 btrace_call_end (&end, btinfo);
790 /* We want both begin and end to be inclusive.
   Internally [begin; end) is half-open, so advance END by one.  */
791 btrace_call_next (&end, 1);
794 btrace_call_history (uiout, btinfo, &begin, &end, flags);
795 btrace_set_call_history (btinfo, &begin, &end);
797 do_cleanups (uiout_cleanup);
800 /* The to_call_history_from method of target record-btrace.
   Shows abs(SIZE) function segments ending (SIZE < 0) or starting
   (SIZE > 0) at segment FROM, by delegating to the range method.
   Underflow/overflow clamping logic is partly elided from view.  */
803 record_btrace_call_history_from (struct target_ops *self,
804 ULONGEST from, int size, int flags)
806 ULONGEST begin, end, context;
808 context = abs (size);
/* Guard (context == 0) elided from view.  */
810 error (_("Bad record function-call-history-size."));
819 begin = from - context + 1;
824 end = from + context - 1;
826 /* Check for wrap-around. */
831 record_btrace_call_history_range (self, begin, end, flags);
834 /* The to_record_is_replaying method of target record-btrace.
   Returns non-zero iff any live thread is currently replaying
   (the return statements are elided from this excerpt).  */
837 record_btrace_is_replaying (struct target_ops *self)
839 struct thread_info *tp;
841 ALL_NON_EXITED_THREADS (tp)
842 if (btrace_is_replaying (tp))
848 /* The to_xfer_partial method of target record-btrace.
   While replaying with read-only replay memory access (and not
   generating a core file), rejects memory writes and restricts reads
   to read-only sections — recorded history cannot reproduce memory
   contents that changed after the recorded point.  Otherwise forwards
   the request to the target beneath.  */
850 static enum target_xfer_status
851 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
852 const char *annex, gdb_byte *readbuf,
853 const gdb_byte *writebuf, ULONGEST offset,
854 ULONGEST len, ULONGEST *xfered_len)
856 struct target_ops *t;
858 /* Filter out requests that don't make sense during replay. */
859 if (replay_memory_access == replay_memory_access_read_only
860 && !record_btrace_generating_corefile
861 && record_btrace_is_replaying (ops))
/* switch (object) — other cases elided from view.  */
865 case TARGET_OBJECT_MEMORY:
867 struct target_section *section;
869 /* We do not allow writing memory in general. */
870 if (writebuf != NULL)
/* *xfered_len update elided from view.  */
873 return TARGET_XFER_UNAVAILABLE;
876 /* We allow reading readonly memory. */
877 section = target_section_by_addr (ops, offset);
880 /* Check if the section we found is readonly. */
881 if ((bfd_get_section_flags (section->the_bfd_section->owner,
882 section->the_bfd_section)
883 & SEC_READONLY) != 0)
885 /* Truncate the request to fit into this section. */
886 len = min (len, section->endaddr - offset);
/* Non-read-only section while replaying: data is unavailable.  */
892 return TARGET_XFER_UNAVAILABLE;
897 /* Forward the request. */
/* ops presumably advanced to the target beneath on an elided line.  */
899 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
900 offset, len, xfered_len);
903 /* The to_insert_breakpoint method of target record-btrace.
   Temporarily lifts the read-only replay memory restriction so the
   breakpoint shadow bytes can be written, then restores it even if the
   insertion throws.  */
906 record_btrace_insert_breakpoint (struct target_ops *ops,
907 struct gdbarch *gdbarch,
908 struct bp_target_info *bp_tgt)
910 volatile struct gdb_exception except;
/* Locals `old` and `ret` declared on elided lines.  */
914 /* Inserting breakpoints requires accessing memory. Allow it for the
915 duration of this function. */
916 old = replay_memory_access;
917 replay_memory_access = replay_memory_access_read_write;
920 TRY_CATCH (except, RETURN_MASK_ALL)
921 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
/* Restore before possibly rethrowing.  */
923 replay_memory_access = old;
925 if (except.reason < 0)
926 throw_exception (except);
931 /* The to_remove_breakpoint method of target record-btrace.
   Mirror image of record_btrace_insert_breakpoint: temporarily allows
   read-write replay memory access while the shadow bytes are restored.  */
934 record_btrace_remove_breakpoint (struct target_ops *ops,
935 struct gdbarch *gdbarch,
936 struct bp_target_info *bp_tgt)
938 volatile struct gdb_exception except;
/* Locals `old` and `ret` declared on elided lines.  */
942 /* Removing breakpoints requires accessing memory. Allow it for the
943 duration of this function. */
944 old = replay_memory_access;
945 replay_memory_access = replay_memory_access_read_write;
948 TRY_CATCH (except, RETURN_MASK_ALL)
949 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
/* Restore before possibly rethrowing.  */
951 replay_memory_access = old;
953 if (except.reason < 0)
954 throw_exception (except);
959 /* The to_fetch_registers method of target record-btrace.
   While replaying, only the PC is known (it comes from the recorded
   instruction at the replay position); all other registers are
   unavailable.  When not replaying, forwards to the target beneath.  */
962 record_btrace_fetch_registers (struct target_ops *ops,
963 struct regcache *regcache, int regno)
965 struct btrace_insn_iterator *replay;
966 struct thread_info *tp;
968 tp = find_thread_ptid (inferior_ptid);
969 gdb_assert (tp != NULL);
971 replay = tp->btrace.replay;
972 if (replay != NULL && !record_btrace_generating_corefile)
974 const struct btrace_insn *insn;
975 struct gdbarch *gdbarch;
/* Local `pcreg` declared on an elided line.  */
978 gdbarch = get_regcache_arch (regcache);
979 pcreg = gdbarch_pc_regnum (gdbarch);
983 /* We can only provide the PC register.
   Requests for other specific registers are rejected (the handling
   of that case is elided from view).  */
984 if (regno >= 0 && regno != pcreg)
987 insn = btrace_insn_get (replay);
988 gdb_assert (insn != NULL);
990 regcache_raw_supply (regcache, regno, &insn->pc);
/* Not replaying: delegate to the target beneath.  */
994 struct target_ops *t = ops->beneath;
996 t->to_fetch_registers (t, regcache, regno);
1000 /* The to_store_registers method of target record-btrace.
   Refuses to write registers while replaying (history is immutable);
   otherwise forwards to the target beneath.  */
1003 record_btrace_store_registers (struct target_ops *ops,
1004 struct regcache *regcache, int regno)
1006 struct target_ops *t;
1008 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1009 error (_("This record target does not allow writing registers."));
1011 gdb_assert (may_write_registers != 0);
/* t presumably set to ops->beneath on an elided line.  */
1014 t->to_store_registers (t, regcache, regno);
1017 /* The to_prepare_to_store method of target record-btrace.
   No-op while replaying (stores are rejected anyway); otherwise
   forwards to the target beneath.  */
1020 record_btrace_prepare_to_store (struct target_ops *ops,
1021 struct regcache *regcache)
1023 struct target_ops *t;
1025 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
/* t presumably set to ops->beneath on an elided line.  */
1029 t->to_prepare_to_store (t, regcache);
1032 /* The branch trace frame cache.
   One instance per btrace frame; looked up by frame_info pointer via
   the BFCACHE hash table below.  */
1034 struct btrace_frame_cache
/* The thread this frame belongs to.  */
1037 struct thread_info *tp;
1039 /* The frame info. */
1040 struct frame_info *frame;
1042 /* The branch trace function segment. */
1043 const struct btrace_function *bfun;
1046 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1048 static htab_t bfcache;
1050 /* hash_f for htab_create_alloc of bfcache.
   Hashes by the frame_info pointer, matching bfcache_eq below.  */
1053 bfcache_hash (const void *arg)
1055 const struct btrace_frame_cache *cache = arg;
1057 return htab_hash_pointer (cache->frame);
1060 /* eq_f for htab_create_alloc of bfcache.
   Two entries are equal iff they describe the same frame.  */
1063 bfcache_eq (const void *arg1, const void *arg2)
1065 const struct btrace_frame_cache *cache1 = arg1;
1066 const struct btrace_frame_cache *cache2 = arg2;
1068 return cache1->frame == cache2->frame;
1071 /* Create a new btrace frame cache for FRAME.
   Allocates the cache on the frame obstack and registers it in BFCACHE;
   asserts FRAME was not already registered.  Return elided from view.  */
1073 static struct btrace_frame_cache *
1074 bfcache_new (struct frame_info *frame)
1076 struct btrace_frame_cache *cache;
/* Local `slot` declared on an elided line.  */
1079 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1080 cache->frame = frame;
1082 slot = htab_find_slot (bfcache, cache, INSERT);
1083 gdb_assert (*slot == NULL);
1089 /* Extract the branch trace function from a branch trace frame.
   Looks FRAME up in BFCACHE; the NULL-slot handling and return are
   elided from this excerpt.  */
1091 static const struct btrace_function *
1092 btrace_get_frame_function (struct frame_info *frame)
1094 const struct btrace_frame_cache *cache;
1095 const struct btrace_function *bfun;
1096 struct btrace_frame_cache pattern;
/* Local `slot` declared on an elided line.  */
1099 pattern.frame = frame;
1101 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1109 /* Implement stop_reason method for record_btrace_frame_unwind.
   Unwinding stops (UNWIND_UNAVAILABLE) once the function segment has
   no recorded caller.  */
1111 static enum unwind_stop_reason
1112 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1115 const struct btrace_frame_cache *cache;
1116 const struct btrace_function *bfun;
1118 cache = *this_cache;
/* bfun presumably taken from cache->bfun on an elided line.  */
1120 gdb_assert (bfun != NULL);
1122 if (bfun->up == NULL)
1123 return UNWIND_UNAVAILABLE;
1125 return UNWIND_NO_REASON;
1128 /* Implement this_id method for record_btrace_frame_unwind.
   Builds an unavailable-stack frame id from the frame's function start
   address plus the first segment number of the function, so distinct
   recorded invocations get distinct ids.  */
1131 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1132 struct frame_id *this_id)
1134 const struct btrace_frame_cache *cache;
1135 const struct btrace_function *bfun;
1136 CORE_ADDR code, special;
1138 cache = *this_cache;
/* bfun presumably taken from cache->bfun on an elided line.  */
1141 gdb_assert (bfun != NULL);
/* Walk back to the first segment of this function invocation.  */
1143 while (bfun->segment.prev != NULL)
1144 bfun = bfun->segment.prev;
1146 code = get_frame_func (this_frame);
1147 special = bfun->number;
1149 *this_id = frame_id_build_unavailable_stack_special (code, special);
1151 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1152 btrace_get_bfun_name (cache->bfun),
1153 core_addr_to_string_nz (this_id->code_addr),
1154 core_addr_to_string_nz (this_id->special_addr));
1157 /* Implement prev_register method for record_btrace_frame_unwind.
   Only the PC can be unwound from branch trace: for a return link it
   is the first instruction of the caller segment; for a call link it
   is the instruction after the caller's last recorded instruction.
   Every other register is reported unavailable.  */
1159 static struct value *
1160 record_btrace_frame_prev_register (struct frame_info *this_frame,
1164 const struct btrace_frame_cache *cache;
1165 const struct btrace_function *bfun, *caller;
1166 const struct btrace_insn *insn;
1167 struct gdbarch *gdbarch;
/* Locals `pcreg` and `pc` declared on elided lines.  */
1171 gdbarch = get_frame_arch (this_frame);
1172 pcreg = gdbarch_pc_regnum (gdbarch);
1173 if (pcreg < 0 || regnum != pcreg)
1174 throw_error (NOT_AVAILABLE_ERROR,
1175 _("Registers are not available in btrace record history"));
1177 cache = *this_cache;
/* bfun presumably taken from cache->bfun on an elided line.  */
1179 gdb_assert (bfun != NULL);
/* Caller lookup and NULL check elided from view.  */
1183 throw_error (NOT_AVAILABLE_ERROR,
1184 _("No caller in btrace record history"));
1186 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
/* Return link: PC is the caller's first recorded instruction.  */
1188 insn = VEC_index (btrace_insn_s, caller->insn, 0);
/* Call link: PC is the address after the caller's last instruction.  */
1193 insn = VEC_last (btrace_insn_s, caller->insn);
1196 pc += gdb_insn_length (gdbarch, pc);
1199 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1200 btrace_get_bfun_name (bfun), bfun->level,
1201 core_addr_to_string_nz (pc));
1203 return frame_unwind_got_address (this_frame, regnum, pc);
1206 /* Implement sniffer method for record_btrace_frame_unwind.
   Claims the frame when the current thread is replaying: the innermost
   frame maps to the replay position's function segment; outer frames
   map to the caller of the next (callee) frame's segment, except for
   tail calls which are handled by the tailcall sniffer below.  */
1209 record_btrace_frame_sniffer (const struct frame_unwind *self,
1210 struct frame_info *this_frame,
1213 const struct btrace_function *bfun;
1214 struct btrace_frame_cache *cache;
1215 struct thread_info *tp;
1216 struct frame_info *next;
1218 /* THIS_FRAME does not contain a reference to its thread. */
1219 tp = find_thread_ptid (inferior_ptid);
1220 gdb_assert (tp != NULL);
/* bfun presumably initialized to NULL on an elided line.  */
1223 next = get_next_frame (this_frame);
/* next == NULL branch (innermost frame) elided from view.  */
1226 const struct btrace_insn_iterator *replay;
1228 replay = tp->btrace.replay;
/* replay != NULL check elided from view.  */
1230 bfun = replay->function;
/* Outer frame: derive BFUN from the callee frame's cache.  */
1234 const struct btrace_function *callee;
1236 callee = btrace_get_frame_function (next);
1237 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
/* bfun = callee->up; NULL check elided from view.  */
1244 DEBUG ("[frame] sniffed frame for %s on level %d",
1245 btrace_get_bfun_name (bfun), bfun->level);
1247 /* This is our frame. Initialize the frame cache. */
1248 cache = bfcache_new (this_frame);
/* cache->tp / cache->bfun assignments elided from view.  */
1252 *this_cache = cache;
1256 /* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims the frame when the next (callee) frame's function segment was
   entered via a tail call, so the caller shown is the tail-calling
   function rather than its own caller.  */
1259 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1260 struct frame_info *this_frame,
1263 const struct btrace_function *bfun, *callee;
1264 struct btrace_frame_cache *cache;
1265 struct frame_info *next;
1267 next = get_next_frame (this_frame);
/* next == NULL check elided from view.  */
1271 callee = btrace_get_frame_function (next);
/* callee == NULL check elided from view.  */
1275 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
/* bfun = callee->up; NULL check elided from view.  */
1282 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1283 btrace_get_bfun_name (bfun), bfun->level);
1285 /* This is our frame. Initialize the frame cache. */
1286 cache = bfcache_new (this_frame);
1287 cache->tp = find_thread_ptid (inferior_ptid);
/* cache->bfun assignment elided from view.  */
1290 *this_cache = cache;
/* Implement dealloc_cache for the btrace frame unwinders: drop the
   frame's entry from the BFCACHE hash table.  The cache itself lives
   on the frame obstack and needs no explicit free.  */
1295 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1297 struct btrace_frame_cache *cache;
/* cache = this_cache; local `slot` — elided from view.  */
1302 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1303 gdb_assert (slot != NULL);
1305 htab_remove_elt (bfcache, cache);
1308 /* btrace recording does not store previous memory content, nor the stack
1309 frames' content. Any unwinding would return erroneous results as the stack
1310 contents no longer match the changed PC value restored from history.
1311 Therefore this unwinder reports any possibly unwound registers as
/* "unavailable" — continuation elided from view.  Normal-frame unwinder.  */
1314 const struct frame_unwind record_btrace_frame_unwind =
/* Frame type field (NORMAL_FRAME) elided from view.  */
1317 record_btrace_frame_unwind_stop_reason,
1318 record_btrace_frame_this_id,
1319 record_btrace_frame_prev_register,
1321 record_btrace_frame_sniffer,
1322 record_btrace_frame_dealloc_cache
/* Tailcall-frame variant: identical except for the sniffer.  */
1325 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1328 record_btrace_frame_unwind_stop_reason,
1329 record_btrace_frame_this_id,
1330 record_btrace_frame_prev_register,
1332 record_btrace_tailcall_frame_sniffer,
1333 record_btrace_frame_dealloc_cache
1336 /* Implement the to_get_unwinder method.
   Returns the btrace normal-frame unwinder defined above.  */
1338 static const struct frame_unwind *
1339 record_btrace_to_get_unwinder (struct target_ops *self)
1341 return &record_btrace_frame_unwind;
1344 /* Implement the to_get_tailcall_unwinder method.
   Returns the btrace tailcall-frame unwinder defined above.  */
1346 static const struct frame_unwind *
1347 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1349 return &record_btrace_tailcall_frame_unwind;
1352 /* Indicate that TP should be resumed according to FLAG.
   Records the btrace_thread_flag in TP's btrace info; errors if a move
   was already requested for TP.  */
1355 record_btrace_resume_thread (struct thread_info *tp,
1356 enum btrace_thread_flag flag)
1358 struct btrace_thread_info *btinfo;
1360 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1362 btinfo = &tp->btrace;
1364 if ((btinfo->flags & BTHR_MOVE) != 0)
1365 error (_("Thread already moving."));
1367 /* Fetch the latest branch trace.
   The btrace_fetch call is elided from this excerpt.  */
1370 btinfo->flags |= flag;
1373 /* Find the thread to resume given a PTID.
   A wildcard (minus-one) or process-wide PTID resolves to the current
   thread; may return NULL if no matching thread exists.  */
1375 static struct thread_info *
1376 record_btrace_find_resume_thread (ptid_t ptid)
1378 struct thread_info *tp;
1380 /* When asked to resume everything, we pick the current thread. */
1381 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1382 ptid = inferior_ptid;
1384 return find_thread_ptid (ptid);
1387 /* Start replaying a thread.
   Allocates TP's replay iterator positioned at the end of the trace
   (the current instruction) and fixes up the stepping frame ids, which
   are computed differently once the btrace unwinder takes over.  On
   error, undoes the replay state and rethrows.  */
1389 static struct btrace_insn_iterator *
1390 record_btrace_start_replaying (struct thread_info *tp)
1392 volatile struct gdb_exception except;
1393 struct btrace_insn_iterator *replay;
1394 struct btrace_thread_info *btinfo;
/* Local `executing` declared on an elided line.  */
1397 btinfo = &tp->btrace;
1400 /* We can't start replaying without trace. */
1401 if (btinfo->begin == NULL)
1404 /* Clear the executing flag to allow changes to the current frame.
1405 We are not actually running, yet. We just started a reverse execution
1406 command or a record goto command.
1407 For the latter, EXECUTING is false and this has no effect.
1408 For the former, EXECUTING is true and we're in to_wait, about to
1409 move the thread. Since we need to recompute the stack, we temporarily
1410 set EXECUTING to false. */
1411 executing = is_executing (tp->ptid);
1412 set_executing (tp->ptid, 0);
1414 /* GDB stores the current frame_id when stepping in order to detect steps
1416 Since frames are computed differently when we're replaying, we need to
1417 recompute those stored frames and fix them up so we can still detect
1418 subroutines after we started replaying. */
1419 TRY_CATCH (except, RETURN_MASK_ALL)
1421 struct frame_info *frame;
1422 struct frame_id frame_id;
1423 int upd_step_frame_id, upd_step_stack_frame_id;
1425 /* The current frame without replaying - computed via normal unwind. */
1426 frame = get_current_frame ();
1427 frame_id = get_frame_id (frame);
1429 /* Check if we need to update any stepping-related frame id's. */
1430 upd_step_frame_id = frame_id_eq (frame_id,
1431 tp->control.step_frame_id);
1432 upd_step_stack_frame_id = frame_id_eq (frame_id,
1433 tp->control.step_stack_frame_id);
1435 /* We start replaying at the end of the branch trace. This corresponds
1436 to the current instruction. */
1437 replay = xmalloc (sizeof (*replay));
1438 btrace_insn_end (replay, btinfo);
1440 /* We're not replaying, yet. */
1441 gdb_assert (btinfo->replay == NULL);
1442 btinfo->replay = replay;
1444 /* Make sure we're not using any stale registers. */
1445 registers_changed_ptid (tp->ptid);
1447 /* The current frame with replaying - computed via btrace unwind. */
1448 frame = get_current_frame ();
1449 frame_id = get_frame_id (frame);
1451 /* Replace stepping related frames where necessary. */
1452 if (upd_step_frame_id)
1453 tp->control.step_frame_id = frame_id;
1454 if (upd_step_stack_frame_id)
1455 tp->control.step_stack_frame_id = frame_id;
1458 /* Restore the previous execution state. */
1459 set_executing (tp->ptid, executing);
/* On error, roll back the replay state before rethrowing.  */
1461 if (except.reason < 0)
1463 xfree (btinfo->replay);
1464 btinfo->replay = NULL;
1466 registers_changed_ptid (tp->ptid);
1468 throw_exception (except);
1474 /* Stop replaying a thread.  Frees TP's replay iterator (xfree on NULL is a
   no-op, so this is safe when TP was not replaying) and flushes the regcache
   so register reads go back to the live target.  */
1477 record_btrace_stop_replaying (struct thread_info *tp)
1479 struct btrace_thread_info *btinfo;
1481 btinfo = &tp->btrace;
1483 xfree (btinfo->replay);
1484 btinfo->replay = NULL;
1486 /* Make sure we're not leaving any stale registers. */
1487 registers_changed_ptid (tp->ptid);
1490 /* The to_resume method of target record-btrace.
   While not replaying and moving forward, the request is forwarded to the
   target beneath.  Otherwise only a resume intent (a BTHR_* flag) is
   recorded on the thread; the actual stepping happens later in
   record_btrace_wait.  */
1493 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1494 enum gdb_signal signal)
1496 struct thread_info *tp, *other;
1497 enum btrace_thread_flag flag;
1499 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1501 /* Store the execution direction of the last resume. */
1502 record_btrace_resume_exec_dir = execution_direction;
1504 tp = record_btrace_find_resume_thread (ptid);
1506 error (_("Cannot find thread to resume."));
1508 /* Stop replaying other threads if the thread to resume is not replaying. */
1509 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1510 ALL_NON_EXITED_THREADS (other)
1511 record_btrace_stop_replaying (other);
1513 /* As long as we're not replaying, just forward the request. */
1514 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1517 return ops->to_resume (ops, ptid, step, signal);
1520 /* Compute the btrace thread flag for the requested move.
   NOTE(review): the step/continue condition selecting between these two
   assignments is not visible in this excerpt.  */
1522 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1524 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1526 /* At the moment, we only move a single thread. We could also move
1527 all threads in parallel by single-stepping each resumed thread
1528 until the first runs into an event.
1529 When we do that, we would want to continue all other threads.
1530 For now, just resume one thread to not confuse to_wait. */
1531 record_btrace_resume_thread (tp, flag);
1533 /* We just indicate the resume intent here. The actual stepping happens in
1534 record_btrace_wait below. */
1536 /* Async support. */
1537 if (target_can_async_p ())
1539 target_async (inferior_event_handler, 0);
1540 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1544 /* Find a thread to move: prefer the thread matching PTID if it has a
   pending BTHR_MOVE request, otherwise return any non-exited thread with
   one.  NOTE(review): the returns and the trailing NULL fallback are not
   visible in this excerpt.  */
1546 static struct thread_info *
1547 record_btrace_find_thread_to_move (ptid_t ptid)
1549 struct thread_info *tp;
1551 /* First check the parameter thread. */
1552 tp = find_thread_ptid (ptid);
1553 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1556 /* Otherwise, find one other thread that has been resumed. */
1557 ALL_NON_EXITED_THREADS (tp)
1558 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1564 /* Return a target_waitstatus indicating that we ran out of history.
   Used when a (reverse-)step hits either end of the recorded trace.  */
1566 static struct target_waitstatus
1567 btrace_step_no_history (void)
1569 struct target_waitstatus status;
1571 status.kind = TARGET_WAITKIND_NO_HISTORY;
1576 /* Return a target_waitstatus indicating that a step finished.  The stop is
   reported as SIGTRAP, matching what a live single-step would deliver.  */
1578 static struct target_waitstatus
1579 btrace_step_stopped (void)
1581 struct target_waitstatus status;
1583 status.kind = TARGET_WAITKIND_STOPPED;
1584 status.value.sig = GDB_SIGNAL_TRAP;
1589 /* Clear the record histories.  Discards the cached "record
   instruction-history" and "record function-call-history" iterators so they
   are recomputed from the new replay position on next use.  */
1592 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1594 xfree (btinfo->insn_history);
1595 xfree (btinfo->call_history);
1597 btinfo->insn_history = NULL;
1598 btinfo->call_history = NULL;
1601 /* Step a single thread according to its pending BTHR_MOVE flag and return
   the resulting wait status.  Consumes the flag, then performs one of:
   forward step, reverse step, forward continue (until a breakpoint or the
   end of the trace), or reverse continue.
   NOTE(review): the switch on FLAGS and its case labels are not visible in
   this excerpt; the sections below correspond to the individual cases.  */
1603 static struct target_waitstatus
1604 record_btrace_step_thread (struct thread_info *tp)
1606 struct btrace_insn_iterator *replay, end;
1607 struct btrace_thread_info *btinfo;
1608 struct address_space *aspace;
1609 struct inferior *inf;
1610 enum btrace_thread_flag flags;
1613 /* We can't step without an execution history. */
1614 if (btrace_is_empty (tp))
1615 return btrace_step_no_history ();
1617 btinfo = &tp->btrace;
1618 replay = btinfo->replay;
   /* Consume the move request so we don't step again on the next wait.  */
1620 flags = btinfo->flags & BTHR_MOVE;
1621 btinfo->flags &= ~BTHR_MOVE;
1623 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1628 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
   /* Forward single-step.  */
1631 /* We're done if we're not replaying. */
1633 return btrace_step_no_history ();
1635 /* We are always able to step at least once. */
1636 steps = btrace_insn_next (replay, 1);
1637 gdb_assert (steps == 1);
1639 /* Determine the end of the instruction trace. */
1640 btrace_insn_end (&end, btinfo);
1642 /* We stop replaying if we reached the end of the trace. */
1643 if (btrace_insn_cmp (replay, &end) == 0)
1644 record_btrace_stop_replaying (tp);
1646 return btrace_step_stopped ();
   /* Reverse single-step.  */
1649 /* Start replaying if we're not already doing so. */
1651 replay = record_btrace_start_replaying (tp);
1653 /* If we can't step any further, we reached the end of the history. */
1654 steps = btrace_insn_prev (replay, 1);
1656 return btrace_step_no_history ();
1658 return btrace_step_stopped ();
   /* Forward continue: step until a breakpoint or the end of the trace.  */
1661 /* We're done if we're not replaying. */
1663 return btrace_step_no_history ();
1665 inf = find_inferior_ptid (tp->ptid);
1666 aspace = inf->aspace;
1668 /* Determine the end of the instruction trace. */
1669 btrace_insn_end (&end, btinfo);
1673 const struct btrace_insn *insn;
1675 /* We are always able to step at least once. */
1676 steps = btrace_insn_next (replay, 1);
1677 gdb_assert (steps == 1);
1679 /* We stop replaying if we reached the end of the trace. */
1680 if (btrace_insn_cmp (replay, &end) == 0)
1682 record_btrace_stop_replaying (tp);
1683 return btrace_step_no_history ();
1686 insn = btrace_insn_get (replay);
1689 DEBUG ("stepping %d (%s) ... %s", tp->num,
1690 target_pid_to_str (tp->ptid),
1691 core_addr_to_string_nz (insn->pc));
1693 if (breakpoint_here_p (aspace, insn->pc))
1694 return btrace_step_stopped ();
   /* Reverse continue: step backwards until a breakpoint or the start of
      the recorded history.  */
1698 /* Start replaying if we're not already doing so. */
1700 replay = record_btrace_start_replaying (tp);
1702 inf = find_inferior_ptid (tp->ptid);
1703 aspace = inf->aspace;
1707 const struct btrace_insn *insn;
1709 /* If we can't step any further, we're done. */
1710 steps = btrace_insn_prev (replay, 1);
1712 return btrace_step_no_history ();
1714 insn = btrace_insn_get (replay);
1717 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1718 target_pid_to_str (tp->ptid),
1719 core_addr_to_string_nz (insn->pc));
1721 if (breakpoint_here_p (aspace, insn->pc))
1722 return btrace_step_stopped ();
1727 /* The to_wait method of target record-btrace.
   While not replaying forward, forwards the request to the target beneath.
   Otherwise picks one thread with a pending move request, steps it via
   record_btrace_step_thread, cancels the move intent of all other threads,
   and resets the record histories for the moved thread.  */
1730 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1731 struct target_waitstatus *status, int options)
1733 struct thread_info *tp, *other;
1735 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1737 /* As long as we're not replaying, just forward the request. */
1738 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1741 return ops->to_wait (ops, ptid, status, options);
1744 /* Let's find a thread to move. */
1745 tp = record_btrace_find_thread_to_move (ptid);
   /* No resumed thread wants to move: report "nothing happened".  */
1748 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1750 status->kind = TARGET_WAITKIND_IGNORE;
1751 return minus_one_ptid;
1754 /* We only move a single thread. We're not able to correlate threads. */
1755 *status = record_btrace_step_thread (tp);
1757 /* Stop all other threads. */
1759 ALL_NON_EXITED_THREADS (other)
1760 other->btrace.flags &= ~BTHR_MOVE;
1762 /* Start record histories anew from the current position. */
1763 record_btrace_clear_histories (&tp->btrace);
1765 /* We moved the replay position but did not update registers. */
1766 registers_changed_ptid (tp->ptid);
1771 /* The to_can_execute_reverse method of target record-btrace.
   NOTE(review): the body (presumably a constant "can reverse" result) is
   not visible in this excerpt.  */
1774 record_btrace_can_execute_reverse (struct target_ops *self)
1779 /* The to_decr_pc_after_break method of target record-btrace.
   NOTE(review): the "return 0" for the replaying case is not visible in
   this excerpt.  */
1782 record_btrace_decr_pc_after_break (struct target_ops *ops,
1783 struct gdbarch *gdbarch)
1785 /* When replaying, we do not actually execute the breakpoint instruction
1786 so there is no need to adjust the PC after hitting a breakpoint. */
1787 if (record_btrace_is_replaying (ops))
   /* Not replaying: defer to the target beneath.  */
1790 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1793 /* The to_update_thread_list method of target record-btrace.
   A no-op while replaying; otherwise forwards to the target beneath.
   NOTE(review): the early return and the "ops = ops->beneath"-style setup
   before the forwarded call are not visible in this excerpt.  */
1796 record_btrace_update_thread_list (struct target_ops *ops)
1798 /* We don't add or remove threads during replay. */
1799 if (record_btrace_is_replaying (ops))
1802 /* Forward the request. */
1804 ops->to_update_thread_list (ops)
1807 /* The to_thread_alive method of target record-btrace.
   While replaying, a thread is alive iff it is in GDB's thread list;
   otherwise the query is forwarded to the target beneath.  */
1810 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1812 /* We don't add or remove threads during replay. */
1813 if (record_btrace_is_replaying (ops))
1814 return find_thread_ptid (ptid) != NULL;
1816 /* Forward the request. */
1818 return ops->to_thread_alive (ops, ptid);
1821 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
   is stopped; otherwise TP starts (or continues) replaying at IT.  The
   record histories are reset so they restart from the new position.  */
1825 record_btrace_set_replay (struct thread_info *tp,
1826 const struct btrace_insn_iterator *it)
1828 struct btrace_thread_info *btinfo;
1830 btinfo = &tp->btrace;
1832 if (it == NULL || it->function == NULL)
1833 record_btrace_stop_replaying (tp);
   /* A valid iterator: begin replaying if necessary, then move the replay
      position to IT (skipping the copy if we are already there) and flush
      the now-stale register cache.  */
1836 if (btinfo->replay == NULL)
1837 record_btrace_start_replaying (tp);
1838 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1841 *btinfo->replay = *it;
1842 registers_changed_ptid (tp->ptid);
1845 /* Start anew from the new replay position. */
1846 record_btrace_clear_histories (btinfo);
1849 /* The to_goto_record_begin method of target record-btrace.
   Moves the replay position to the first recorded instruction and prints
   the resulting frame, like a normal stop would.  */
1852 record_btrace_goto_begin (struct target_ops *self)
1854 struct thread_info *tp;
1855 struct btrace_insn_iterator begin;
1857 tp = require_btrace_thread ();
1859 btrace_insn_begin (&begin, &tp->btrace);
1860 record_btrace_set_replay (tp, &begin);
1862 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1865 /* The to_goto_record_end method of target record-btrace.
   Passing NULL to record_btrace_set_replay stops replaying, i.e. returns
   to the current (live) position at the end of the trace.  */
1868 record_btrace_goto_end (struct target_ops *ops)
1870 struct thread_info *tp;
1872 tp = require_btrace_thread ();
1874 record_btrace_set_replay (tp, NULL);
1876 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1879 /* The to_goto_record method of target record-btrace.
   Moves the replay position to instruction number INSN.  Errors out if
   INSN does not fit the iterator's number type or no such instruction is
   recorded.  NOTE(review): the NUMBER = INSN assignment and the wrap-around
   comparison are not visible in this excerpt.  */
1882 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1884 struct thread_info *tp;
1885 struct btrace_insn_iterator it;
1886 unsigned int number;
1891 /* Check for wrap-arounds. */
1893 error (_("Instruction number out of range."));
1895 tp = require_btrace_thread ();
1897 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1899 error (_("No such instruction."));
1901 record_btrace_set_replay (tp, &it);
1903 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1906 /* The to_execution_direction target method.  Reports the direction of the
   last resume so infrun interprets wait statuses correctly.  */
1908 static enum exec_direction_kind
1909 record_btrace_execution_direction (struct target_ops *self)
1911 return record_btrace_resume_exec_dir;
1914 /* The to_prepare_to_generate_core target method.  Flags that a core file
   is being generated; cleared again by the done method below.  */
1917 record_btrace_prepare_to_generate_core (struct target_ops *self)
1919 record_btrace_generating_corefile = 1;
1922 /* The to_done_generating_core target method.  Resets the flag set by
   record_btrace_prepare_to_generate_core.  */
1925 record_btrace_done_generating_core (struct target_ops *self)
1927 record_btrace_generating_corefile = 0;
1930 /* Initialize the record-btrace target ops.  Fills in the static
   record_btrace_ops target vector; called once from the module's
   _initialize function before add_target.  */
1933 init_record_btrace_ops (void)
1935 struct target_ops *ops;
1937 ops = &record_btrace_ops;
   /* Identification.  */
1938 ops->to_shortname = "record-btrace";
1939 ops->to_longname = "Branch tracing target";
1940 ops->to_doc = "Collect control-flow trace and provide the execution history.";
   /* Lifecycle: open/close plus the generic record detach/kill handlers.  */
1941 ops->to_open = record_btrace_open;
1942 ops->to_close = record_btrace_close;
1943 ops->to_detach = record_detach;
1944 ops->to_disconnect = record_disconnect;
1945 ops->to_mourn_inferior = record_mourn_inferior;
1946 ops->to_kill = record_kill;
   /* Recording and history browsing.  */
1947 ops->to_stop_recording = record_btrace_stop_recording;
1948 ops->to_info_record = record_btrace_info;
1949 ops->to_insn_history = record_btrace_insn_history;
1950 ops->to_insn_history_from = record_btrace_insn_history_from;
1951 ops->to_insn_history_range = record_btrace_insn_history_range;
1952 ops->to_call_history = record_btrace_call_history;
1953 ops->to_call_history_from = record_btrace_call_history_from;
1954 ops->to_call_history_range = record_btrace_call_history_range;
1955 ops->to_record_is_replaying = record_btrace_is_replaying;
   /* Memory, breakpoints, and registers during replay.  */
1956 ops->to_xfer_partial = record_btrace_xfer_partial;
1957 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1958 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1959 ops->to_fetch_registers = record_btrace_fetch_registers;
1960 ops->to_store_registers = record_btrace_store_registers;
1961 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1962 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
1963 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
   /* Execution control.  */
1964 ops->to_resume = record_btrace_resume;
1965 ops->to_wait = record_btrace_wait;
1966 ops->to_update_thread_list = record_btrace_update_thread_list;
1967 ops->to_thread_alive = record_btrace_thread_alive;
1968 ops->to_goto_record_begin = record_btrace_goto_begin;
1969 ops->to_goto_record_end = record_btrace_goto_end;
1970 ops->to_goto_record = record_btrace_goto;
1971 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1972 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1973 ops->to_execution_direction = record_btrace_execution_direction;
1974 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
1975 ops->to_done_generating_core = record_btrace_done_generating_core;
1976 ops->to_stratum = record_stratum;
1977 ops->to_magic = OPS_MAGIC;
1980 /* Alias for "target record".  Implements "record btrace"; rejects any
   arguments and pushes the record-btrace target.  */
1983 cmd_record_btrace_start (char *args, int from_tty)
1985 if (args != NULL && *args != 0)
1986 error (_("Invalid argument."));
1988 execute_command ("target record-btrace", from_tty);
1991 /* The "set record btrace" command.  With no subcommand, lists the
   available settings instead of erroring out.  */
1994 cmd_set_record_btrace (char *args, int from_tty)
1996 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
1999 /* The "show record btrace" command.  Shows all record-btrace settings.  */
2002 cmd_show_record_btrace (char *args, int from_tty)
2004 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2007 /* The "show record btrace replay-memory-access" command.  Prints the
   current replay_memory_access setting ("read-only" or "read-write").  */
2010 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2011 struct cmd_list_element *c, const char *value)
2013 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2014 replay_memory_access);
2017 void _initialize_record_btrace (void);
2019 /* Initialize btrace commands. */
2022 _initialize_record_btrace (void)
2024 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2025 _("Start branch trace recording."),
2027 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2029 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2030 _("Set record options"), &set_record_btrace_cmdlist,
2031 "set record btrace ", 0, &set_record_cmdlist);
2033 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2034 _("Show record options"), &show_record_btrace_cmdlist,
2035 "show record btrace ", 0, &show_record_cmdlist);
2037 add_setshow_enum_cmd ("replay-memory-access", no_class,
2038 replay_memory_access_types, &replay_memory_access, _("\
2039 Set what memory accesses are allowed during replay."), _("\
2040 Show what memory accesses are allowed during replay."),
2041 _("Default is READ-ONLY.\n\n\
2042 The btrace record target does not trace data.\n\
2043 The memory therefore corresponds to the live target and not \
2044 to the current replay position.\n\n\
2045 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2046 When READ-WRITE, allow accesses to read-only and read-write memory during \
2048 NULL, cmd_show_replay_memory_access,
2049 &set_record_btrace_cmdlist,
2050 &show_record_btrace_cmdlist);
2052 init_record_btrace_ops ();
2053 add_target (&record_btrace_ops);
2055 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,