1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
/* NOTE(review): this excerpt has elided lines (the embedded original line
   numbers are not contiguous); comments describe only what is visible.  */

42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops;
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer *record_btrace_thread_observer;
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only[] = "read-only";
50 static const char replay_memory_access_read_write[] = "read-write";
/* Candidate values for the "set record btrace replay-memory-access"
   command; must reference the string objects above so pointer comparison
   (as done in record_btrace_xfer_partial) identifies the selection.  */
51 static const char *const replay_memory_access_types[] =
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access = replay_memory_access_read_only;
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element *set_record_btrace_cmdlist;
63 static struct cmd_list_element *show_record_btrace_cmdlist;
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler *record_btrace_async_inferior_event_handler;
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile;
74 /* Print a record-btrace debug message. Use do ... while (0) to avoid
75 ambiguities when used in if statements. */
/* Uses the GCC named-variadic-macro extension; ", ##args" drops the
   trailing comma when the macro is used without arguments.  */
77 #define DEBUG(msg, args...) \
80 if (record_debug != 0) \
81 fprintf_unfiltered (gdb_stdlog, \
82 "[record-btrace] " msg "\n", ##args); \
87 /* Update the branch trace for the current thread and return a pointer to its
90 Throws an error if there is no thread or no trace. This function never
93 static struct thread_info *
94 require_btrace_thread (void)
96 struct thread_info *tp;
/* Look up the thread currently selected via inferior_ptid.
   NOTE(review): the NULL check before the "No thread." error and the
   trace-fetch call appear elided in this excerpt.  */
100 tp = find_thread_ptid (inferior_ptid);
102 error (_("No thread."));
106 if (btrace_is_empty (tp))
107 error (_("No trace."));
112 /* Update the branch trace for the current thread and return a pointer to its
113 branch trace information struct.
115 Throws an error if there is no thread or no trace. This function never
118 static struct btrace_thread_info *
119 require_btrace (void)
121 struct thread_info *tp;
/* Delegates the lookup and error checking to require_btrace_thread;
   presumably returns &tp->btrace (return statement elided here).  */
123 tp = require_btrace_thread ();
128 /* Enable branch tracing for one thread. Warn on errors. */
131 record_btrace_enable_warn (struct thread_info *tp)
133 volatile struct gdb_exception error;
/* Swallow errors from enabling tracing; demote them to a warning so a
   single failing thread does not abort enabling all the others.  */
135 TRY_CATCH (error, RETURN_MASK_ERROR)
138 if (error.message != NULL)
139 warning ("%s", error.message);
142 /* Callback function to disable branch tracing for one thread. */
/* Cleanup-compatible signature: ARG is the thread_info pointer.  */
145 record_btrace_disable_callback (void *arg)
147 struct thread_info *tp;
154 /* Enable automatic tracing of new threads. */
157 record_btrace_auto_enable (void)
159 DEBUG ("attach thread observer");
161 record_btrace_thread_observer
162 = observer_attach_new_thread (record_btrace_enable_warn)
165 /* Disable automatic tracing of new threads. */
168 record_btrace_auto_disable (void)
170 /* The observer may have been detached, already. */
171 if (record_btrace_thread_observer == NULL)
174 DEBUG ("detach thread observer");
176 observer_detach_new_thread (record_btrace_thread_observer);
177 record_btrace_thread_observer = NULL;
180 /* The record-btrace async event handler function. */
183 record_btrace_handle_async_inferior_event (gdb_client_data data)
185 inferior_event_handler (INF_REG_EVENT, NULL);
188 /* The to_open method of target record-btrace. */
191 record_btrace_open (char *args, int from_tty)
193 struct cleanup *disable_chain;
194 struct thread_info *tp;
200 if (!target_has_execution)
201 error (_("The program is not being run."));
203 if (!target_supports_btrace ())
204 error (_("Target does not support branch tracing."));
207 error (_("Record btrace can't debug inferior in non-stop mode."));
209 gdb_assert (record_btrace_thread_observer == NULL);
/* Chain a disable-callback per enabled thread so that tracing is torn
   down again if anything below throws; on success the chain is
   discarded (see discard_cleanups at the end).  */
211 disable_chain = make_cleanup (null_cleanup, NULL);
212 ALL_NON_EXITED_THREADS (tp)
/* ARGS optionally restricts recording to a thread-number list.  */
213 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
217 make_cleanup (record_btrace_disable_callback, tp);
220 record_btrace_auto_enable ();
222 push_target (&record_btrace_ops);
224 record_btrace_async_inferior_event_handler
225 = create_async_event_handler (record_btrace_handle_async_inferior_event,
227 record_btrace_generating_corefile = 0;
229 observer_notify_record_changed (current_inferior (), 1);
231 discard_cleanups (disable_chain);
234 /* The to_stop_recording method of target record-btrace. */
237 record_btrace_stop_recording (struct target_ops *self)
239 struct thread_info *tp;
241 DEBUG ("stop recording");
243 record_btrace_auto_disable ();
/* Disable tracing only for threads we actually enabled it on
   (tp->btrace.target is non-NULL in that case).  */
245 ALL_NON_EXITED_THREADS (tp)
246 if (tp->btrace.target != NULL)
250 /* The to_close method of target record-btrace. */
253 record_btrace_close (struct target_ops *self)
255 struct thread_info *tp;
257 if (record_btrace_async_inferior_event_handler != NULL)
258 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
260 /* Make sure automatic recording gets disabled even if we did not stop
261 recording before closing the record-btrace target. */
262 record_btrace_auto_disable ();
264 /* We should have already stopped recording.
265 Tear down btrace in case we have not. */
266 ALL_NON_EXITED_THREADS (tp)
267 btrace_teardown (tp);
270 /* The to_info_record method of target record-btrace. */
273 record_btrace_info (struct target_ops *self)
275 struct btrace_thread_info *btinfo;
276 struct thread_info *tp;
277 unsigned int insns, calls;
281 tp = find_thread_ptid (inferior_ptid);
283 error (_("No thread."));
290 btinfo = &tp->btrace;
/* Count instructions and function segments by walking to the trace end
   and stepping back one; presumably INSNS and CALLS are zero-initialized
   on an elided line for the empty-trace case — TODO confirm.  */
292 if (!btrace_is_empty (tp))
294 struct btrace_call_iterator call;
295 struct btrace_insn_iterator insn;
297 btrace_call_end (&call, btinfo);
298 btrace_call_prev (&call, 1);
299 calls = btrace_call_number (&call);
301 btrace_insn_end (&insn, btinfo);
302 btrace_insn_prev (&insn, 1);
303 insns = btrace_insn_number (&insn);
306 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
307 "%d (%s).\n"), insns, calls, tp->num,
308 target_pid_to_str (tp->ptid));
310 if (btrace_is_replaying (tp))
311 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
312 btrace_insn_number (btinfo->replay));
315 /* Print an unsigned int. */
/* Helper: ui_out has no unsigned-int field printer, so format via "%u".  */
318 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
320 ui_out_field_fmt (uiout, fld, "%u", val);
323 /* Disassemble a section of the recorded instruction trace. */
326 btrace_insn_history (struct ui_out *uiout,
327 const struct btrace_insn_iterator *begin,
328 const struct btrace_insn_iterator *end, int flags)
330 struct gdbarch *gdbarch;
331 struct btrace_insn_iterator it;
/* The range is half-open: [BEGIN; END).  */
333 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
334 btrace_insn_number (end));
336 gdbarch = target_gdbarch ();
338 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
340 const struct btrace_insn *insn;
342 insn = btrace_insn_get (&it);
344 /* Print the instruction index. */
345 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
346 ui_out_text (uiout, "\t");
348 /* Disassembly with '/m' flag may not produce the expected result.
/* Disassemble exactly one instruction at INSN->pc.  */
350 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
354 /* The to_insn_history method of target record-btrace. */
/* Print SIZE instructions around the current position; the sign of SIZE
   selects the direction.  Successive calls continue from the previously
   shown window (stored in btinfo->insn_history).  */
357 record_btrace_insn_history (struct target_ops *self, int size, int flags)
359 struct btrace_thread_info *btinfo;
360 struct btrace_insn_history *history;
361 struct btrace_insn_iterator begin, end;
362 struct cleanup *uiout_cleanup;
363 struct ui_out *uiout;
364 unsigned int context, covered;
366 uiout = current_uiout;
367 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
369 context = abs (size);
371 error (_("Bad record instruction-history-size."));
373 btinfo = require_btrace ();
374 history = btinfo->insn_history;
/* HISTORY is NULL on the first request (branch with elided condition).  */
377 struct btrace_insn_iterator *replay;
379 DEBUG ("insn-history (0x%x): %d", flags, size);
381 /* If we're replaying, we start at the replay position. Otherwise, we
382 start at the tail of the trace. */
383 replay = btinfo->replay;
387 btrace_insn_end (&begin, btinfo);
389 /* We start from here and expand in the requested direction. Then we
390 expand in the other direction, as well, to fill up any remaining
395 /* We want the current position covered, as well. */
396 covered = btrace_insn_next (&end, 1);
397 covered += btrace_insn_prev (&begin, context - covered);
398 covered += btrace_insn_next (&end, context - covered);
402 covered = btrace_insn_next (&end, context);
403 covered += btrace_insn_prev (&begin, context - covered);
/* Subsequent request: continue from the stored window.  */
408 begin = history->begin;
411 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
412 btrace_insn_number (&begin), btrace_insn_number (&end));
417 covered = btrace_insn_prev (&begin, context);
422 covered = btrace_insn_next (&end, context);
/* COVERED counts how many instructions the window actually spans; zero
   means we ran off either end of the trace (messages below).  */
427 btrace_insn_history (uiout, &begin, &end, flags);
431 printf_unfiltered (_("At the start of the branch trace record.\n"));
433 printf_unfiltered (_("At the end of the branch trace record.\n"));
436 btrace_set_insn_history (btinfo, &begin, &end);
437 do_cleanups (uiout_cleanup);
440 /* The to_insn_history_range method of target record-btrace. */
443 record_btrace_insn_history_range (struct target_ops *self,
444 ULONGEST from, ULONGEST to, int flags)
446 struct btrace_thread_info *btinfo;
447 struct btrace_insn_history *history;
448 struct btrace_insn_iterator begin, end;
449 struct cleanup *uiout_cleanup;
450 struct ui_out *uiout;
451 unsigned int low, high;
454 uiout = current_uiout;
455 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
460 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
462 /* Check for wrap-arounds. */
/* LOW/HIGH are the ULONGEST arguments narrowed to unsigned int
   (assignments elided); a mismatch means the values did not fit.  */
463 if (low != from || high != to)
464 error (_("Bad range."));
467 error (_("Bad range."));
469 btinfo = require_btrace ();
471 found = btrace_find_insn_by_number (&begin, btinfo, low);
473 error (_("Range out of bounds."));
475 found = btrace_find_insn_by_number (&end, btinfo, high);
478 /* Silently truncate the range. */
479 btrace_insn_end (&end, btinfo);
483 /* We want both begin and end to be inclusive. */
484 btrace_insn_next (&end, 1);
487 btrace_insn_history (uiout, &begin, &end, flags);
488 btrace_set_insn_history (btinfo, &begin, &end);
490 do_cleanups (uiout_cleanup);
493 /* The to_insn_history_from method of target record-btrace. */
/* Translate "SIZE instructions around FROM" into an inclusive range and
   delegate to record_btrace_insn_history_range.  */
496 record_btrace_insn_history_from (struct target_ops *self,
497 ULONGEST from, int size, int flags)
499 ULONGEST begin, end, context;
501 context = abs (size);
503 error (_("Bad record instruction-history-size."));
512 begin = from - context + 1;
517 end = from + context - 1;
519 /* Check for wrap-around. */
524 record_btrace_insn_history_range (self, begin, end, flags);
527 /* Print the instruction number range for a function call history line. */
530 btrace_call_history_insn_range (struct ui_out *uiout,
531 const struct btrace_function *bfun)
533 unsigned int begin, end, size;
535 size = VEC_length (btrace_insn_s, bfun->insn);
536 gdb_assert (size > 0);
/* Instruction numbers are globally consecutive; the segment's first
   instruction is at INSN_OFFSET, its last at INSN_OFFSET + SIZE - 1.  */
538 begin = bfun->insn_offset;
539 end = begin + size - 1;
541 ui_out_field_uint (uiout, "insn begin", begin);
542 ui_out_text (uiout, ",");
543 ui_out_field_uint (uiout, "insn end", end);
546 /* Print the source line information for a function call history line. */
549 btrace_call_history_src_line (struct ui_out *uiout,
550 const struct btrace_function *bfun)
559 ui_out_field_string (uiout, "file",
560 symtab_to_filename_for_display (sym->symtab));
/* Print the covered line range as "file:min,max" (max only when the
   segment spans more than one line; condition elided here).  */
562 begin = bfun->lbegin;
568 ui_out_text (uiout, ":");
569 ui_out_field_int (uiout, "min line", begin);
574 ui_out_text (uiout, ",");
575 ui_out_field_int (uiout, "max line", end);
578 /* Get the name of a branch trace function. */
/* Prefer the full symbol's print name, fall back to the minimal symbol,
   presumably returning a placeholder otherwise (elided).  */
581 btrace_get_bfun_name (const struct btrace_function *bfun)
583 struct minimal_symbol *msym;
593 return SYMBOL_PRINT_NAME (sym);
594 else if (msym != NULL)
595 return MSYMBOL_PRINT_NAME (msym);
600 /* Disassemble a section of the recorded function trace. */
603 btrace_call_history (struct ui_out *uiout,
604 const struct btrace_thread_info *btinfo,
605 const struct btrace_call_iterator *begin,
606 const struct btrace_call_iterator *end,
607 enum record_print_flag flags)
609 struct btrace_call_iterator it;
/* The range is half-open: [BEGIN; END).  */
611 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
612 btrace_call_number (end));
614 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
616 const struct btrace_function *bfun;
617 struct minimal_symbol *msym;
620 bfun = btrace_call_get (&it);
624 /* Print the function index. */
625 ui_out_field_uint (uiout, "index", bfun->number);
626 ui_out_text (uiout, "\t");
628 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
/* Indent two spaces per call level; BTINFO->level normalizes the
   segment's relative level to a non-negative depth.  */
630 int level = bfun->level + btinfo->level, i;
632 for (i = 0; i < level; ++i)
633 ui_out_text (uiout, " ");
637 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
638 else if (msym != NULL)
639 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
640 else if (!ui_out_is_mi_like_p (uiout))
641 ui_out_field_string (uiout, "function", "??");
643 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
645 ui_out_text (uiout, _("\tinst "));
646 btrace_call_history_insn_range (uiout, bfun);
649 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
651 ui_out_text (uiout, _("\tat "));
652 btrace_call_history_src_line (uiout, bfun);
655 ui_out_text (uiout, "\n");
659 /* The to_call_history method of target record-btrace. */
/* Print SIZE function segments around the current position; mirrors
   record_btrace_insn_history but iterates over call segments.  */
662 record_btrace_call_history (struct target_ops *self, int size, int flags)
664 struct btrace_thread_info *btinfo;
665 struct btrace_call_history *history;
666 struct btrace_call_iterator begin, end;
667 struct cleanup *uiout_cleanup;
668 struct ui_out *uiout;
669 unsigned int context, covered;
671 uiout = current_uiout;
672 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
674 context = abs (size);
676 error (_("Bad record function-call-history-size."));
678 btinfo = require_btrace ();
679 history = btinfo->call_history;
/* HISTORY is NULL on the first request (branch with elided condition).  */
682 struct btrace_insn_iterator *replay;
684 DEBUG ("call-history (0x%x): %d", flags, size);
686 /* If we're replaying, we start at the replay position. Otherwise, we
687 start at the tail of the trace. */
688 replay = btinfo->replay;
/* Build a call iterator directly from the replay position.  */
691 begin.function = replay->function;
692 begin.btinfo = btinfo;
695 btrace_call_end (&begin, btinfo);
697 /* We start from here and expand in the requested direction. Then we
698 expand in the other direction, as well, to fill up any remaining
703 /* We want the current position covered, as well. */
704 covered = btrace_call_next (&end, 1);
705 covered += btrace_call_prev (&begin, context - covered);
706 covered += btrace_call_next (&end, context - covered);
710 covered = btrace_call_next (&end, context);
711 covered += btrace_call_prev (&begin, context- covered);
/* Subsequent request: continue from the stored window.  */
716 begin = history->begin;
719 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
720 btrace_call_number (&begin), btrace_call_number (&end));
725 covered = btrace_call_prev (&begin, context);
730 covered = btrace_call_next (&end, context);
/* COVERED of zero means we ran off either end of the trace.  */
735 btrace_call_history (uiout, btinfo, &begin, &end, flags);
739 printf_unfiltered (_("At the start of the branch trace record.\n"));
741 printf_unfiltered (_("At the end of the branch trace record.\n"));
744 btrace_set_call_history (btinfo, &begin, &end);
745 do_cleanups (uiout_cleanup);
748 /* The to_call_history_range method of target record-btrace. */
751 record_btrace_call_history_range (struct target_ops *self,
752 ULONGEST from, ULONGEST to, int flags)
754 struct btrace_thread_info *btinfo;
755 struct btrace_call_history *history;
756 struct btrace_call_iterator begin, end;
757 struct cleanup *uiout_cleanup;
758 struct ui_out *uiout;
759 unsigned int low, high;
762 uiout = current_uiout;
763 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
768 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
770 /* Check for wrap-arounds. */
/* LOW/HIGH are FROM/TO narrowed to unsigned int (assignments elided);
   a mismatch means the values did not fit.  */
771 if (low != from || high != to)
772 error (_("Bad range."));
775 error (_("Bad range."));
777 btinfo = require_btrace ();
779 found = btrace_find_call_by_number (&begin, btinfo, low);
781 error (_("Range out of bounds."));
783 found = btrace_find_call_by_number (&end, btinfo, high);
786 /* Silently truncate the range. */
787 btrace_call_end (&end, btinfo);
791 /* We want both begin and end to be inclusive. */
792 btrace_call_next (&end, 1);
795 btrace_call_history (uiout, btinfo, &begin, &end, flags);
796 btrace_set_call_history (btinfo, &begin, &end);
798 do_cleanups (uiout_cleanup);
801 /* The to_call_history_from method of target record-btrace. */
/* Translate "SIZE segments around FROM" into an inclusive range and
   delegate to record_btrace_call_history_range.  */
804 record_btrace_call_history_from (struct target_ops *self,
805 ULONGEST from, int size, int flags)
807 ULONGEST begin, end, context;
809 context = abs (size);
811 error (_("Bad record function-call-history-size."));
820 begin = from - context + 1;
825 end = from + context - 1;
827 /* Check for wrap-around. */
832 record_btrace_call_history_range (self, begin, end, flags);
835 /* The to_record_is_replaying method of target record-btrace. */
/* Returns non-zero if any live thread is currently replaying.  */
838 record_btrace_is_replaying (struct target_ops *self)
840 struct thread_info *tp;
842 ALL_NON_EXITED_THREADS (tp)
843 if (btrace_is_replaying (tp))
849 /* The to_xfer_partial method of target record-btrace. */
851 static enum target_xfer_status
852 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
853 const char *annex, gdb_byte *readbuf,
854 const gdb_byte *writebuf, ULONGEST offset,
855 ULONGEST len, ULONGEST *xfered_len)
857 struct target_ops *t;
859 /* Filter out requests that don't make sense during replay. */
/* Pointer comparison against the interned access-type string is
   intentional; see replay_memory_access_types above.  */
860 if (replay_memory_access == replay_memory_access_read_only
861 && !record_btrace_generating_corefile
862 && record_btrace_is_replaying (ops))
866 case TARGET_OBJECT_MEMORY:
868 struct target_section *section;
870 /* We do not allow writing memory in general. */
871 if (writebuf != NULL)
874 return TARGET_XFER_UNAVAILABLE;
877 /* We allow reading readonly memory. */
878 section = target_section_by_addr (ops, offset);
881 /* Check if the section we found is readonly. */
882 if ((bfd_get_section_flags (section->the_bfd_section->owner,
883 section->the_bfd_section)
884 & SEC_READONLY) != 0)
886 /* Truncate the request to fit into this section. */
887 len = min (len, section->endaddr - offset);
/* Reads outside read-only sections are unavailable during replay.  */
893 return TARGET_XFER_UNAVAILABLE;
898 /* Forward the request. */
/* Delegate to the target beneath (loop over ops->beneath elided).  */
900 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
901 offset, len, xfered_len);
904 /* The to_insert_breakpoint method of target record-btrace. */
907 record_btrace_insert_breakpoint (struct target_ops *ops,
908 struct gdbarch *gdbarch,
909 struct bp_target_info *bp_tgt)
911 volatile struct gdb_exception except;
915 /* Inserting breakpoints requires accessing memory. Allow it for the
916 duration of this function. */
/* Temporarily widen replay memory access; restored below even when the
   call beneath throws (exception is re-thrown after restoring).  */
917 old = replay_memory_access;
918 replay_memory_access = replay_memory_access_read_write;
921 TRY_CATCH (except, RETURN_MASK_ALL)
922 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
924 replay_memory_access = old;
926 if (except.reason < 0)
927 throw_exception (except);
932 /* The to_remove_breakpoint method of target record-btrace. */
/* Mirror image of record_btrace_insert_breakpoint.  */
935 record_btrace_remove_breakpoint (struct target_ops *ops,
936 struct gdbarch *gdbarch,
937 struct bp_target_info *bp_tgt)
939 volatile struct gdb_exception except;
943 /* Removing breakpoints requires accessing memory. Allow it for the
944 duration of this function. */
945 old = replay_memory_access;
946 replay_memory_access = replay_memory_access_read_write;
949 TRY_CATCH (except, RETURN_MASK_ALL)
950 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
952 replay_memory_access = old;
954 if (except.reason < 0)
955 throw_exception (except);
960 /* The to_fetch_registers method of target record-btrace. */
963 record_btrace_fetch_registers (struct target_ops *ops,
964 struct regcache *regcache, int regno)
966 struct btrace_insn_iterator *replay;
967 struct thread_info *tp;
969 tp = find_thread_ptid (inferior_ptid);
970 gdb_assert (tp != NULL);
972 replay = tp->btrace.replay;
973 if (replay != NULL && !record_btrace_generating_corefile)
975 const struct btrace_insn *insn;
976 struct gdbarch *gdbarch;
979 gdbarch = get_regcache_arch (regcache);
980 pcreg = gdbarch_pc_regnum (gdbarch);
984 /* We can only provide the PC register. */
985 if (regno >= 0 && regno != pcreg)
/* During replay, synthesize the PC from the replay iterator's
   current instruction.  */
988 insn = btrace_insn_get (replay);
989 gdb_assert (insn != NULL);
991 regcache_raw_supply (regcache, regno, &insn->pc);
/* Not replaying: forward to the target beneath.  */
995 struct target_ops *t = ops->beneath;
997 t->to_fetch_registers (t, regcache, regno);
1001 /* The to_store_registers method of target record-btrace. */
1004 record_btrace_store_registers (struct target_ops *ops,
1005 struct regcache *regcache, int regno)
1007 struct target_ops *t;
1009 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1010 error (_("This record target does not allow writing registers."));
1012 gdb_assert (may_write_registers != 0);
1015 t->to_store_registers (t, regcache, regno);
1018 /* The to_prepare_to_store method of target record-btrace. */
/* Silently skip the preparation while replaying; otherwise forward.  */
1021 record_btrace_prepare_to_store (struct target_ops *ops,
1022 struct regcache *regcache)
1024 struct target_ops *t;
1026 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1030 t->to_prepare_to_store (t, regcache);
1033 /* The branch trace frame cache. */
1035 struct btrace_frame_cache
/* The thread the frame belongs to.  */
1038 struct thread_info *tp;
1040 /* The frame info. */
1041 struct frame_info *frame;
1043 /* The branch trace function segment. */
1044 const struct btrace_function *bfun;
1047 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1049 static htab_t bfcache;
1051 /* hash_f for htab_create_alloc of bfcache. */
/* Caches are keyed purely on the frame_info pointer.  */
1054 bfcache_hash (const void *arg)
1056 const struct btrace_frame_cache *cache = arg;
1058 return htab_hash_pointer (cache->frame);
1061 /* eq_f for htab_create_alloc of bfcache. */
1064 bfcache_eq (const void *arg1, const void *arg2)
1066 const struct btrace_frame_cache *cache1 = arg1;
1067 const struct btrace_frame_cache *cache2 = arg2;
1069 return cache1->frame == cache2->frame;
1072 /* Create a new btrace frame cache. */
1074 static struct btrace_frame_cache *
1075 bfcache_new (struct frame_info *frame)
1077 struct btrace_frame_cache *cache;
/* Allocate on the frame obstack so the cache lives as long as the
   frame; register it in BFCACHE (the slot must be empty).  */
1080 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1081 cache->frame = frame;
1083 slot = htab_find_slot (bfcache, cache, INSERT);
1084 gdb_assert (*slot == NULL);
1090 /* Extract the branch trace function from a branch trace frame. */
1092 static const struct btrace_function *
1093 btrace_get_frame_function (struct frame_info *frame)
1095 const struct btrace_frame_cache *cache;
1096 const struct btrace_function *bfun;
1097 struct btrace_frame_cache pattern;
/* Probe the hash table with a stack-allocated pattern; only FRAME is
   consulted by bfcache_hash/bfcache_eq.  */
1100 pattern.frame = frame;
1102 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1110 /* Implement stop_reason method for record_btrace_frame_unwind. */
1112 static enum unwind_stop_reason
1113 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1116 const struct btrace_frame_cache *cache;
1117 const struct btrace_function *bfun;
1119 cache = *this_cache;
1121 gdb_assert (bfun != NULL);
/* Without a caller segment there is nothing further to unwind to.  */
1123 if (bfun->up == NULL)
1124 return UNWIND_UNAVAILABLE;
1126 return UNWIND_NO_REASON;
1129 /* Implement this_id method for record_btrace_frame_unwind. */
1132 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1133 struct frame_id *this_id)
1135 const struct btrace_frame_cache *cache;
1136 const struct btrace_function *bfun;
1137 CORE_ADDR code, special;
1139 cache = *this_cache;
1142 gdb_assert (bfun != NULL);
/* Walk to the first segment of this function instance so all segments
   of one call share the same frame id.  */
1144 while (bfun->segment.prev != NULL)
1145 bfun = bfun->segment.prev;
1147 code = get_frame_func (this_frame);
/* The segment number disambiguates recursive calls; the stack address
   is unavailable in the btrace history.  */
1148 special = bfun->number;
1150 *this_id = frame_id_build_unavailable_stack_special (code, special);
1152 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1153 btrace_get_bfun_name (cache->bfun),
1154 core_addr_to_string_nz (this_id->code_addr),
1155 core_addr_to_string_nz (this_id->special_addr));
1158 /* Implement prev_register method for record_btrace_frame_unwind. */
1160 static struct value *
1161 record_btrace_frame_prev_register (struct frame_info *this_frame,
1165 const struct btrace_frame_cache *cache;
1166 const struct btrace_function *bfun, *caller;
1167 const struct btrace_insn *insn;
1168 struct gdbarch *gdbarch;
1172 gdbarch = get_frame_arch (this_frame);
1173 pcreg = gdbarch_pc_regnum (gdbarch);
/* The btrace history can only reconstruct the PC.  */
1174 if (pcreg < 0 || regnum != pcreg)
1175 throw_error (NOT_AVAILABLE_ERROR,
1176 _("Registers are not available in btrace record history"));
1178 cache = *this_cache;
1180 gdb_assert (bfun != NULL);
1184 throw_error (NOT_AVAILABLE_ERROR,
1185 _("No caller in btrace record history"));
/* If the caller is expected to return here, the unwound PC is the
   caller's first (resume) instruction; otherwise it is the caller's
   last instruction plus its length (the instruction after the call).  */
1187 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1189 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1194 insn = VEC_last (btrace_insn_s, caller->insn);
1197 pc += gdb_insn_length (gdbarch, pc);
1200 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1201 btrace_get_bfun_name (bfun), bfun->level,
1202 core_addr_to_string_nz (pc));
1204 return frame_unwind_got_address (this_frame, regnum, pc);
1207 /* Implement sniffer method for record_btrace_frame_unwind. */
1210 record_btrace_frame_sniffer (const struct frame_unwind *self,
1211 struct frame_info *this_frame,
1214 const struct btrace_function *bfun;
1215 struct btrace_frame_cache *cache;
1216 struct thread_info *tp;
1217 struct frame_info *next;
1219 /* THIS_FRAME does not contain a reference to its thread. */
1220 tp = find_thread_ptid (inferior_ptid);
1221 gdb_assert (tp != NULL);
/* Innermost frame (no NEXT): claim it only when the thread is
   replaying, using the replay position's function segment.  */
1224 next = get_next_frame (this_frame);
1227 const struct btrace_insn_iterator *replay;
1229 replay = tp->btrace.replay;
1231 bfun = replay->function;
/* Otherwise: claim the frame when the next (inner) frame is a btrace
   frame whose segment links up via a regular call (not a tail call).  */
1235 const struct btrace_function *callee;
1237 callee = btrace_get_frame_function (next);
1238 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1245 DEBUG ("[frame] sniffed frame for %s on level %d",
1246 btrace_get_bfun_name (bfun), bfun->level);
1248 /* This is our frame. Initialize the frame cache. */
1249 cache = bfcache_new (this_frame);
1253 *this_cache = cache;
1257 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
/* Same as above, but claims only frames reached via a tail call.  */
1260 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1261 struct frame_info *this_frame,
1264 const struct btrace_function *bfun, *callee;
1265 struct btrace_frame_cache *cache;
1266 struct frame_info *next;
1268 next = get_next_frame (this_frame);
1272 callee = btrace_get_frame_function (next);
1276 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1283 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1284 btrace_get_bfun_name (bfun), bfun->level);
1286 /* This is our frame. Initialize the frame cache. */
1287 cache = bfcache_new (this_frame);
1288 cache->tp = find_thread_ptid (inferior_ptid);
1291 *this_cache = cache;
/* dealloc_cache: drop the cache's entry from the BFCACHE hash table
   when the frame is discarded.  */
1296 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1298 struct btrace_frame_cache *cache;
1303 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1304 gdb_assert (slot != NULL);
1306 htab_remove_elt (bfcache, cache);
1309 /* btrace recording does not store previous memory content, neither the stack
1310 frames content. Any unwinding would return erroneous results as the stack
1311 contents no longer match the changed PC value restored from history.
1312 Therefore this unwinder reports any possibly unwound registers as
1315 const struct frame_unwind record_btrace_frame_unwind =
1318 record_btrace_frame_unwind_stop_reason,
1319 record_btrace_frame_this_id,
1320 record_btrace_frame_prev_register,
1322 record_btrace_frame_sniffer,
1323 record_btrace_frame_dealloc_cache
/* Identical vtable except for the tail-call sniffer.  */
1326 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1329 record_btrace_frame_unwind_stop_reason,
1330 record_btrace_frame_this_id,
1331 record_btrace_frame_prev_register,
1333 record_btrace_tailcall_frame_sniffer,
1334 record_btrace_frame_dealloc_cache
1337 /* Implement the to_get_unwinder method. */
1339 static const struct frame_unwind *
1340 record_btrace_to_get_unwinder (struct target_ops *self)
1342 return &record_btrace_frame_unwind;
1345 /* Implement the to_get_tailcall_unwinder method. */
1347 static const struct frame_unwind *
1348 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1350 return &record_btrace_tailcall_frame_unwind;
1353 /* Indicate that TP should be resumed according to FLAG. */
1356 record_btrace_resume_thread (struct thread_info *tp,
1357 enum btrace_thread_flag flag)
1359 struct btrace_thread_info *btinfo;
1361 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1363 btinfo = &tp->btrace;
/* Refuse to stack resume requests on a thread already in motion.  */
1365 if ((btinfo->flags & BTHR_MOVE) != 0)
1366 error (_("Thread already moving."));
1368 /* Fetch the latest branch trace. */
/* Record the requested move in the per-thread flags; the actual
   stepping happens later (in to_wait).  */
1371 btinfo->flags |= flag;
1374 /* Find the thread to resume given a PTID. */
1376 static struct thread_info *
1377 record_btrace_find_resume_thread (ptid_t ptid)
1379 struct thread_info *tp;
1381 /* When asked to resume everything, we pick the current thread. */
1382 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1383 ptid = inferior_ptid;
1385 return find_thread_ptid (ptid);
1388 /* Start replaying a thread. */
1390 static struct btrace_insn_iterator *
1391 record_btrace_start_replaying (struct thread_info *tp)
1393 volatile struct gdb_exception except;
1394 struct btrace_insn_iterator *replay;
1395 struct btrace_thread_info *btinfo;
1398 btinfo = &tp->btrace;
1401 /* We can't start replaying without trace. */
1402 if (btinfo->begin == NULL)
1405 /* Clear the executing flag to allow changes to the current frame.
1406 We are not actually running, yet. We just started a reverse execution
1407 command or a record goto command.
1408 For the latter, EXECUTING is false and this has no effect.
1409 For the former, EXECUTING is true and we're in to_wait, about to
1410 move the thread. Since we need to recompute the stack, we temporarily
1411 set EXECUTING to false. */
1412 executing = is_executing (tp->ptid);
1413 set_executing (tp->ptid, 0);
1415 /* GDB stores the current frame_id when stepping in order to detect steps
1417 Since frames are computed differently when we're replaying, we need to
1418 recompute those stored frames and fix them up so we can still detect
1419 subroutines after we started replaying. */
1420 TRY_CATCH (except, RETURN_MASK_ALL)
1422 struct frame_info *frame;
1423 struct frame_id frame_id;
1424 int upd_step_frame_id, upd_step_stack_frame_id;
1426 /* The current frame without replaying - computed via normal unwind. */
1427 frame = get_current_frame ();
1428 frame_id = get_frame_id (frame);
1430 /* Check if we need to update any stepping-related frame id's. */
1431 upd_step_frame_id = frame_id_eq (frame_id,
1432 tp->control.step_frame_id);
1433 upd_step_stack_frame_id = frame_id_eq (frame_id,
1434 tp->control.step_stack_frame_id);
1436 /* We start replaying at the end of the branch trace. This corresponds
1437 to the current instruction. */
1438 replay = xmalloc (sizeof (*replay));
1439 btrace_insn_end (replay, btinfo);
1441 /* We're not replaying, yet. */
1442 gdb_assert (btinfo->replay == NULL);
1443 btinfo->replay = replay;
1445 /* Make sure we're not using any stale registers. */
1446 registers_changed_ptid (tp->ptid);
1448 /* The current frame with replaying - computed via btrace unwind. */
1449 frame = get_current_frame ();
1450 frame_id = get_frame_id (frame);
1452 /* Replace stepping related frames where necessary. */
1453 if (upd_step_frame_id)
1454 tp->control.step_frame_id = frame_id;
1455 if (upd_step_stack_frame_id)
1456 tp->control.step_stack_frame_id = frame_id;
1459 /* Restore the previous execution state. */
1460 set_executing (tp->ptid, executing);
/* On error, undo the partial start: free the iterator, clear the
   replay pointer, flush registers, and re-throw.  */
1462 if (except.reason < 0)
1464 xfree (btinfo->replay);
1465 btinfo->replay = NULL;
1467 registers_changed_ptid (tp->ptid);
1469 throw_exception (except);
1475 /* Stop replaying a thread. */
1478 record_btrace_stop_replaying (struct thread_info *tp)
1480 struct btrace_thread_info *btinfo;
1482 btinfo = &tp->btrace;
1484 xfree (btinfo->replay);
1485 btinfo->replay = NULL;
1487 /* Make sure we're not leaving any stale registers. */
1488 registers_changed_ptid (tp->ptid);
1491 /* The to_resume method of target record-btrace. */
1494 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1495 enum gdb_signal signal)
1497 struct thread_info *tp, *other;
1498 enum btrace_thread_flag flag;
1500 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1502 /* Store the execution direction of the last resume. */
1503 record_btrace_resume_exec_dir = execution_direction;
1505 tp = record_btrace_find_resume_thread (ptid);
1507 error (_("Cannot find thread to resume."));
1509 /* Stop replaying other threads if the thread to resume is not replaying. */
1510 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1511 ALL_NON_EXITED_THREADS (other)
1512 record_btrace_stop_replaying (other);
1514 /* As long as we're not replaying, just forward the request. */
1515 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1518 return ops->to_resume (ops, ptid, step, signal);
1521 /* Compute the btrace thread flag for the requested move. */
1523 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1525 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1527 /* At the moment, we only move a single thread. We could also move
1528 all threads in parallel by single-stepping each resumed thread
1529 until the first runs into an event.
1530 When we do that, we would want to continue all other threads.
1531 For now, just resume one thread to not confuse to_wait. */
1532 record_btrace_resume_thread (tp, flag);
1534 /* We just indicate the resume intent here. The actual stepping happens in
1535 record_btrace_wait below. */
1537 /* Async support. */
1538 if (target_can_async_p ())
1540 target_async (inferior_event_handler, 0);
1541 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1545 /* Find a thread to move. */
1547 static struct thread_info *
1548 record_btrace_find_thread_to_move (ptid_t ptid)
1550 struct thread_info *tp;
1552 /* First check the parameter thread. */
1553 tp = find_thread_ptid (ptid);
1554 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1557 /* Otherwise, find one other thread that has been resumed. */
1558 ALL_NON_EXITED_THREADS (tp)
1559 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1565 /* Return a target_waitstatus indicating that we ran out of history. */
1567 static struct target_waitstatus
1568 btrace_step_no_history (void)
1570 struct target_waitstatus status;
1572 status.kind = TARGET_WAITKIND_NO_HISTORY;
1577 /* Return a target_waitstatus indicating that a step finished. */
1579 static struct target_waitstatus
1580 btrace_step_stopped (void)
1582 struct target_waitstatus status;
1584 status.kind = TARGET_WAITKIND_STOPPED;
1585 status.value.sig = GDB_SIGNAL_TRAP;
1590 /* Clear the record histories. */
1593 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1595 xfree (btinfo->insn_history);
1596 xfree (btinfo->call_history);
1598 btinfo->insn_history = NULL;
1599 btinfo->call_history = NULL;
1602 /* Step a single thread. */
1604 static struct target_waitstatus
1605 record_btrace_step_thread (struct thread_info *tp)
1607 struct btrace_insn_iterator *replay, end;
1608 struct btrace_thread_info *btinfo;
1609 struct address_space *aspace;
1610 struct inferior *inf;
1611 enum btrace_thread_flag flags;
1614 /* We can't step without an execution history. */
1615 if (btrace_is_empty (tp))
1616 return btrace_step_no_history ();
1618 btinfo = &tp->btrace;
1619 replay = btinfo->replay;
1621 flags = btinfo->flags & BTHR_MOVE;
1622 btinfo->flags &= ~BTHR_MOVE;
1624 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1629 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1632 /* We're done if we're not replaying. */
1634 return btrace_step_no_history ();
1636 /* We are always able to step at least once. */
1637 steps = btrace_insn_next (replay, 1);
1638 gdb_assert (steps == 1);
1640 /* Determine the end of the instruction trace. */
1641 btrace_insn_end (&end, btinfo);
1643 /* We stop replaying if we reached the end of the trace. */
1644 if (btrace_insn_cmp (replay, &end) == 0)
1645 record_btrace_stop_replaying (tp);
1647 return btrace_step_stopped ();
1650 /* Start replaying if we're not already doing so. */
1652 replay = record_btrace_start_replaying (tp);
1654 /* If we can't step any further, we reached the end of the history. */
1655 steps = btrace_insn_prev (replay, 1);
1657 return btrace_step_no_history ();
1659 return btrace_step_stopped ();
1662 /* We're done if we're not replaying. */
1664 return btrace_step_no_history ();
1666 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1667 aspace = inf->aspace;
1669 /* Determine the end of the instruction trace. */
1670 btrace_insn_end (&end, btinfo);
1674 const struct btrace_insn *insn;
1676 /* We are always able to step at least once. */
1677 steps = btrace_insn_next (replay, 1);
1678 gdb_assert (steps == 1);
1680 /* We stop replaying if we reached the end of the trace. */
1681 if (btrace_insn_cmp (replay, &end) == 0)
1683 record_btrace_stop_replaying (tp);
1684 return btrace_step_no_history ();
1687 insn = btrace_insn_get (replay);
1690 DEBUG ("stepping %d (%s) ... %s", tp->num,
1691 target_pid_to_str (tp->ptid),
1692 core_addr_to_string_nz (insn->pc));
1694 if (breakpoint_here_p (aspace, insn->pc))
1695 return btrace_step_stopped ();
1699 /* Start replaying if we're not already doing so. */
1701 replay = record_btrace_start_replaying (tp);
1703 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1704 aspace = inf->aspace;
1708 const struct btrace_insn *insn;
1710 /* If we can't step any further, we're done. */
1711 steps = btrace_insn_prev (replay, 1);
1713 return btrace_step_no_history ();
1715 insn = btrace_insn_get (replay);
1718 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1719 target_pid_to_str (tp->ptid),
1720 core_addr_to_string_nz (insn->pc));
1722 if (breakpoint_here_p (aspace, insn->pc))
1723 return btrace_step_stopped ();
1728 /* The to_wait method of target record-btrace. */
1731 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1732 struct target_waitstatus *status, int options)
1734 struct thread_info *tp, *other;
1736 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1738 /* As long as we're not replaying, just forward the request. */
1739 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1742 return ops->to_wait (ops, ptid, status, options);
1745 /* Let's find a thread to move. */
1746 tp = record_btrace_find_thread_to_move (ptid);
1749 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1751 status->kind = TARGET_WAITKIND_IGNORE;
1752 return minus_one_ptid;
1755 /* We only move a single thread. We're not able to correlate threads. */
1756 *status = record_btrace_step_thread (tp);
1758 /* Stop all other threads. */
1760 ALL_NON_EXITED_THREADS (other)
1761 other->btrace.flags &= ~BTHR_MOVE;
1763 /* Start record histories anew from the current position. */
1764 record_btrace_clear_histories (&tp->btrace);
1766 /* We moved the replay position but did not update registers. */
1767 registers_changed_ptid (tp->ptid);
/* The to_can_execute_reverse method of target record-btrace.  Replaying a
   recorded trace always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1780 /* The to_decr_pc_after_break method of target record-btrace. */
1783 record_btrace_decr_pc_after_break (struct target_ops *ops,
1784 struct gdbarch *gdbarch)
1786 /* When replaying, we do not actually execute the breakpoint instruction
1787 so there is no need to adjust the PC after hitting a breakpoint. */
1788 if (record_btrace_is_replaying (ops))
1791 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1794 /* The to_find_new_threads method of target record-btrace. */
1797 record_btrace_find_new_threads (struct target_ops *ops)
1799 /* Don't expect new threads if we're replaying. */
1800 if (record_btrace_is_replaying (ops))
1803 /* Forward the request. */
1805 ops->to_find_new_threads (ops);
1808 /* The to_thread_alive method of target record-btrace. */
1811 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1813 /* We don't add or remove threads during replay. */
1814 if (record_btrace_is_replaying (ops))
1815 return find_thread_ptid (ptid) != NULL;
1817 /* Forward the request. */
1819 return ops->to_thread_alive (ops, ptid);
1822 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1826 record_btrace_set_replay (struct thread_info *tp,
1827 const struct btrace_insn_iterator *it)
1829 struct btrace_thread_info *btinfo;
1831 btinfo = &tp->btrace;
1833 if (it == NULL || it->function == NULL)
1834 record_btrace_stop_replaying (tp);
1837 if (btinfo->replay == NULL)
1838 record_btrace_start_replaying (tp);
1839 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1842 *btinfo->replay = *it;
1843 registers_changed_ptid (tp->ptid);
1846 /* Start anew from the new replay position. */
1847 record_btrace_clear_histories (btinfo);
1850 /* The to_goto_record_begin method of target record-btrace. */
1853 record_btrace_goto_begin (struct target_ops *self)
1855 struct thread_info *tp;
1856 struct btrace_insn_iterator begin;
1858 tp = require_btrace_thread ();
1860 btrace_insn_begin (&begin, &tp->btrace);
1861 record_btrace_set_replay (tp, &begin);
1863 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1866 /* The to_goto_record_end method of target record-btrace. */
1869 record_btrace_goto_end (struct target_ops *ops)
1871 struct thread_info *tp;
1873 tp = require_btrace_thread ();
1875 record_btrace_set_replay (tp, NULL);
1877 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1880 /* The to_goto_record method of target record-btrace. */
1883 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1885 struct thread_info *tp;
1886 struct btrace_insn_iterator it;
1887 unsigned int number;
1892 /* Check for wrap-arounds. */
1894 error (_("Instruction number out of range."));
1896 tp = require_btrace_thread ();
1898 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1900 error (_("No such instruction."));
1902 record_btrace_set_replay (tp, &it);
1904 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1907 /* The to_execution_direction target method. */
1909 static enum exec_direction_kind
1910 record_btrace_execution_direction (struct target_ops *self)
1912 return record_btrace_resume_exec_dir;
1915 /* The to_prepare_to_generate_core target method. */
1918 record_btrace_prepare_to_generate_core (struct target_ops *self)
1920 record_btrace_generating_corefile = 1;
1923 /* The to_done_generating_core target method. */
1926 record_btrace_done_generating_core (struct target_ops *self)
1928 record_btrace_generating_corefile = 0;
1931 /* Initialize the record-btrace target ops. */
1934 init_record_btrace_ops (void)
1936 struct target_ops *ops;
1938 ops = &record_btrace_ops;
1939 ops->to_shortname = "record-btrace";
1940 ops->to_longname = "Branch tracing target";
1941 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1942 ops->to_open = record_btrace_open;
1943 ops->to_close = record_btrace_close;
1944 ops->to_detach = record_detach;
1945 ops->to_disconnect = record_disconnect;
1946 ops->to_mourn_inferior = record_mourn_inferior;
1947 ops->to_kill = record_kill;
1948 ops->to_stop_recording = record_btrace_stop_recording;
1949 ops->to_info_record = record_btrace_info;
1950 ops->to_insn_history = record_btrace_insn_history;
1951 ops->to_insn_history_from = record_btrace_insn_history_from;
1952 ops->to_insn_history_range = record_btrace_insn_history_range;
1953 ops->to_call_history = record_btrace_call_history;
1954 ops->to_call_history_from = record_btrace_call_history_from;
1955 ops->to_call_history_range = record_btrace_call_history_range;
1956 ops->to_record_is_replaying = record_btrace_is_replaying;
1957 ops->to_xfer_partial = record_btrace_xfer_partial;
1958 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1959 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1960 ops->to_fetch_registers = record_btrace_fetch_registers;
1961 ops->to_store_registers = record_btrace_store_registers;
1962 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1963 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
1964 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
1965 ops->to_resume = record_btrace_resume;
1966 ops->to_wait = record_btrace_wait;
1967 ops->to_find_new_threads = record_btrace_find_new_threads;
1968 ops->to_thread_alive = record_btrace_thread_alive;
1969 ops->to_goto_record_begin = record_btrace_goto_begin;
1970 ops->to_goto_record_end = record_btrace_goto_end;
1971 ops->to_goto_record = record_btrace_goto;
1972 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1973 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1974 ops->to_execution_direction = record_btrace_execution_direction;
1975 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
1976 ops->to_done_generating_core = record_btrace_done_generating_core;
1977 ops->to_stratum = record_stratum;
1978 ops->to_magic = OPS_MAGIC;
1981 /* Alias for "target record". */
1984 cmd_record_btrace_start (char *args, int from_tty)
1986 if (args != NULL && *args != 0)
1987 error (_("Invalid argument."));
1989 execute_command ("target record-btrace", from_tty);
1992 /* The "set record btrace" command. */
1995 cmd_set_record_btrace (char *args, int from_tty)
1997 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2000 /* The "show record btrace" command. */
2003 cmd_show_record_btrace (char *args, int from_tty)
2005 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2008 /* The "show record btrace replay-memory-access" command. */
2011 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2012 struct cmd_list_element *c, const char *value)
2014 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2015 replay_memory_access);
2018 void _initialize_record_btrace (void);
2020 /* Initialize btrace commands. */
2023 _initialize_record_btrace (void)
2025 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2026 _("Start branch trace recording."),
2028 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2030 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2031 _("Set record options"), &set_record_btrace_cmdlist,
2032 "set record btrace ", 0, &set_record_cmdlist);
2034 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2035 _("Show record options"), &show_record_btrace_cmdlist,
2036 "show record btrace ", 0, &show_record_cmdlist);
2038 add_setshow_enum_cmd ("replay-memory-access", no_class,
2039 replay_memory_access_types, &replay_memory_access, _("\
2040 Set what memory accesses are allowed during replay."), _("\
2041 Show what memory accesses are allowed during replay."),
2042 _("Default is READ-ONLY.\n\n\
2043 The btrace record target does not trace data.\n\
2044 The memory therefore corresponds to the live target and not \
2045 to the current replay position.\n\n\
2046 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2047 When READ-WRITE, allow accesses to read-only and read-write memory during \
2049 NULL, cmd_show_replay_memory_access,
2050 &set_record_btrace_cmdlist,
2051 &show_record_btrace_cmdlist);
2053 init_record_btrace_ops ();
2054 add_target (&record_btrace_ops);
2056 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,