1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer *record_btrace_thread_observer;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access = replay_memory_access_read_only;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element *set_record_btrace_cmdlist;
62 static struct cmd_list_element *show_record_btrace_cmdlist;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
86 /* Update the branch trace for the current thread and return a pointer to its
89 Throws an error if there is no thread or no trace. This function never
92 static struct thread_info *
93 require_btrace_thread (void)
95 struct thread_info *tp;
99 tp = find_thread_ptid (inferior_ptid);
101 error (_("No thread."));
105 if (btrace_is_empty (tp))
106 error (_("No trace."));
111 /* Update the branch trace for the current thread and return a pointer to its
112 branch trace information struct.
114 Throws an error if there is no thread or no trace. This function never
117 static struct btrace_thread_info *
118 require_btrace (void)
120 struct thread_info *tp;
122 tp = require_btrace_thread ();
127 /* Enable branch tracing for one thread. Warn on errors. */
130 record_btrace_enable_warn (struct thread_info *tp)
132 volatile struct gdb_exception error;
134 TRY_CATCH (error, RETURN_MASK_ERROR)
137 if (error.message != NULL)
138 warning ("%s", error.message);
141 /* Callback function to disable branch tracing for one thread. */
144 record_btrace_disable_callback (void *arg)
146 struct thread_info *tp;
153 /* Enable automatic tracing of new threads. */
156 record_btrace_auto_enable (void)
158 DEBUG ("attach thread observer");
160 record_btrace_thread_observer
161 = observer_attach_new_thread (record_btrace_enable_warn);
164 /* Disable automatic tracing of new threads. */
167 record_btrace_auto_disable (void)
169 /* The observer may have been detached, already. */
170 if (record_btrace_thread_observer == NULL)
173 DEBUG ("detach thread observer");
175 observer_detach_new_thread (record_btrace_thread_observer);
176 record_btrace_thread_observer = NULL;
179 /* The record-btrace async event handler function. */
182 record_btrace_handle_async_inferior_event (gdb_client_data data)
184 inferior_event_handler (INF_REG_EVENT, NULL);
187 /* The to_open method of target record-btrace. */
190 record_btrace_open (const char *args, int from_tty)
192 struct cleanup *disable_chain;
193 struct thread_info *tp;
199 if (!target_has_execution)
200 error (_("The program is not being run."));
203 error (_("Record btrace can't debug inferior in non-stop mode."));
205 gdb_assert (record_btrace_thread_observer == NULL);
207 disable_chain = make_cleanup (null_cleanup, NULL);
208 ALL_NON_EXITED_THREADS (tp)
209 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
213 make_cleanup (record_btrace_disable_callback, tp);
216 record_btrace_auto_enable ();
218 push_target (&record_btrace_ops);
220 record_btrace_async_inferior_event_handler
221 = create_async_event_handler (record_btrace_handle_async_inferior_event,
223 record_btrace_generating_corefile = 0;
225 observer_notify_record_changed (current_inferior (), 1);
227 discard_cleanups (disable_chain);
230 /* The to_stop_recording method of target record-btrace. */
233 record_btrace_stop_recording (struct target_ops *self)
235 struct thread_info *tp;
237 DEBUG ("stop recording");
239 record_btrace_auto_disable ();
241 ALL_NON_EXITED_THREADS (tp)
242 if (tp->btrace.target != NULL)
246 /* The to_close method of target record-btrace. */
249 record_btrace_close (struct target_ops *self)
251 struct thread_info *tp;
253 if (record_btrace_async_inferior_event_handler != NULL)
254 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
256 /* Make sure automatic recording gets disabled even if we did not stop
257 recording before closing the record-btrace target. */
258 record_btrace_auto_disable ();
260 /* We should have already stopped recording.
261 Tear down btrace in case we have not. */
262 ALL_NON_EXITED_THREADS (tp)
263 btrace_teardown (tp);
266 /* The to_async method of target record-btrace. */
269 record_btrace_async (struct target_ops *ops,
270 void (*callback) (enum inferior_event_type event_type,
274 if (callback != NULL)
275 mark_async_event_handler (record_btrace_async_inferior_event_handler);
277 clear_async_event_handler (record_btrace_async_inferior_event_handler);
279 ops->beneath->to_async (ops->beneath, callback, context);
282 /* The to_info_record method of target record-btrace. */
285 record_btrace_info (struct target_ops *self)
287 struct btrace_thread_info *btinfo;
288 struct thread_info *tp;
289 unsigned int insns, calls;
293 tp = find_thread_ptid (inferior_ptid);
295 error (_("No thread."));
302 btinfo = &tp->btrace;
304 if (!btrace_is_empty (tp))
306 struct btrace_call_iterator call;
307 struct btrace_insn_iterator insn;
309 btrace_call_end (&call, btinfo);
310 btrace_call_prev (&call, 1);
311 calls = btrace_call_number (&call);
313 btrace_insn_end (&insn, btinfo);
314 btrace_insn_prev (&insn, 1);
315 insns = btrace_insn_number (&insn);
318 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
319 "%d (%s).\n"), insns, calls, tp->num,
320 target_pid_to_str (tp->ptid));
322 if (btrace_is_replaying (tp))
323 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
324 btrace_insn_number (btinfo->replay));
327 /* Print an unsigned int. */
330 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
332 ui_out_field_fmt (uiout, fld, "%u", val);
335 /* Disassemble a section of the recorded instruction trace. */
338 btrace_insn_history (struct ui_out *uiout,
339 const struct btrace_insn_iterator *begin,
340 const struct btrace_insn_iterator *end, int flags)
342 struct gdbarch *gdbarch;
343 struct btrace_insn_iterator it;
345 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
346 btrace_insn_number (end));
348 gdbarch = target_gdbarch ();
350 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
352 const struct btrace_insn *insn;
354 insn = btrace_insn_get (&it);
356 /* Print the instruction index. */
357 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
358 ui_out_text (uiout, "\t");
360 /* Disassembly with '/m' flag may not produce the expected result.
362 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
366 /* The to_insn_history method of target record-btrace. */
369 record_btrace_insn_history (struct target_ops *self, int size, int flags)
371 struct btrace_thread_info *btinfo;
372 struct btrace_insn_history *history;
373 struct btrace_insn_iterator begin, end;
374 struct cleanup *uiout_cleanup;
375 struct ui_out *uiout;
376 unsigned int context, covered;
378 uiout = current_uiout;
379 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
381 context = abs (size);
383 error (_("Bad record instruction-history-size."));
385 btinfo = require_btrace ();
386 history = btinfo->insn_history;
389 struct btrace_insn_iterator *replay;
391 DEBUG ("insn-history (0x%x): %d", flags, size);
393 /* If we're replaying, we start at the replay position. Otherwise, we
394 start at the tail of the trace. */
395 replay = btinfo->replay;
399 btrace_insn_end (&begin, btinfo);
401 /* We start from here and expand in the requested direction. Then we
402 expand in the other direction, as well, to fill up any remaining
407 /* We want the current position covered, as well. */
408 covered = btrace_insn_next (&end, 1);
409 covered += btrace_insn_prev (&begin, context - covered);
410 covered += btrace_insn_next (&end, context - covered);
414 covered = btrace_insn_next (&end, context);
415 covered += btrace_insn_prev (&begin, context - covered);
420 begin = history->begin;
423 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
424 btrace_insn_number (&begin), btrace_insn_number (&end));
429 covered = btrace_insn_prev (&begin, context);
434 covered = btrace_insn_next (&end, context);
439 btrace_insn_history (uiout, &begin, &end, flags);
443 printf_unfiltered (_("At the start of the branch trace record.\n"));
445 printf_unfiltered (_("At the end of the branch trace record.\n"));
448 btrace_set_insn_history (btinfo, &begin, &end);
449 do_cleanups (uiout_cleanup);
452 /* The to_insn_history_range method of target record-btrace. */
455 record_btrace_insn_history_range (struct target_ops *self,
456 ULONGEST from, ULONGEST to, int flags)
458 struct btrace_thread_info *btinfo;
459 struct btrace_insn_history *history;
460 struct btrace_insn_iterator begin, end;
461 struct cleanup *uiout_cleanup;
462 struct ui_out *uiout;
463 unsigned int low, high;
466 uiout = current_uiout;
467 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
472 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
474 /* Check for wrap-arounds. */
475 if (low != from || high != to)
476 error (_("Bad range."));
479 error (_("Bad range."));
481 btinfo = require_btrace ();
483 found = btrace_find_insn_by_number (&begin, btinfo, low);
485 error (_("Range out of bounds."));
487 found = btrace_find_insn_by_number (&end, btinfo, high);
490 /* Silently truncate the range. */
491 btrace_insn_end (&end, btinfo);
495 /* We want both begin and end to be inclusive. */
496 btrace_insn_next (&end, 1);
499 btrace_insn_history (uiout, &begin, &end, flags);
500 btrace_set_insn_history (btinfo, &begin, &end);
502 do_cleanups (uiout_cleanup);
505 /* The to_insn_history_from method of target record-btrace. */
508 record_btrace_insn_history_from (struct target_ops *self,
509 ULONGEST from, int size, int flags)
511 ULONGEST begin, end, context;
513 context = abs (size);
515 error (_("Bad record instruction-history-size."));
524 begin = from - context + 1;
529 end = from + context - 1;
531 /* Check for wrap-around. */
536 record_btrace_insn_history_range (self, begin, end, flags);
539 /* Print the instruction number range for a function call history line. */
542 btrace_call_history_insn_range (struct ui_out *uiout,
543 const struct btrace_function *bfun)
545 unsigned int begin, end, size;
547 size = VEC_length (btrace_insn_s, bfun->insn);
548 gdb_assert (size > 0);
550 begin = bfun->insn_offset;
551 end = begin + size - 1;
553 ui_out_field_uint (uiout, "insn begin", begin);
554 ui_out_text (uiout, ",");
555 ui_out_field_uint (uiout, "insn end", end);
558 /* Print the source line information for a function call history line. */
561 btrace_call_history_src_line (struct ui_out *uiout,
562 const struct btrace_function *bfun)
571 ui_out_field_string (uiout, "file",
572 symtab_to_filename_for_display (symbol_symtab (sym)));
574 begin = bfun->lbegin;
580 ui_out_text (uiout, ":");
581 ui_out_field_int (uiout, "min line", begin);
586 ui_out_text (uiout, ",");
587 ui_out_field_int (uiout, "max line", end);
590 /* Get the name of a branch trace function. */
593 btrace_get_bfun_name (const struct btrace_function *bfun)
595 struct minimal_symbol *msym;
605 return SYMBOL_PRINT_NAME (sym);
606 else if (msym != NULL)
607 return MSYMBOL_PRINT_NAME (msym);
612 /* Disassemble a section of the recorded function trace. */
615 btrace_call_history (struct ui_out *uiout,
616 const struct btrace_thread_info *btinfo,
617 const struct btrace_call_iterator *begin,
618 const struct btrace_call_iterator *end,
619 enum record_print_flag flags)
621 struct btrace_call_iterator it;
623 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
624 btrace_call_number (end));
626 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
628 const struct btrace_function *bfun;
629 struct minimal_symbol *msym;
632 bfun = btrace_call_get (&it);
636 /* Print the function index. */
637 ui_out_field_uint (uiout, "index", bfun->number);
638 ui_out_text (uiout, "\t");
640 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
642 int level = bfun->level + btinfo->level, i;
644 for (i = 0; i < level; ++i)
645 ui_out_text (uiout, " ");
649 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
650 else if (msym != NULL)
651 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
652 else if (!ui_out_is_mi_like_p (uiout))
653 ui_out_field_string (uiout, "function", "??");
655 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
657 ui_out_text (uiout, _("\tinst "));
658 btrace_call_history_insn_range (uiout, bfun);
661 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
663 ui_out_text (uiout, _("\tat "));
664 btrace_call_history_src_line (uiout, bfun);
667 ui_out_text (uiout, "\n");
671 /* The to_call_history method of target record-btrace. */
674 record_btrace_call_history (struct target_ops *self, int size, int flags)
676 struct btrace_thread_info *btinfo;
677 struct btrace_call_history *history;
678 struct btrace_call_iterator begin, end;
679 struct cleanup *uiout_cleanup;
680 struct ui_out *uiout;
681 unsigned int context, covered;
683 uiout = current_uiout;
684 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
686 context = abs (size);
688 error (_("Bad record function-call-history-size."));
690 btinfo = require_btrace ();
691 history = btinfo->call_history;
694 struct btrace_insn_iterator *replay;
696 DEBUG ("call-history (0x%x): %d", flags, size);
698 /* If we're replaying, we start at the replay position. Otherwise, we
699 start at the tail of the trace. */
700 replay = btinfo->replay;
703 begin.function = replay->function;
704 begin.btinfo = btinfo;
707 btrace_call_end (&begin, btinfo);
709 /* We start from here and expand in the requested direction. Then we
710 expand in the other direction, as well, to fill up any remaining
715 /* We want the current position covered, as well. */
716 covered = btrace_call_next (&end, 1);
717 covered += btrace_call_prev (&begin, context - covered);
718 covered += btrace_call_next (&end, context - covered);
722 covered = btrace_call_next (&end, context);
723 covered += btrace_call_prev (&begin, context- covered);
728 begin = history->begin;
731 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
732 btrace_call_number (&begin), btrace_call_number (&end));
737 covered = btrace_call_prev (&begin, context);
742 covered = btrace_call_next (&end, context);
747 btrace_call_history (uiout, btinfo, &begin, &end, flags);
751 printf_unfiltered (_("At the start of the branch trace record.\n"));
753 printf_unfiltered (_("At the end of the branch trace record.\n"));
756 btrace_set_call_history (btinfo, &begin, &end);
757 do_cleanups (uiout_cleanup);
760 /* The to_call_history_range method of target record-btrace. */
763 record_btrace_call_history_range (struct target_ops *self,
764 ULONGEST from, ULONGEST to, int flags)
766 struct btrace_thread_info *btinfo;
767 struct btrace_call_history *history;
768 struct btrace_call_iterator begin, end;
769 struct cleanup *uiout_cleanup;
770 struct ui_out *uiout;
771 unsigned int low, high;
774 uiout = current_uiout;
775 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
780 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
782 /* Check for wrap-arounds. */
783 if (low != from || high != to)
784 error (_("Bad range."));
787 error (_("Bad range."));
789 btinfo = require_btrace ();
791 found = btrace_find_call_by_number (&begin, btinfo, low);
793 error (_("Range out of bounds."));
795 found = btrace_find_call_by_number (&end, btinfo, high);
798 /* Silently truncate the range. */
799 btrace_call_end (&end, btinfo);
803 /* We want both begin and end to be inclusive. */
804 btrace_call_next (&end, 1);
807 btrace_call_history (uiout, btinfo, &begin, &end, flags);
808 btrace_set_call_history (btinfo, &begin, &end);
810 do_cleanups (uiout_cleanup);
813 /* The to_call_history_from method of target record-btrace. */
816 record_btrace_call_history_from (struct target_ops *self,
817 ULONGEST from, int size, int flags)
819 ULONGEST begin, end, context;
821 context = abs (size);
823 error (_("Bad record function-call-history-size."));
832 begin = from - context + 1;
837 end = from + context - 1;
839 /* Check for wrap-around. */
844 record_btrace_call_history_range (self, begin, end, flags);
847 /* The to_record_is_replaying method of target record-btrace. */
850 record_btrace_is_replaying (struct target_ops *self)
852 struct thread_info *tp;
854 ALL_NON_EXITED_THREADS (tp)
855 if (btrace_is_replaying (tp))
861 /* The to_xfer_partial method of target record-btrace. */
863 static enum target_xfer_status
864 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
865 const char *annex, gdb_byte *readbuf,
866 const gdb_byte *writebuf, ULONGEST offset,
867 ULONGEST len, ULONGEST *xfered_len)
869 struct target_ops *t;
871 /* Filter out requests that don't make sense during replay. */
872 if (replay_memory_access == replay_memory_access_read_only
873 && !record_btrace_generating_corefile
874 && record_btrace_is_replaying (ops))
878 case TARGET_OBJECT_MEMORY:
880 struct target_section *section;
882 /* We do not allow writing memory in general. */
883 if (writebuf != NULL)
886 return TARGET_XFER_UNAVAILABLE;
889 /* We allow reading readonly memory. */
890 section = target_section_by_addr (ops, offset);
893 /* Check if the section we found is readonly. */
894 if ((bfd_get_section_flags (section->the_bfd_section->owner,
895 section->the_bfd_section)
896 & SEC_READONLY) != 0)
898 /* Truncate the request to fit into this section. */
899 len = min (len, section->endaddr - offset);
905 return TARGET_XFER_UNAVAILABLE;
910 /* Forward the request. */
912 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
913 offset, len, xfered_len);
916 /* The to_insert_breakpoint method of target record-btrace. */
919 record_btrace_insert_breakpoint (struct target_ops *ops,
920 struct gdbarch *gdbarch,
921 struct bp_target_info *bp_tgt)
923 volatile struct gdb_exception except;
927 /* Inserting breakpoints requires accessing memory. Allow it for the
928 duration of this function. */
929 old = replay_memory_access;
930 replay_memory_access = replay_memory_access_read_write;
933 TRY_CATCH (except, RETURN_MASK_ALL)
934 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
936 replay_memory_access = old;
938 if (except.reason < 0)
939 throw_exception (except);
944 /* The to_remove_breakpoint method of target record-btrace. */
947 record_btrace_remove_breakpoint (struct target_ops *ops,
948 struct gdbarch *gdbarch,
949 struct bp_target_info *bp_tgt)
951 volatile struct gdb_exception except;
955 /* Removing breakpoints requires accessing memory. Allow it for the
956 duration of this function. */
957 old = replay_memory_access;
958 replay_memory_access = replay_memory_access_read_write;
961 TRY_CATCH (except, RETURN_MASK_ALL)
962 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
964 replay_memory_access = old;
966 if (except.reason < 0)
967 throw_exception (except);
972 /* The to_fetch_registers method of target record-btrace. */
975 record_btrace_fetch_registers (struct target_ops *ops,
976 struct regcache *regcache, int regno)
978 struct btrace_insn_iterator *replay;
979 struct thread_info *tp;
981 tp = find_thread_ptid (inferior_ptid);
982 gdb_assert (tp != NULL);
984 replay = tp->btrace.replay;
985 if (replay != NULL && !record_btrace_generating_corefile)
987 const struct btrace_insn *insn;
988 struct gdbarch *gdbarch;
991 gdbarch = get_regcache_arch (regcache);
992 pcreg = gdbarch_pc_regnum (gdbarch);
996 /* We can only provide the PC register. */
997 if (regno >= 0 && regno != pcreg)
1000 insn = btrace_insn_get (replay);
1001 gdb_assert (insn != NULL);
1003 regcache_raw_supply (regcache, regno, &insn->pc);
1007 struct target_ops *t = ops->beneath;
1009 t->to_fetch_registers (t, regcache, regno);
1013 /* The to_store_registers method of target record-btrace. */
1016 record_btrace_store_registers (struct target_ops *ops,
1017 struct regcache *regcache, int regno)
1019 struct target_ops *t;
1021 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1022 error (_("This record target does not allow writing registers."));
1024 gdb_assert (may_write_registers != 0);
1027 t->to_store_registers (t, regcache, regno);
1030 /* The to_prepare_to_store method of target record-btrace. */
1033 record_btrace_prepare_to_store (struct target_ops *ops,
1034 struct regcache *regcache)
1036 struct target_ops *t;
1038 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1042 t->to_prepare_to_store (t, regcache);
1045 /* The branch trace frame cache. */
1047 struct btrace_frame_cache
1050 struct thread_info *tp;
1052 /* The frame info. */
1053 struct frame_info *frame;
1055 /* The branch trace function segment. */
1056 const struct btrace_function *bfun;
1059 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1061 static htab_t bfcache;
1063 /* hash_f for htab_create_alloc of bfcache. */
1066 bfcache_hash (const void *arg)
1068 const struct btrace_frame_cache *cache = arg;
1070 return htab_hash_pointer (cache->frame);
1073 /* eq_f for htab_create_alloc of bfcache. */
1076 bfcache_eq (const void *arg1, const void *arg2)
1078 const struct btrace_frame_cache *cache1 = arg1;
1079 const struct btrace_frame_cache *cache2 = arg2;
1081 return cache1->frame == cache2->frame;
1084 /* Create a new btrace frame cache. */
1086 static struct btrace_frame_cache *
1087 bfcache_new (struct frame_info *frame)
1089 struct btrace_frame_cache *cache;
1092 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1093 cache->frame = frame;
1095 slot = htab_find_slot (bfcache, cache, INSERT);
1096 gdb_assert (*slot == NULL);
1102 /* Extract the branch trace function from a branch trace frame. */
1104 static const struct btrace_function *
1105 btrace_get_frame_function (struct frame_info *frame)
1107 const struct btrace_frame_cache *cache;
1108 const struct btrace_function *bfun;
1109 struct btrace_frame_cache pattern;
1112 pattern.frame = frame;
1114 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1122 /* Implement stop_reason method for record_btrace_frame_unwind. */
1124 static enum unwind_stop_reason
1125 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1128 const struct btrace_frame_cache *cache;
1129 const struct btrace_function *bfun;
1131 cache = *this_cache;
1133 gdb_assert (bfun != NULL);
1135 if (bfun->up == NULL)
1136 return UNWIND_UNAVAILABLE;
1138 return UNWIND_NO_REASON;
1141 /* Implement this_id method for record_btrace_frame_unwind. */
1144 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1145 struct frame_id *this_id)
1147 const struct btrace_frame_cache *cache;
1148 const struct btrace_function *bfun;
1149 CORE_ADDR code, special;
1151 cache = *this_cache;
1154 gdb_assert (bfun != NULL);
1156 while (bfun->segment.prev != NULL)
1157 bfun = bfun->segment.prev;
1159 code = get_frame_func (this_frame);
1160 special = bfun->number;
1162 *this_id = frame_id_build_unavailable_stack_special (code, special);
1164 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1165 btrace_get_bfun_name (cache->bfun),
1166 core_addr_to_string_nz (this_id->code_addr),
1167 core_addr_to_string_nz (this_id->special_addr));
1170 /* Implement prev_register method for record_btrace_frame_unwind. */
1172 static struct value *
1173 record_btrace_frame_prev_register (struct frame_info *this_frame,
1177 const struct btrace_frame_cache *cache;
1178 const struct btrace_function *bfun, *caller;
1179 const struct btrace_insn *insn;
1180 struct gdbarch *gdbarch;
1184 gdbarch = get_frame_arch (this_frame);
1185 pcreg = gdbarch_pc_regnum (gdbarch);
1186 if (pcreg < 0 || regnum != pcreg)
1187 throw_error (NOT_AVAILABLE_ERROR,
1188 _("Registers are not available in btrace record history"));
1190 cache = *this_cache;
1192 gdb_assert (bfun != NULL);
1196 throw_error (NOT_AVAILABLE_ERROR,
1197 _("No caller in btrace record history"));
1199 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1201 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1206 insn = VEC_last (btrace_insn_s, caller->insn);
1209 pc += gdb_insn_length (gdbarch, pc);
1212 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1213 btrace_get_bfun_name (bfun), bfun->level,
1214 core_addr_to_string_nz (pc));
1216 return frame_unwind_got_address (this_frame, regnum, pc);
1219 /* Implement sniffer method for record_btrace_frame_unwind. */
1222 record_btrace_frame_sniffer (const struct frame_unwind *self,
1223 struct frame_info *this_frame,
1226 const struct btrace_function *bfun;
1227 struct btrace_frame_cache *cache;
1228 struct thread_info *tp;
1229 struct frame_info *next;
1231 /* THIS_FRAME does not contain a reference to its thread. */
1232 tp = find_thread_ptid (inferior_ptid);
1233 gdb_assert (tp != NULL);
1236 next = get_next_frame (this_frame);
1239 const struct btrace_insn_iterator *replay;
1241 replay = tp->btrace.replay;
1243 bfun = replay->function;
1247 const struct btrace_function *callee;
1249 callee = btrace_get_frame_function (next);
1250 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1257 DEBUG ("[frame] sniffed frame for %s on level %d",
1258 btrace_get_bfun_name (bfun), bfun->level);
1260 /* This is our frame. Initialize the frame cache. */
1261 cache = bfcache_new (this_frame);
1265 *this_cache = cache;
1269 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1272 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1273 struct frame_info *this_frame,
1276 const struct btrace_function *bfun, *callee;
1277 struct btrace_frame_cache *cache;
1278 struct frame_info *next;
1280 next = get_next_frame (this_frame);
1284 callee = btrace_get_frame_function (next);
1288 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1295 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1296 btrace_get_bfun_name (bfun), bfun->level);
1298 /* This is our frame. Initialize the frame cache. */
1299 cache = bfcache_new (this_frame);
1300 cache->tp = find_thread_ptid (inferior_ptid);
1303 *this_cache = cache;
1308 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1310 struct btrace_frame_cache *cache;
1315 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1316 gdb_assert (slot != NULL);
1318 htab_remove_elt (bfcache, cache);
1321 /* btrace recording does not store previous memory content, neither the stack
1322 frames content. Any unwinding would return errorneous results as the stack
1323 contents no longer matches the changed PC value restored from history.
1324 Therefore this unwinder reports any possibly unwound registers as
1327 const struct frame_unwind record_btrace_frame_unwind =
1330 record_btrace_frame_unwind_stop_reason,
1331 record_btrace_frame_this_id,
1332 record_btrace_frame_prev_register,
1334 record_btrace_frame_sniffer,
1335 record_btrace_frame_dealloc_cache
1338 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1341 record_btrace_frame_unwind_stop_reason,
1342 record_btrace_frame_this_id,
1343 record_btrace_frame_prev_register,
1345 record_btrace_tailcall_frame_sniffer,
1346 record_btrace_frame_dealloc_cache
1349 /* Implement the to_get_unwinder method. */
1351 static const struct frame_unwind *
1352 record_btrace_to_get_unwinder (struct target_ops *self)
1354 return &record_btrace_frame_unwind;
1357 /* Implement the to_get_tailcall_unwinder method. */
1359 static const struct frame_unwind *
1360 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1362 return &record_btrace_tailcall_frame_unwind;
1365 /* Indicate that TP should be resumed according to FLAG. */
1368 record_btrace_resume_thread (struct thread_info *tp,
1369 enum btrace_thread_flag flag)
1371 struct btrace_thread_info *btinfo;
1373 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1375 btinfo = &tp->btrace;
1377 if ((btinfo->flags & BTHR_MOVE) != 0)
1378 error (_("Thread already moving."));
1380 /* Fetch the latest branch trace. */
1383 btinfo->flags |= flag;
1386 /* Find the thread to resume given a PTID. */
1388 static struct thread_info *
1389 record_btrace_find_resume_thread (ptid_t ptid)
1391 struct thread_info *tp;
1393 /* When asked to resume everything, we pick the current thread. */
1394 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1395 ptid = inferior_ptid;
1397 return find_thread_ptid (ptid);
1400 /* Start replaying a thread.  Allocates a replay iterator positioned at the
   end of TP's branch trace (i.e. at the current instruction), stores it in
   TP's btrace info, and returns it.  On error, the replay state is rolled
   back and the exception is rethrown.  */
1402 static struct btrace_insn_iterator *
1403 record_btrace_start_replaying (struct thread_info *tp)
1405 volatile struct gdb_exception except;
1406 struct btrace_insn_iterator *replay;
1407 struct btrace_thread_info *btinfo;
1410 btinfo = &tp->btrace;
1413 /* We can't start replaying without trace. */
1414 if (btinfo->begin == NULL)
1417 /* Clear the executing flag to allow changes to the current frame.
1418 We are not actually running, yet. We just started a reverse execution
1419 command or a record goto command.
1420 For the latter, EXECUTING is false and this has no effect.
1421 For the former, EXECUTING is true and we're in to_wait, about to
1422 move the thread. Since we need to recompute the stack, we temporarily
1423 set EXECUTING to false. */
1424 executing = is_executing (tp->ptid);
1425 set_executing (tp->ptid, 0);
1427 /* GDB stores the current frame_id when stepping in order to detect steps
into subroutines.
1429 Since frames are computed differently when we're replaying, we need to
1430 recompute those stored frames and fix them up so we can still detect
1431 subroutines after we started replaying. */
1432 TRY_CATCH (except, RETURN_MASK_ALL)
1434 struct frame_info *frame;
1435 struct frame_id frame_id;
1436 int upd_step_frame_id, upd_step_stack_frame_id;
1438 /* The current frame without replaying - computed via normal unwind. */
1439 frame = get_current_frame ();
1440 frame_id = get_frame_id (frame);
1442 /* Check if we need to update any stepping-related frame id's. */
1443 upd_step_frame_id = frame_id_eq (frame_id,
1444 tp->control.step_frame_id);
1445 upd_step_stack_frame_id = frame_id_eq (frame_id,
1446 tp->control.step_stack_frame_id);
1448 /* We start replaying at the end of the branch trace. This corresponds
1449 to the current instruction. */
1450 replay = xmalloc (sizeof (*replay));
1451 btrace_insn_end (replay, btinfo);
1453 /* We're not replaying, yet. */
1454 gdb_assert (btinfo->replay == NULL);
1455 btinfo->replay = replay;
1457 /* Make sure we're not using any stale registers. */
1458 registers_changed_ptid (tp->ptid);
1460 /* The current frame with replaying - computed via btrace unwind. */
1461 frame = get_current_frame ();
1462 frame_id = get_frame_id (frame);
1464 /* Replace stepping related frames where necessary. */
1465 if (upd_step_frame_id)
1466 tp->control.step_frame_id = frame_id;
1467 if (upd_step_stack_frame_id)
1468 tp->control.step_stack_frame_id = frame_id;
1471 /* Restore the previous execution state. */
1472 set_executing (tp->ptid, executing);
/* On error, undo the partial replay setup before rethrowing so the thread
   is left in a consistent non-replaying state.  */
1474 if (except.reason < 0)
1476 xfree (btinfo->replay);
1477 btinfo->replay = NULL;
/* Discard registers computed from the now-removed replay iterator.  */
1479 registers_changed_ptid (tp->ptid);
1481 throw_exception (except);
1487 /* Stop replaying a thread.  Safe to call when TP is not replaying:
   xfree (NULL) is a no-op, so this function is idempotent.  */
1490 record_btrace_stop_replaying (struct thread_info *tp)
1492 struct btrace_thread_info *btinfo;
1494 btinfo = &tp->btrace;
/* Release the replay iterator and mark the thread as not replaying.  */
1496 xfree (btinfo->replay);
1497 btinfo->replay = NULL;
1499 /* Make sure we're not leaving any stale registers. */
1500 registers_changed_ptid (tp->ptid);
1503 /* The to_resume method of target record-btrace.  Records the resume intent
   (direction and step/continue) for the thread; the actual stepping is done
   later in record_btrace_wait.  Forwards to the target beneath when not
   replaying and moving forward.  */
1506 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1507 enum gdb_signal signal)
1509 struct thread_info *tp, *other;
1510 enum btrace_thread_flag flag;
1512 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1514 /* Store the execution direction of the last resume. */
1515 record_btrace_resume_exec_dir = execution_direction;
1517 tp = record_btrace_find_resume_thread (ptid);
/* NOTE(review): the NULL check guarding this error is on a line not
   visible in this excerpt.  */
1519 error (_("Cannot find thread to resume."));
1521 /* Stop replaying other threads if the thread to resume is not replaying. */
1522 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1523 ALL_NON_EXITED_THREADS (other)
1524 record_btrace_stop_replaying (other);
1526 /* As long as we're not replaying, just forward the request. */
1527 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1530 return ops->to_resume (ops, ptid, step, signal);
1533 /* Compute the btrace thread flag for the requested move. */
1535 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1537 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1539 /* At the moment, we only move a single thread. We could also move
1540 all threads in parallel by single-stepping each resumed thread
1541 until the first runs into an event.
1542 When we do that, we would want to continue all other threads.
1543 For now, just resume one thread to not confuse to_wait. */
1544 record_btrace_resume_thread (tp, flag);
1546 /* We just indicate the resume intent here. The actual stepping happens in
1547 record_btrace_wait below. */
1549 /* Async support. */
1550 if (target_can_async_p ())
1552 target_async (inferior_event_handler, 0);
1553 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1557 /* Find a thread to move, i.e. one whose BTHR_MOVE flag was set by a
   preceding resume request.  Prefers the thread named by PTID; otherwise
   picks any non-exited thread that has been resumed.  */
1559 static struct thread_info *
1560 record_btrace_find_thread_to_move (ptid_t ptid)
1562 struct thread_info *tp;
1564 /* First check the parameter thread. */
1565 tp = find_thread_ptid (ptid);
1566 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1569 /* Otherwise, find one other thread that has been resumed. */
1570 ALL_NON_EXITED_THREADS (tp)
1571 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1577 /* Return a target_waitstatus indicating that we ran out of history.
   TARGET_WAITKIND_NO_HISTORY carries no value, so only KIND is set.  */
1579 static struct target_waitstatus
1580 btrace_step_no_history (void)
1582 struct target_waitstatus status;
1584 status.kind = TARGET_WAITKIND_NO_HISTORY;
1589 /* Return a target_waitstatus indicating that a step finished.
   Reported as a stop with SIGTRAP, mirroring a live single-step.  */
1591 static struct target_waitstatus
1592 btrace_step_stopped (void)
1594 struct target_waitstatus status;
1596 status.kind = TARGET_WAITKIND_STOPPED;
1597 status.value.sig = GDB_SIGNAL_TRAP;
1602 /* Clear the record histories (instruction and call history iterators) in
   BTINFO so browsing starts anew from the current replay position.  */
1605 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1607 xfree (btinfo->insn_history);
1608 xfree (btinfo->call_history);
/* Null the pointers so a later clear is a no-op rather than a double free.  */
1610 btinfo->insn_history = NULL;
1611 btinfo->call_history = NULL;
1614 /* Step a single thread.  Consumes TP's BTHR_MOVE request flags and moves
   the replay iterator accordingly, returning the resulting wait status.
   NOTE(review): this function dispatches on FLAGS (single-step vs. continue,
   forward vs. reverse); the switch statement and its case labels are not
   visible in this excerpt — confirm section boundaries against the full
   file.  */
1616 static struct target_waitstatus
1617 record_btrace_step_thread (struct thread_info *tp)
1619 struct btrace_insn_iterator *replay, end;
1620 struct btrace_thread_info *btinfo;
1621 struct address_space *aspace;
1622 struct inferior *inf;
1623 enum btrace_thread_flag flags;
1626 /* We can't step without an execution history. */
1627 if (btrace_is_empty (tp))
1628 return btrace_step_no_history ();
1630 btinfo = &tp->btrace;
1631 replay = btinfo->replay;
/* Consume the move request so a spurious wait does not move the thread
   a second time.  */
1633 flags = btinfo->flags & BTHR_MOVE;
1634 btinfo->flags &= ~BTHR_MOVE;
1636 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1641 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1644 /* We're done if we're not replaying. */
1646 return btrace_step_no_history ();
1648 /* We are always able to step at least once. */
1649 steps = btrace_insn_next (replay, 1);
1650 gdb_assert (steps == 1);
1652 /* Determine the end of the instruction trace. */
1653 btrace_insn_end (&end, btinfo);
1655 /* We stop replaying if we reached the end of the trace. */
1656 if (btrace_insn_cmp (replay, &end) == 0)
1657 record_btrace_stop_replaying (tp);
1659 return btrace_step_stopped ();
1662 /* Start replaying if we're not already doing so. */
1664 replay = record_btrace_start_replaying (tp);
1666 /* If we can't step any further, we reached the end of the history. */
1667 steps = btrace_insn_prev (replay, 1);
1669 return btrace_step_no_history ();
1671 return btrace_step_stopped ();
1674 /* We're done if we're not replaying. */
1676 return btrace_step_no_history ();
1678 inf = find_inferior_ptid (tp->ptid);
1679 aspace = inf->aspace;
1681 /* Determine the end of the instruction trace. */
1682 btrace_insn_end (&end, btinfo);
1686 const struct btrace_insn *insn;
1688 /* We are always able to step at least once. */
1689 steps = btrace_insn_next (replay, 1);
1690 gdb_assert (steps == 1);
1692 /* We stop replaying if we reached the end of the trace. */
1693 if (btrace_insn_cmp (replay, &end) == 0)
1695 record_btrace_stop_replaying (tp);
1696 return btrace_step_no_history ();
1699 insn = btrace_insn_get (replay);
1702 DEBUG ("stepping %d (%s) ... %s", tp->num,
1703 target_pid_to_str (tp->ptid),
1704 core_addr_to_string_nz (insn->pc));
/* Stop when the replayed position hits a breakpoint.  */
1706 if (breakpoint_here_p (aspace, insn->pc))
1707 return btrace_step_stopped ();
1711 /* Start replaying if we're not already doing so. */
1713 replay = record_btrace_start_replaying (tp);
1715 inf = find_inferior_ptid (tp->ptid);
1716 aspace = inf->aspace;
1720 const struct btrace_insn *insn;
1722 /* If we can't step any further, we're done. */
1723 steps = btrace_insn_prev (replay, 1);
1725 return btrace_step_no_history ();
1727 insn = btrace_insn_get (replay);
1730 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1731 target_pid_to_str (tp->ptid),
1732 core_addr_to_string_nz (insn->pc));
/* Stop when the replayed position hits a breakpoint.  */
1734 if (breakpoint_here_p (aspace, insn->pc))
1735 return btrace_step_stopped ();
1740 /* The to_wait method of target record-btrace.  Performs the stepping that
   record_btrace_resume only requested, for a single thread, then clears all
   other threads' move requests.  Forwards to the target beneath when not
   replaying and moving forward.  */
1743 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1744 struct target_waitstatus *status, int options)
1746 struct thread_info *tp, *other;
1748 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1750 /* As long as we're not replaying, just forward the request. */
1751 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1754 return ops->to_wait (ops, ptid, status, options);
1757 /* Let's find a thread to move. */
1758 tp = record_btrace_find_thread_to_move (ptid);
/* No thread requested a move: report "ignore" so the event loop keeps
   waiting instead of reporting a bogus stop.  */
1761 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1763 status->kind = TARGET_WAITKIND_IGNORE;
1764 return minus_one_ptid;
1767 /* We only move a single thread. We're not able to correlate threads. */
1768 *status = record_btrace_step_thread (tp);
1770 /* Stop all other threads. */
1772 ALL_NON_EXITED_THREADS (other)
1773 other->btrace.flags &= ~BTHR_MOVE;
1775 /* Start record histories anew from the current position. */
1776 record_btrace_clear_histories (&tp->btrace);
1778 /* We moved the replay position but did not update registers. */
1779 registers_changed_ptid (tp->ptid);
1784 /* The to_can_execute_reverse method of target record-btrace.  Branch
   tracing always supports reverse execution; the function body is not
   visible in this excerpt.  */
1787 record_btrace_can_execute_reverse (struct target_ops *self)
1792 /* The to_decr_pc_after_break method of target record-btrace.  Returns the
   PC adjustment to apply after a breakpoint hit; zero while replaying,
   otherwise whatever the target beneath reports.  */
1795 record_btrace_decr_pc_after_break (struct target_ops *ops,
1796 struct gdbarch *gdbarch)
1798 /* When replaying, we do not actually execute the breakpoint instruction
1799 so there is no need to adjust the PC after hitting a breakpoint. */
1800 if (record_btrace_is_replaying (ops))
/* Not replaying: delegate to the target beneath.  */
1803 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1806 /* The to_update_thread_list method of target record-btrace.  */
1809 record_btrace_update_thread_list (struct target_ops *ops)
1811 /* We don't add or remove threads during replay. */
1812 if (record_btrace_is_replaying (ops))
1815 /* Forward the request. */
/* NOTE(review): OPS is presumably rebound to OPS->beneath on a line not
   visible in this excerpt; as written this call would recurse — confirm
   against the full file.  */
1817 ops->to_update_thread_list (ops);
1820 /* The to_thread_alive method of target record-btrace.  During replay a
   thread is alive iff it is still in GDB's thread list; otherwise the
   request is forwarded.  */
1823 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1825 /* We don't add or remove threads during replay. */
1826 if (record_btrace_is_replaying (ops))
1827 return find_thread_ptid (ptid) != NULL;
1829 /* Forward the request. */
/* NOTE(review): OPS is presumably rebound to OPS->beneath on a line not
   visible in this excerpt — confirm against the full file.  */
1831 return ops->to_thread_alive (ops, ptid);
1834 /* Set the replay branch trace instruction iterator.  If IT is NULL (or
   does not name a valid position), stop replaying.  */
1838 record_btrace_set_replay (struct thread_info *tp,
1839 const struct btrace_insn_iterator *it)
1841 struct btrace_thread_info *btinfo;
1843 btinfo = &tp->btrace;
1845 if (it == NULL || it->function == NULL)
1846 record_btrace_stop_replaying (tp);
1849 if (btinfo->replay == NULL)
1850 record_btrace_start_replaying (tp);
/* Already at the requested position: nothing to do.  */
1851 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1854 *btinfo->replay = *it;
/* The replay position changed, so cached registers are stale.  */
1855 registers_changed_ptid (tp->ptid);
1858 /* Start anew from the new replay position. */
1859 record_btrace_clear_histories (btinfo);
1862 /* The to_goto_record_begin method of target record-btrace.  Moves the
   replay position to the first recorded instruction and reprints the
   selected frame.  */
1865 record_btrace_goto_begin (struct target_ops *self)
1867 struct thread_info *tp;
1868 struct btrace_insn_iterator begin;
1870 tp = require_btrace_thread ();
1872 btrace_insn_begin (&begin, &tp->btrace);
1873 record_btrace_set_replay (tp, &begin);
1875 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1878 /* The to_goto_record_end method of target record-btrace.  Stops replaying
   (a NULL iterator means "end of trace") and reprints the selected
   frame.  */
1881 record_btrace_goto_end (struct target_ops *ops)
1883 struct thread_info *tp;
1885 tp = require_btrace_thread ();
1887 record_btrace_set_replay (tp, NULL);
1889 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1892 /* The to_goto_record method of target record-btrace.  Moves the replay
   position to the recorded instruction numbered INSN and reprints the
   selected frame.  Errors out if INSN overflows the lookup type or does
   not name a recorded instruction.  */
1895 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1897 struct thread_info *tp;
1898 struct btrace_insn_iterator it;
1899 unsigned int number;
1904 /* Check for wrap-arounds. */
/* NOTE(review): the truncating assignment of INSN into NUMBER and its
   guard are on lines not visible in this excerpt.  */
1906 error (_("Instruction number out of range."));
1908 tp = require_btrace_thread ();
1910 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1912 error (_("No such instruction."));
1914 record_btrace_set_replay (tp, &it);
1916 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1919 /* The to_execution_direction target method.  Reports the direction of the
   last resume, as recorded by record_btrace_resume.  */
1921 static enum exec_direction_kind
1922 record_btrace_execution_direction (struct target_ops *self)
1924 return record_btrace_resume_exec_dir;
1927 /* The to_prepare_to_generate_core target method.  Flags that a core file
   is being generated so memory accesses bypass replay restrictions.  */
1930 record_btrace_prepare_to_generate_core (struct target_ops *self)
1932 record_btrace_generating_corefile = 1;
1935 /* The to_done_generating_core target method.  Clears the flag set by
   record_btrace_prepare_to_generate_core.  */
1938 record_btrace_done_generating_core (struct target_ops *self)
1940 record_btrace_generating_corefile = 0;
1943 /* Initialize the record-btrace target ops.  Fills in the static
   record_btrace_ops target vector with the methods defined in this file;
   methods left unset fall through to the target beneath.  */
1946 init_record_btrace_ops (void)
1948 struct target_ops *ops;
1950 ops = &record_btrace_ops;
/* Identification.  */
1951 ops->to_shortname = "record-btrace";
1952 ops->to_longname = "Branch tracing target";
1953 ops->to_doc = "Collect control-flow trace and provide the execution history.";
/* Lifecycle.  */
1954 ops->to_open = record_btrace_open;
1955 ops->to_close = record_btrace_close;
1956 ops->to_async = record_btrace_async;
1957 ops->to_detach = record_detach;
1958 ops->to_disconnect = record_disconnect;
1959 ops->to_mourn_inferior = record_mourn_inferior;
1960 ops->to_kill = record_kill;
/* Recording and history browsing.  */
1961 ops->to_stop_recording = record_btrace_stop_recording;
1962 ops->to_info_record = record_btrace_info;
1963 ops->to_insn_history = record_btrace_insn_history;
1964 ops->to_insn_history_from = record_btrace_insn_history_from;
1965 ops->to_insn_history_range = record_btrace_insn_history_range;
1966 ops->to_call_history = record_btrace_call_history;
1967 ops->to_call_history_from = record_btrace_call_history_from;
1968 ops->to_call_history_range = record_btrace_call_history_range;
1969 ops->to_record_is_replaying = record_btrace_is_replaying;
/* Memory, breakpoints, and registers.  */
1970 ops->to_xfer_partial = record_btrace_xfer_partial;
1971 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1972 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1973 ops->to_fetch_registers = record_btrace_fetch_registers;
1974 ops->to_store_registers = record_btrace_store_registers;
1975 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1976 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
1977 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
/* Execution control and replay navigation.  */
1978 ops->to_resume = record_btrace_resume;
1979 ops->to_wait = record_btrace_wait;
1980 ops->to_update_thread_list = record_btrace_update_thread_list;
1981 ops->to_thread_alive = record_btrace_thread_alive;
1982 ops->to_goto_record_begin = record_btrace_goto_begin;
1983 ops->to_goto_record_end = record_btrace_goto_end;
1984 ops->to_goto_record = record_btrace_goto;
1985 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1986 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1987 ops->to_execution_direction = record_btrace_execution_direction;
1988 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
1989 ops->to_done_generating_core = record_btrace_done_generating_core;
1990 ops->to_stratum = record_stratum;
1991 ops->to_magic = OPS_MAGIC;
1994 /* Alias for "target record".  Rejects any argument, then runs
   "target record-btrace".  */
1997 cmd_record_btrace_start (char *args, int from_tty)
1999 if (args != NULL && *args != 0)
2000 error (_("Invalid argument."));
2002 execute_command ("target record-btrace", from_tty);
2005 /* The "set record btrace" command.  With no sub-command given, list the
   available settings and their values.  */
2008 cmd_set_record_btrace (char *args, int from_tty)
2010 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2013 /* The "show record btrace" command.  Lists all "record btrace" settings
   and their current values.  */
2016 cmd_show_record_btrace (char *args, int from_tty)
2018 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2021 /* The "show record btrace replay-memory-access" command.  Prints the
   current setting to FILE, the output stream supplied by the command
   machinery (previously this wrote to gdb_stdout, which breaks output
   redirection of "show" commands).  */
2024 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2025 struct cmd_list_element *c, const char *value)
2027 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2028 replay_memory_access);
2031 void _initialize_record_btrace (void);
2033 /* Initialize btrace commands. */
2036 _initialize_record_btrace (void)
2038 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2039 _("Start branch trace recording."),
2041 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2043 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2044 _("Set record options"), &set_record_btrace_cmdlist,
2045 "set record btrace ", 0, &set_record_cmdlist);
2047 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2048 _("Show record options"), &show_record_btrace_cmdlist,
2049 "show record btrace ", 0, &show_record_cmdlist);
2051 add_setshow_enum_cmd ("replay-memory-access", no_class,
2052 replay_memory_access_types, &replay_memory_access, _("\
2053 Set what memory accesses are allowed during replay."), _("\
2054 Show what memory accesses are allowed during replay."),
2055 _("Default is READ-ONLY.\n\n\
2056 The btrace record target does not trace data.\n\
2057 The memory therefore corresponds to the live target and not \
2058 to the current replay position.\n\n\
2059 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2060 When READ-WRITE, allow accesses to read-only and read-write memory during \
2062 NULL, cmd_show_replay_memory_access,
2063 &set_record_btrace_cmdlist,
2064 &show_record_btrace_cmdlist);
2066 init_record_btrace_ops ();
2067 add_target (&record_btrace_ops);
2069 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,