1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
/* NOTE(review): this listing appears line-sampled; source lines are missing
   between the numbered lines below.  Verify any change against the full file.  */
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops;
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer *record_btrace_thread_observer;
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
57 /* The currently allowed replay memory access type.
   Defaults to the conservative read-only mode.  */
58 static const char *replay_memory_access = replay_memory_access_read_only;
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element *set_record_btrace_cmdlist;
62 static struct cmd_list_element *show_record_btrace_cmdlist;
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile;
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf;
76 /* Command list for "record btrace". */
77 static struct cmd_list_element *record_btrace_cmdlist;
79 /* Print a record-btrace debug message. Use do ... while (0) to avoid
80 ambiguities when used in if statements. */
82 #define DEBUG(msg, args...) \
85 if (record_debug != 0) \
86 fprintf_unfiltered (gdb_stdlog, \
87 "[record-btrace] " msg "\n", ##args); \
/* NOTE(review): relies on the GCC named-variadic-macro extension ("args...")
   and ", ##args" to swallow the comma when no arguments are passed.  */
92 /* Update the branch trace for the current thread and return a pointer to its
95 Throws an error if there is no thread or no trace. This function never
98 static struct thread_info *
99 require_btrace_thread (void)
101 struct thread_info *tp;
/* Look up the thread for the current inferior ptid; error out if there is
   no such thread or its trace is empty.  */
105 tp = find_thread_ptid (inferior_ptid);
107 error (_("No thread."));
111 if (btrace_is_empty (tp))
112 error (_("No trace."));
117 /* Update the branch trace for the current thread and return a pointer to its
118 branch trace information struct.
120 Throws an error if there is no thread or no trace. This function never
123 static struct btrace_thread_info *
124 require_btrace (void)
126 struct thread_info *tp;
/* Delegate thread lookup and validation, then hand back its btrace info.  */
128 tp = require_btrace_thread ();
133 /* Enable branch tracing for one thread. Warn on errors. */
136 record_btrace_enable_warn (struct thread_info *tp)
138 volatile struct gdb_exception error;
/* Demote any btrace_enable error to a warning so that failing to enable
   tracing for one thread does not abort the operation for the others.  */
140 TRY_CATCH (error, RETURN_MASK_ERROR)
141 btrace_enable (tp, &record_btrace_conf);
143 if (error.message != NULL)
144 warning ("%s", error.message);
147 /* Callback function to disable branch tracing for one thread.
   ARG is a struct thread_info * registered via make_cleanup.  */
150 record_btrace_disable_callback (void *arg)
152 struct thread_info *tp;
159 /* Enable automatic tracing of new threads. */
162 record_btrace_auto_enable (void)
164 DEBUG ("attach thread observer");
166 record_btrace_thread_observer
167 = observer_attach_new_thread (record_btrace_enable_warn);
170 /* Disable automatic tracing of new threads. */
173 record_btrace_auto_disable (void)
175 /* The observer may have been detached, already. */
176 if (record_btrace_thread_observer == NULL)
179 DEBUG ("detach thread observer");
181 observer_detach_new_thread (record_btrace_thread_observer);
182 record_btrace_thread_observer = NULL;
185 /* The record-btrace async event handler function. */
188 record_btrace_handle_async_inferior_event (gdb_client_data data)
190 inferior_event_handler (INF_REG_EVENT, NULL);
193 /* The to_open method of target record-btrace. */
196 record_btrace_open (const char *args, int from_tty)
198 struct cleanup *disable_chain;
199 struct thread_info *tp;
/* Recording requires a live inferior and (in this version) all-stop mode.  */
205 if (!target_has_execution)
206 error (_("The program is not being run."));
209 error (_("Record btrace can't debug inferior in non-stop mode."));
211 gdb_assert (record_btrace_thread_observer == NULL);
/* Enable tracing per selected thread; the cleanup chain disables tracing
   again for already-enabled threads if we error out part-way through.  */
213 disable_chain = make_cleanup (null_cleanup, NULL);
214 ALL_NON_EXITED_THREADS (tp)
215 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
217 btrace_enable (tp, &record_btrace_conf);
219 make_cleanup (record_btrace_disable_callback, tp);
222 record_btrace_auto_enable ();
224 push_target (&record_btrace_ops);
226 record_btrace_async_inferior_event_handler
227 = create_async_event_handler (record_btrace_handle_async_inferior_event,
229 record_btrace_generating_corefile = 0;
231 observer_notify_record_changed (current_inferior (), 1);
/* Success: keep tracing enabled on all the threads we touched.  */
233 discard_cleanups (disable_chain);
236 /* The to_stop_recording method of target record-btrace. */
239 record_btrace_stop_recording (struct target_ops *self)
241 struct thread_info *tp;
243 DEBUG ("stop recording");
245 record_btrace_auto_disable ();
/* Disable tracing only for threads that are actually being traced.  */
247 ALL_NON_EXITED_THREADS (tp)
248 if (tp->btrace.target != NULL)
252 /* The to_close method of target record-btrace. */
255 record_btrace_close (struct target_ops *self)
257 struct thread_info *tp;
259 if (record_btrace_async_inferior_event_handler != NULL)
260 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
262 /* Make sure automatic recording gets disabled even if we did not stop
263 recording before closing the record-btrace target. */
264 record_btrace_auto_disable ();
266 /* We should have already stopped recording.
267 Tear down btrace in case we have not. */
268 ALL_NON_EXITED_THREADS (tp)
269 btrace_teardown (tp);
272 /* The to_async method of target record-btrace. */
275 record_btrace_async (struct target_ops *ops,
276 void (*callback) (enum inferior_event_type event_type,
/* A non-NULL CALLBACK turns async reporting on; NULL turns it off.  */
280 if (callback != NULL)
281 mark_async_event_handler (record_btrace_async_inferior_event_handler);
283 clear_async_event_handler (record_btrace_async_inferior_event_handler);
/* Forward the request to the target beneath.  */
285 ops->beneath->to_async (ops->beneath, callback, context);
288 /* The to_info_record method of target record-btrace. */
291 record_btrace_info (struct target_ops *self)
293 struct btrace_thread_info *btinfo;
294 const struct btrace_config *conf;
295 struct thread_info *tp;
296 unsigned int insns, calls;
300 tp = find_thread_ptid (inferior_ptid);
302 error (_("No thread."));
304 btinfo = &tp->btrace;
306 conf = btrace_conf (btinfo);
308 printf_unfiltered (_("Recording format: %s.\n"),
309 btrace_format_string (conf->format));
/* Count recorded instructions and function segments by stepping one back
   from each trace's end iterator.  */
316 if (!btrace_is_empty (tp))
318 struct btrace_call_iterator call;
319 struct btrace_insn_iterator insn;
321 btrace_call_end (&call, btinfo);
322 btrace_call_prev (&call, 1);
323 calls = btrace_call_number (&call);
325 btrace_insn_end (&insn, btinfo);
326 btrace_insn_prev (&insn, 1);
327 insns = btrace_insn_number (&insn);
330 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
331 "%d (%s).\n"), insns, calls, tp->num,
332 target_pid_to_str (tp->ptid));
334 if (btrace_is_replaying (tp))
335 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
336 btrace_insn_number (btinfo->replay));
339 /* Print an unsigned int. */
342 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
344 ui_out_field_fmt (uiout, fld, "%u", val);
347 /* Disassemble a section of the recorded instruction trace. */
350 btrace_insn_history (struct ui_out *uiout,
351 const struct btrace_insn_iterator *begin,
352 const struct btrace_insn_iterator *end, int flags)
354 struct gdbarch *gdbarch;
355 struct btrace_insn_iterator it;
357 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
358 btrace_insn_number (end));
360 gdbarch = target_gdbarch ();
/* Walk the half-open range [BEGIN; END) one instruction at a time.  */
362 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
364 const struct btrace_insn *insn;
366 insn = btrace_insn_get (&it);
368 /* Print the instruction index. */
369 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
370 ui_out_text (uiout, "\t");
372 /* Disassembly with '/m' flag may not produce the expected result.
/* Disassemble exactly one instruction at INSN->pc.  */
374 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
378 /* The to_insn_history method of target record-btrace. */
381 record_btrace_insn_history (struct target_ops *self, int size, int flags)
383 struct btrace_thread_info *btinfo;
384 struct btrace_insn_history *history;
385 struct btrace_insn_iterator begin, end;
386 struct cleanup *uiout_cleanup;
387 struct ui_out *uiout;
388 unsigned int context, covered;
390 uiout = current_uiout;
391 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* SIZE's sign selects the direction; its magnitude is the number of
   instructions to show.  */
393 context = abs (size);
395 error (_("Bad record instruction-history-size."));
397 btinfo = require_btrace ();
398 history = btinfo->insn_history;
401 struct btrace_insn_iterator *replay;
403 DEBUG ("insn-history (0x%x): %d", flags, size);
405 /* If we're replaying, we start at the replay position. Otherwise, we
406 start at the tail of the trace. */
407 replay = btinfo->replay;
411 btrace_insn_end (&begin, btinfo);
413 /* We start from here and expand in the requested direction. Then we
414 expand in the other direction, as well, to fill up any remaining
419 /* We want the current position covered, as well. */
420 covered = btrace_insn_next (&end, 1);
421 covered += btrace_insn_prev (&begin, context - covered);
422 covered += btrace_insn_next (&end, context - covered);
426 covered = btrace_insn_next (&end, context);
427 covered += btrace_insn_prev (&begin, context - covered);
/* A previous history request exists: continue from its boundaries.  */
432 begin = history->begin;
435 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
436 btrace_insn_number (&begin), btrace_insn_number (&end));
441 covered = btrace_insn_prev (&begin, context);
446 covered = btrace_insn_next (&end, context);
451 btrace_insn_history (uiout, &begin, &end, flags);
/* Tell the user when we hit either end of the recorded trace.  */
455 printf_unfiltered (_("At the start of the branch trace record.\n"));
457 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the printed range so a subsequent bare command continues here.  */
460 btrace_set_insn_history (btinfo, &begin, &end);
461 do_cleanups (uiout_cleanup);
464 /* The to_insn_history_range method of target record-btrace. */
467 record_btrace_insn_history_range (struct target_ops *self,
468 ULONGEST from, ULONGEST to, int flags)
470 struct btrace_thread_info *btinfo;
471 struct btrace_insn_history *history;
472 struct btrace_insn_iterator begin, end;
473 struct cleanup *uiout_cleanup;
474 struct ui_out *uiout;
475 unsigned int low, high;
478 uiout = current_uiout;
479 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
484 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
486 /* Check for wrap-arounds. */
/* LOW/HIGH are the ULONGEST arguments narrowed to unsigned int; a mismatch
   means the request does not fit the instruction numbering.  */
487 if (low != from || high != to)
488 error (_("Bad range."));
491 error (_("Bad range."));
493 btinfo = require_btrace ();
495 found = btrace_find_insn_by_number (&begin, btinfo, low)
497 error (_("Range out of bounds."));
499 found = btrace_find_insn_by_number (&end, btinfo, high);
502 /* Silently truncate the range. */
503 btrace_insn_end (&end, btinfo);
507 /* We want both begin and end to be inclusive. */
508 btrace_insn_next (&end, 1);
511 btrace_insn_history (uiout, &begin, &end, flags);
512 btrace_set_insn_history (btinfo, &begin, &end);
514 do_cleanups (uiout_cleanup);
517 /* The to_insn_history_from method of target record-btrace. */
520 record_btrace_insn_history_from (struct target_ops *self,
521 ULONGEST from, int size, int flags)
523 ULONGEST begin, end, context;
525 context = abs (size);
527 error (_("Bad record instruction-history-size."));
/* Negative SIZE: show CONTEXT instructions ending at FROM.  */
536 begin = from - context + 1;
/* Positive SIZE: show CONTEXT instructions starting at FROM.  */
541 end = from + context - 1;
543 /* Check for wrap-around. */
548 record_btrace_insn_history_range (self, begin, end, flags);
551 /* Print the instruction number range for a function call history line. */
554 btrace_call_history_insn_range (struct ui_out *uiout,
555 const struct btrace_function *bfun)
557 unsigned int begin, end, size;
559 size = VEC_length (btrace_insn_s, bfun->insn);
560 gdb_assert (size > 0);
/* Print the inclusive range [insn_offset; insn_offset + size - 1].  */
562 begin = bfun->insn_offset;
563 end = begin + size - 1;
565 ui_out_field_uint (uiout, "insn begin", begin);
566 ui_out_text (uiout, ",");
567 ui_out_field_uint (uiout, "insn end", end);
570 /* Print the source line information for a function call history line. */
573 btrace_call_history_src_line (struct ui_out *uiout,
574 const struct btrace_function *bfun)
583 ui_out_field_string (uiout, "file",
584 symtab_to_filename_for_display (symbol_symtab (sym)));
/* Print "file:min[,max]" based on the function segment's line range.  */
586 begin = bfun->lbegin;
592 ui_out_text (uiout, ":");
593 ui_out_field_int (uiout, "min line", begin);
598 ui_out_text (uiout, ",");
599 ui_out_field_int (uiout, "max line", end);
602 /* Get the name of a branch trace function.
   Prefer the full symbol's print name, fall back to the minimal symbol.  */
605 btrace_get_bfun_name (const struct btrace_function *bfun)
607 struct minimal_symbol *msym;
617 return SYMBOL_PRINT_NAME (sym);
618 else if (msym != NULL)
619 return MSYMBOL_PRINT_NAME (msym);
624 /* Disassemble a section of the recorded function trace. */
627 btrace_call_history (struct ui_out *uiout,
628 const struct btrace_thread_info *btinfo,
629 const struct btrace_call_iterator *begin,
630 const struct btrace_call_iterator *end,
631 enum record_print_flag flags)
633 struct btrace_call_iterator it;
635 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
636 btrace_call_number (end));
/* Walk the half-open range [BEGIN; END) one function segment at a time.  */
638 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
640 const struct btrace_function *bfun;
641 struct minimal_symbol *msym;
644 bfun = btrace_call_get (&it);
648 /* Print the function index. */
649 ui_out_field_uint (uiout, "index", bfun->number);
650 ui_out_text (uiout, "\t");
652 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
/* Indent by the call depth relative to the thread's base level.  */
654 int level = bfun->level + btinfo->level, i;
656 for (i = 0; i < level; ++i)
657 ui_out_text (uiout, " ");
/* Prefer the full symbol name, then the minimal symbol, then "??"
   (the latter only for non-MI output).  */
661 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
662 else if (msym != NULL)
663 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
664 else if (!ui_out_is_mi_like_p (uiout))
665 ui_out_field_string (uiout, "function", "??");
667 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
669 ui_out_text (uiout, _("\tinst "));
670 btrace_call_history_insn_range (uiout, bfun);
673 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
675 ui_out_text (uiout, _("\tat "));
676 btrace_call_history_src_line (uiout, bfun);
679 ui_out_text (uiout, "\n");
683 /* The to_call_history method of target record-btrace. */
686 record_btrace_call_history (struct target_ops *self, int size, int flags)
688 struct btrace_thread_info *btinfo;
689 struct btrace_call_history *history;
690 struct btrace_call_iterator begin, end;
691 struct cleanup *uiout_cleanup;
692 struct ui_out *uiout;
693 unsigned int context, covered;
695 uiout = current_uiout;
696 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* SIZE's sign selects the direction; its magnitude is the number of calls
   to show.  */
698 context = abs (size);
700 error (_("Bad record function-call-history-size."));
702 btinfo = require_btrace ();
703 history = btinfo->call_history;
706 struct btrace_insn_iterator *replay;
708 DEBUG ("call-history (0x%x): %d", flags, size);
710 /* If we're replaying, we start at the replay position. Otherwise, we
711 start at the tail of the trace. */
712 replay = btinfo->replay;
715 begin.function = replay->function;
716 begin.btinfo = btinfo;
719 btrace_call_end (&begin, btinfo);
721 /* We start from here and expand in the requested direction. Then we
722 expand in the other direction, as well, to fill up any remaining
727 /* We want the current position covered, as well. */
728 covered = btrace_call_next (&end, 1);
729 covered += btrace_call_prev (&begin, context - covered);
730 covered += btrace_call_next (&end, context - covered);
734 covered = btrace_call_next (&end, context);
735 covered += btrace_call_prev (&begin, context- covered);
/* A previous history request exists: continue from its boundaries.  */
740 begin = history->begin;
743 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
744 btrace_call_number (&begin), btrace_call_number (&end));
749 covered = btrace_call_prev (&begin, context);
754 covered = btrace_call_next (&end, context);
759 btrace_call_history (uiout, btinfo, &begin, &end, flags);
/* Tell the user when we hit either end of the recorded trace.  */
763 printf_unfiltered (_("At the start of the branch trace record.\n"));
765 printf_unfiltered (_("At the end of the branch trace record.\n"));
768 btrace_set_call_history (btinfo, &begin, &end);
769 do_cleanups (uiout_cleanup);
772 /* The to_call_history_range method of target record-btrace. */
775 record_btrace_call_history_range (struct target_ops *self,
776 ULONGEST from, ULONGEST to, int flags)
778 struct btrace_thread_info *btinfo;
779 struct btrace_call_history *history;
780 struct btrace_call_iterator begin, end;
781 struct cleanup *uiout_cleanup;
782 struct ui_out *uiout;
783 unsigned int low, high;
786 uiout = current_uiout;
787 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
792 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
794 /* Check for wrap-arounds. */
/* LOW/HIGH are the ULONGEST arguments narrowed to unsigned int; a mismatch
   means the request does not fit the call numbering.  */
795 if (low != from || high != to)
796 error (_("Bad range."));
799 error (_("Bad range."));
801 btinfo = require_btrace ();
803 found = btrace_find_call_by_number (&begin, btinfo, low);
805 error (_("Range out of bounds."));
807 found = btrace_find_call_by_number (&end, btinfo, high);
810 /* Silently truncate the range. */
811 btrace_call_end (&end, btinfo);
815 /* We want both begin and end to be inclusive. */
816 btrace_call_next (&end, 1);
819 btrace_call_history (uiout, btinfo, &begin, &end, flags);
820 btrace_set_call_history (btinfo, &begin, &end);
822 do_cleanups (uiout_cleanup);
825 /* The to_call_history_from method of target record-btrace. */
828 record_btrace_call_history_from (struct target_ops *self,
829 ULONGEST from, int size, int flags)
831 ULONGEST begin, end, context;
833 context = abs (size);
835 error (_("Bad record function-call-history-size."));
/* Negative SIZE: show CONTEXT calls ending at FROM.  */
844 begin = from - context + 1;
/* Positive SIZE: show CONTEXT calls starting at FROM.  */
849 end = from + context - 1;
851 /* Check for wrap-around. */
856 record_btrace_call_history_range (self, begin, end, flags);
859 /* The to_record_is_replaying method of target record-btrace. */
862 record_btrace_is_replaying (struct target_ops *self)
864 struct thread_info *tp;
/* We are replaying if any live thread is replaying.  */
866 ALL_NON_EXITED_THREADS (tp)
867 if (btrace_is_replaying (tp))
873 /* The to_xfer_partial method of target record-btrace. */
875 static enum target_xfer_status
876 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
877 const char *annex, gdb_byte *readbuf,
878 const gdb_byte *writebuf, ULONGEST offset,
879 ULONGEST len, ULONGEST *xfered_len)
881 struct target_ops *t;
883 /* Filter out requests that don't make sense during replay. */
884 if (replay_memory_access == replay_memory_access_read_only
885 && !record_btrace_generating_corefile
886 && record_btrace_is_replaying (ops))
890 case TARGET_OBJECT_MEMORY:
892 struct target_section *section;
894 /* We do not allow writing memory in general. */
895 if (writebuf != NULL)
898 return TARGET_XFER_UNAVAILABLE;
901 /* We allow reading readonly memory. */
902 section = target_section_by_addr (ops, offset);
905 /* Check if the section we found is readonly. */
906 if ((bfd_get_section_flags (section->the_bfd_section->owner,
907 section->the_bfd_section)
908 & SEC_READONLY) != 0)
910 /* Truncate the request to fit into this section. */
911 len = min (len, section->endaddr - offset);
917 return TARGET_XFER_UNAVAILABLE;
922 /* Forward the request. */
/* NOTE(review): an "ops = ops->beneath;" step appears to be elided before
   this call — confirm against the full source.  */
924 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
925 offset, len, xfered_len);
928 /* The to_insert_breakpoint method of target record-btrace. */
931 record_btrace_insert_breakpoint (struct target_ops *ops,
932 struct gdbarch *gdbarch,
933 struct bp_target_info *bp_tgt)
935 volatile struct gdb_exception except;
939 /* Inserting breakpoints requires accessing memory. Allow it for the
940 duration of this function. */
941 old = replay_memory_access;
942 replay_memory_access = replay_memory_access_read_write;
945 TRY_CATCH (except, RETURN_MASK_ALL)
946 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
/* Restore the saved access mode before re-throwing any error.  */
948 replay_memory_access = old;
950 if (except.reason < 0)
951 throw_exception (except);
956 /* The to_remove_breakpoint method of target record-btrace. */
959 record_btrace_remove_breakpoint (struct target_ops *ops,
960 struct gdbarch *gdbarch,
961 struct bp_target_info *bp_tgt)
963 volatile struct gdb_exception except;
967 /* Removing breakpoints requires accessing memory. Allow it for the
968 duration of this function. */
969 old = replay_memory_access;
970 replay_memory_access = replay_memory_access_read_write;
973 TRY_CATCH (except, RETURN_MASK_ALL)
974 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
/* Restore the saved access mode before re-throwing any error.  */
976 replay_memory_access = old;
978 if (except.reason < 0)
979 throw_exception (except);
984 /* The to_fetch_registers method of target record-btrace. */
987 record_btrace_fetch_registers (struct target_ops *ops,
988 struct regcache *regcache, int regno)
990 struct btrace_insn_iterator *replay;
991 struct thread_info *tp;
993 tp = find_thread_ptid (inferior_ptid);
994 gdb_assert (tp != NULL);
996 replay = tp->btrace.replay;
997 if (replay != NULL && !record_btrace_generating_corefile)
999 const struct btrace_insn *insn;
1000 struct gdbarch *gdbarch;
1003 gdbarch = get_regcache_arch (regcache);
1004 pcreg = gdbarch_pc_regnum (gdbarch);
1008 /* We can only provide the PC register. */
1009 if (regno >= 0 && regno != pcreg)
/* Supply the PC of the current replay instruction.  */
1012 insn = btrace_insn_get (replay);
1013 gdb_assert (insn != NULL);
1015 regcache_raw_supply (regcache, regno, &insn->pc);
/* Not replaying: defer to the target beneath.  */
1019 struct target_ops *t = ops->beneath;
1021 t->to_fetch_registers (t, regcache, regno);
1025 /* The to_store_registers method of target record-btrace. */
1028 record_btrace_store_registers (struct target_ops *ops,
1029 struct regcache *regcache, int regno)
1031 struct target_ops *t;
/* Register writes are rejected while replaying recorded history.  */
1033 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1034 error (_("This record target does not allow writing registers."));
1036 gdb_assert (may_write_registers != 0);
1039 t->to_store_registers (t, regcache, regno);
1042 /* The to_prepare_to_store method of target record-btrace. */
1045 record_btrace_prepare_to_store (struct target_ops *ops,
1046 struct regcache *regcache)
1048 struct target_ops *t;
/* During replay there is nothing to prepare - stores are rejected.  */
1050 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1054 t->to_prepare_to_store (t, regcache);
1057 /* The branch trace frame cache. */
1059 struct btrace_frame_cache
/* The thread this frame belongs to. */
1062 struct thread_info *tp;
1064 /* The frame info. */
1065 struct frame_info *frame;
1067 /* The branch trace function segment. */
1068 const struct btrace_function *bfun;
1071 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1073 static htab_t bfcache;
1075 /* hash_f for htab_create_alloc of bfcache. */
1078 bfcache_hash (const void *arg)
1080 const struct btrace_frame_cache *cache = arg;
/* The cache is keyed on the frame pointer.  */
1082 return htab_hash_pointer (cache->frame);
1085 /* eq_f for htab_create_alloc of bfcache. */
1088 bfcache_eq (const void *arg1, const void *arg2)
1090 const struct btrace_frame_cache *cache1 = arg1;
1091 const struct btrace_frame_cache *cache2 = arg2;
1093 return cache1->frame == cache2->frame;
1096 /* Create a new btrace frame cache. */
1098 static struct btrace_frame_cache *
1099 bfcache_new (struct frame_info *frame)
1101 struct btrace_frame_cache *cache;
/* Frame-obstack allocation ties the cache's lifetime to the frame.  */
1104 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1105 cache->frame = frame;
/* Register the new cache in the hash table; FRAME must not be there yet.  */
1107 slot = htab_find_slot (bfcache, cache, INSERT);
1108 gdb_assert (*slot == NULL);
1114 /* Extract the branch trace function from a branch trace frame. */
1116 static const struct btrace_function *
1117 btrace_get_frame_function (struct frame_info *frame)
1119 const struct btrace_frame_cache *cache;
1120 const struct btrace_function *bfun;
1121 struct btrace_frame_cache pattern;
/* Look up the cache entry for FRAME without inserting.  */
1124 pattern.frame = frame;
1126 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1134 /* Implement stop_reason method for record_btrace_frame_unwind. */
1136 static enum unwind_stop_reason
1137 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1140 const struct btrace_frame_cache *cache;
1141 const struct btrace_function *bfun;
1143 cache = *this_cache;
1145 gdb_assert (bfun != NULL);
/* Without a caller segment we cannot unwind any further.  */
1147 if (bfun->up == NULL)
1148 return UNWIND_UNAVAILABLE;
1150 return UNWIND_NO_REASON;
1153 /* Implement this_id method for record_btrace_frame_unwind. */
1156 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1157 struct frame_id *this_id)
1159 const struct btrace_frame_cache *cache;
1160 const struct btrace_function *bfun;
1161 CORE_ADDR code, special;
1163 cache = *this_cache;
1166 gdb_assert (bfun != NULL);
/* Walk back to the first segment of this function so the id stays stable
   across segments of the same function instance.  */
1168 while (bfun->segment.prev != NULL)
1169 bfun = bfun->segment.prev;
1171 code = get_frame_func (this_frame);
1172 special = bfun->number;
/* The stack is not available during replay; build an id without it, using
   the function segment number to disambiguate recursion.  */
1174 *this_id = frame_id_build_unavailable_stack_special (code, special);
1176 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1177 btrace_get_bfun_name (cache->bfun),
1178 core_addr_to_string_nz (this_id->code_addr),
1179 core_addr_to_string_nz (this_id->special_addr));
1182 /* Implement prev_register method for record_btrace_frame_unwind. */
1184 static struct value *
1185 record_btrace_frame_prev_register (struct frame_info *this_frame,
1189 const struct btrace_frame_cache *cache;
1190 const struct btrace_function *bfun, *caller;
1191 const struct btrace_insn *insn;
1192 struct gdbarch *gdbarch;
1196 gdbarch = get_frame_arch (this_frame);
1197 pcreg = gdbarch_pc_regnum (gdbarch);
/* Only the PC can be reconstructed from the recorded trace.  */
1198 if (pcreg < 0 || regnum != pcreg)
1199 throw_error (NOT_AVAILABLE_ERROR,
1200 _("Registers are not available in btrace record history"));
1202 cache = *this_cache;
1204 gdb_assert (bfun != NULL);
1208 throw_error (NOT_AVAILABLE_ERROR,
1209 _("No caller in btrace record history"));
/* Up link via return: take the caller's first recorded instruction.  */
1211 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1213 insn = VEC_index (btrace_insn_s, caller->insn, 0);
/* Up link via call: resume after the caller's last recorded instruction.  */
1218 insn = VEC_last (btrace_insn_s, caller->insn);
1221 pc += gdb_insn_length (gdbarch, pc);
1224 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1225 btrace_get_bfun_name (bfun), bfun->level,
1226 core_addr_to_string_nz (pc));
1228 return frame_unwind_got_address (this_frame, regnum, pc);
1231 /* Implement sniffer method for record_btrace_frame_unwind. */
1234 record_btrace_frame_sniffer (const struct frame_unwind *self,
1235 struct frame_info *this_frame,
1238 const struct btrace_function *bfun;
1239 struct btrace_frame_cache *cache;
1240 struct thread_info *tp;
1241 struct frame_info *next;
1243 /* THIS_FRAME does not contain a reference to its thread. */
1244 tp = find_thread_ptid (inferior_ptid);
1245 gdb_assert (tp != NULL);
1248 next = get_next_frame (this_frame);
/* For the innermost frame, take the current replay position.  */
1251 const struct btrace_insn_iterator *replay;
1253 replay = tp->btrace.replay;
1255 bfun = replay->function;
/* For outer frames, take the caller of the next (inner) btrace frame,
   unless the callee was entered via a tail call (handled below).  */
1259 const struct btrace_function *callee;
1261 callee = btrace_get_frame_function (next);
1262 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1269 DEBUG ("[frame] sniffed frame for %s on level %d",
1270 btrace_get_bfun_name (bfun), bfun->level);
1272 /* This is our frame. Initialize the frame cache. */
1273 cache = bfcache_new (this_frame);
1277 *this_cache = cache;
1281 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1284 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1285 struct frame_info *this_frame,
1288 const struct btrace_function *bfun, *callee;
1289 struct btrace_frame_cache *cache;
1290 struct frame_info *next;
1292 next = get_next_frame (this_frame);
1296 callee = btrace_get_frame_function (next);
/* Only handle frames whose callee was entered via a tail call.  */
1300 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1307 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1308 btrace_get_bfun_name (bfun), bfun->level);
1310 /* This is our frame. Initialize the frame cache. */
1311 cache = bfcache_new (this_frame);
1312 cache->tp = find_thread_ptid (inferior_ptid);
1315 *this_cache = cache;
/* Remove the frame's entry from the bfcache hash table on destruction.  */
1320 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1322 struct btrace_frame_cache *cache;
1327 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1328 gdb_assert (slot != NULL);
1330 htab_remove_elt (bfcache, cache);
1333 /* btrace recording does not store previous memory content, nor the stack
1334 frames' content. Any unwinding would return erroneous results as the stack
1335 contents no longer match the changed PC value restored from history.
1336 Therefore this unwinder reports any possibly unwound registers as
1339 const struct frame_unwind record_btrace_frame_unwind =
1342 record_btrace_frame_unwind_stop_reason,
1343 record_btrace_frame_this_id,
1344 record_btrace_frame_prev_register,
1346 record_btrace_frame_sniffer,
1347 record_btrace_frame_dealloc_cache
/* Same unwinder, but sniffing only frames entered via a tail call.  */
1350 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1353 record_btrace_frame_unwind_stop_reason,
1354 record_btrace_frame_this_id,
1355 record_btrace_frame_prev_register,
1357 record_btrace_tailcall_frame_sniffer,
1358 record_btrace_frame_dealloc_cache
1361 /* Implement the to_get_unwinder method. */
1363 static const struct frame_unwind *
1364 record_btrace_to_get_unwinder (struct target_ops *self)
1366 return &record_btrace_frame_unwind;
1369 /* Implement the to_get_tailcall_unwinder method. */
1371 static const struct frame_unwind *
1372 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1374 return &record_btrace_tailcall_frame_unwind;
1377 /* Indicate that TP should be resumed according to FLAG. */
1380 record_btrace_resume_thread (struct thread_info *tp,
1381 enum btrace_thread_flag flag)
1383 struct btrace_thread_info *btinfo;
1385 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1387 btinfo = &tp->btrace;
/* A thread may only have one pending move request at a time.  */
1389 if ((btinfo->flags & BTHR_MOVE) != 0)
1390 error (_("Thread already moving."));
1392 /* Fetch the latest branch trace. */
1395 btinfo->flags |= flag;
1398 /* Find the thread to resume given a PTID. */
1400 static struct thread_info *
1401 record_btrace_find_resume_thread (ptid_t ptid)
1403 struct thread_info *tp;
1405 /* When asked to resume everything, we pick the current thread. */
1406 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1407 ptid = inferior_ptid;
1409 return find_thread_ptid (ptid);
1412 /* Start replaying a thread. */
1414 static struct btrace_insn_iterator *
1415 record_btrace_start_replaying (struct thread_info *tp)
1417 volatile struct gdb_exception except;
1418 struct btrace_insn_iterator *replay;
1419 struct btrace_thread_info *btinfo;
1422 btinfo = &tp->btrace;
1425 /* We can't start replaying without trace. */
1426 if (btinfo->begin == NULL)
1429 /* Clear the executing flag to allow changes to the current frame.
1430 We are not actually running, yet. We just started a reverse execution
1431 command or a record goto command.
1432 For the latter, EXECUTING is false and this has no effect.
1433 For the former, EXECUTING is true and we're in to_wait, about to
1434 move the thread. Since we need to recompute the stack, we temporarily
1435 set EXECUTING to false. */
1436 executing = is_executing (tp->ptid);
1437 set_executing (tp->ptid, 0);
1439 /* GDB stores the current frame_id when stepping in order to detect steps
1441 Since frames are computed differently when we're replaying, we need to
1442 recompute those stored frames and fix them up so we can still detect
1443 subroutines after we started replaying. */
1444 TRY_CATCH (except, RETURN_MASK_ALL)
1446 struct frame_info *frame;
1447 struct frame_id frame_id;
1448 int upd_step_frame_id, upd_step_stack_frame_id;
1450 /* The current frame without replaying - computed via normal unwind. */
1451 frame = get_current_frame ();
1452 frame_id = get_frame_id (frame);
1454 /* Check if we need to update any stepping-related frame id's. */
1455 upd_step_frame_id = frame_id_eq (frame_id,
1456 tp->control.step_frame_id);
1457 upd_step_stack_frame_id = frame_id_eq (frame_id,
1458 tp->control.step_stack_frame_id);
1460 /* We start replaying at the end of the branch trace. This corresponds
1461 to the current instruction. */
1462 replay = xmalloc (sizeof (*replay));
1463 btrace_insn_end (replay, btinfo);
1465 /* We're not replaying, yet. */
1466 gdb_assert (btinfo->replay == NULL);
1467 btinfo->replay = replay;
1469 /* Make sure we're not using any stale registers. */
1470 registers_changed_ptid (tp->ptid);
1472 /* The current frame with replaying - computed via btrace unwind. */
1473 frame = get_current_frame ();
1474 frame_id = get_frame_id (frame);
1476 /* Replace stepping related frames where necessary. */
1477 if (upd_step_frame_id)
1478 tp->control.step_frame_id = frame_id;
1479 if (upd_step_stack_frame_id)
1480 tp->control.step_stack_frame_id = frame_id;
1483 /* Restore the previous execution state. */
1484 set_executing (tp->ptid, executing);
/* On error, undo the partial replay setup before re-throwing.  */
1486 if (except.reason < 0)
1488 xfree (btinfo->replay);
1489 btinfo->replay = NULL;
1491 registers_changed_ptid (tp->ptid);
1493 throw_exception (except);
1499 /* Stop replaying a thread. */
1502 record_btrace_stop_replaying (struct thread_info *tp)
1504 struct btrace_thread_info *btinfo;
1506 btinfo = &tp->btrace;
1508 xfree (btinfo->replay);
1509 btinfo->replay = NULL;
1511 /* Make sure we're not leaving any stale registers. */
1512 registers_changed_ptid (tp->ptid);
1515 /* The to_resume method of target record-btrace. */
1518 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1519 enum gdb_signal signal)
1521 struct thread_info *tp, *other;
1522 enum btrace_thread_flag flag;
1524 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1526 /* Store the execution direction of the last resume. */
1527 record_btrace_resume_exec_dir = execution_direction;
1529 tp = record_btrace_find_resume_thread (ptid);
1531 error (_("Cannot find thread to resume."));
1533 /* Stop replaying other threads if the thread to resume is not replaying. */
1534 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1535 ALL_NON_EXITED_THREADS (other)
1536 record_btrace_stop_replaying (other);
1538 /* As long as we're not replaying, just forward the request. */
1539 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1542 return ops->to_resume (ops, ptid, step, signal);
1545 /* Compute the btrace thread flag for the requested move. */
1547 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1549 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1551 /* At the moment, we only move a single thread. We could also move
1552 all threads in parallel by single-stepping each resumed thread
1553 until the first runs into an event.
1554 When we do that, we would want to continue all other threads.
1555 For now, just resume one thread to not confuse to_wait. */
1556 record_btrace_resume_thread (tp, flag);
1558 /* We just indicate the resume intent here. The actual stepping happens in
1559 record_btrace_wait below. */
1561 /* Async support. */
1562 if (target_can_async_p ())
1564 target_async (inferior_event_handler, 0);
1565 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1569 /* Find a thread to move. */
1571 static struct thread_info *
1572 record_btrace_find_thread_to_move (ptid_t ptid)
1574 struct thread_info *tp;
1576 /* First check the parameter thread. */
1577 tp = find_thread_ptid (ptid);
1578 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1581 /* Otherwise, find one other thread that has been resumed. */
1582 ALL_NON_EXITED_THREADS (tp)
1583 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1589 /* Return a target_waitstatus indicating that we ran out of history. */
1591 static struct target_waitstatus
1592 btrace_step_no_history (void)
1594 struct target_waitstatus status;
1596 status.kind = TARGET_WAITKIND_NO_HISTORY;
1601 /* Return a target_waitstatus indicating that a step finished. */
1603 static struct target_waitstatus
1604 btrace_step_stopped (void)
1606 struct target_waitstatus status;
1608 status.kind = TARGET_WAITKIND_STOPPED;
1609 status.value.sig = GDB_SIGNAL_TRAP;
1614 /* Clear the record histories. */
1617 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1619 xfree (btinfo->insn_history);
1620 xfree (btinfo->call_history);
1622 btinfo->insn_history = NULL;
1623 btinfo->call_history = NULL;
1626 /* Step a single thread. */
1628 static struct target_waitstatus
1629 record_btrace_step_thread (struct thread_info *tp)
1631 struct btrace_insn_iterator *replay, end;
1632 struct btrace_thread_info *btinfo;
1633 struct address_space *aspace;
1634 struct inferior *inf;
1635 enum btrace_thread_flag flags;
1638 /* We can't step without an execution history. */
1639 if (btrace_is_empty (tp))
1640 return btrace_step_no_history ();
1642 btinfo = &tp->btrace;
1643 replay = btinfo->replay;
1645 flags = btinfo->flags & BTHR_MOVE;
1646 btinfo->flags &= ~BTHR_MOVE;
1648 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1653 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1656 /* We're done if we're not replaying. */
1658 return btrace_step_no_history ();
1660 /* We are always able to step at least once. */
1661 steps = btrace_insn_next (replay, 1);
1662 gdb_assert (steps == 1);
1664 /* Determine the end of the instruction trace. */
1665 btrace_insn_end (&end, btinfo);
1667 /* We stop replaying if we reached the end of the trace. */
1668 if (btrace_insn_cmp (replay, &end) == 0)
1669 record_btrace_stop_replaying (tp);
1671 return btrace_step_stopped ();
1674 /* Start replaying if we're not already doing so. */
1676 replay = record_btrace_start_replaying (tp);
1678 /* If we can't step any further, we reached the end of the history. */
1679 steps = btrace_insn_prev (replay, 1);
1681 return btrace_step_no_history ();
1683 return btrace_step_stopped ();
1686 /* We're done if we're not replaying. */
1688 return btrace_step_no_history ();
1690 inf = find_inferior_ptid (tp->ptid);
1691 aspace = inf->aspace;
1693 /* Determine the end of the instruction trace. */
1694 btrace_insn_end (&end, btinfo);
1698 const struct btrace_insn *insn;
1700 /* We are always able to step at least once. */
1701 steps = btrace_insn_next (replay, 1);
1702 gdb_assert (steps == 1);
1704 /* We stop replaying if we reached the end of the trace. */
1705 if (btrace_insn_cmp (replay, &end) == 0)
1707 record_btrace_stop_replaying (tp);
1708 return btrace_step_no_history ();
1711 insn = btrace_insn_get (replay);
1714 DEBUG ("stepping %d (%s) ... %s", tp->num,
1715 target_pid_to_str (tp->ptid),
1716 core_addr_to_string_nz (insn->pc));
1718 if (breakpoint_here_p (aspace, insn->pc))
1719 return btrace_step_stopped ();
1723 /* Start replaying if we're not already doing so. */
1725 replay = record_btrace_start_replaying (tp);
1727 inf = find_inferior_ptid (tp->ptid);
1728 aspace = inf->aspace;
1732 const struct btrace_insn *insn;
1734 /* If we can't step any further, we're done. */
1735 steps = btrace_insn_prev (replay, 1);
1737 return btrace_step_no_history ();
1739 insn = btrace_insn_get (replay);
1742 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1743 target_pid_to_str (tp->ptid),
1744 core_addr_to_string_nz (insn->pc));
1746 if (breakpoint_here_p (aspace, insn->pc))
1747 return btrace_step_stopped ();
1752 /* The to_wait method of target record-btrace. */
1755 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1756 struct target_waitstatus *status, int options)
1758 struct thread_info *tp, *other;
1760 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1762 /* As long as we're not replaying, just forward the request. */
1763 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1766 return ops->to_wait (ops, ptid, status, options);
1769 /* Let's find a thread to move. */
1770 tp = record_btrace_find_thread_to_move (ptid);
1773 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1775 status->kind = TARGET_WAITKIND_IGNORE;
1776 return minus_one_ptid;
1779 /* We only move a single thread. We're not able to correlate threads. */
1780 *status = record_btrace_step_thread (tp);
1782 /* Stop all other threads. */
1784 ALL_NON_EXITED_THREADS (other)
1785 other->btrace.flags &= ~BTHR_MOVE;
1787 /* Start record histories anew from the current position. */
1788 record_btrace_clear_histories (&tp->btrace);
1790 /* We moved the replay position but did not update registers. */
1791 registers_changed_ptid (tp->ptid);
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Replaying from recorded trace always supports reverse execution.  */
  return 1;
}
1804 /* The to_decr_pc_after_break method of target record-btrace. */
1807 record_btrace_decr_pc_after_break (struct target_ops *ops,
1808 struct gdbarch *gdbarch)
1810 /* When replaying, we do not actually execute the breakpoint instruction
1811 so there is no need to adjust the PC after hitting a breakpoint. */
1812 if (record_btrace_is_replaying (ops))
1815 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1818 /* The to_update_thread_list method of target record-btrace. */
1821 record_btrace_update_thread_list (struct target_ops *ops)
1823 /* We don't add or remove threads during replay. */
1824 if (record_btrace_is_replaying (ops))
1827 /* Forward the request. */
1829 ops->to_update_thread_list (ops);
1832 /* The to_thread_alive method of target record-btrace. */
1835 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1837 /* We don't add or remove threads during replay. */
1838 if (record_btrace_is_replaying (ops))
1839 return find_thread_ptid (ptid) != NULL;
1841 /* Forward the request. */
1843 return ops->to_thread_alive (ops, ptid);
1846 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1850 record_btrace_set_replay (struct thread_info *tp,
1851 const struct btrace_insn_iterator *it)
1853 struct btrace_thread_info *btinfo;
1855 btinfo = &tp->btrace;
1857 if (it == NULL || it->function == NULL)
1858 record_btrace_stop_replaying (tp);
1861 if (btinfo->replay == NULL)
1862 record_btrace_start_replaying (tp);
1863 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1866 *btinfo->replay = *it;
1867 registers_changed_ptid (tp->ptid);
1870 /* Start anew from the new replay position. */
1871 record_btrace_clear_histories (btinfo);
1874 /* The to_goto_record_begin method of target record-btrace. */
1877 record_btrace_goto_begin (struct target_ops *self)
1879 struct thread_info *tp;
1880 struct btrace_insn_iterator begin;
1882 tp = require_btrace_thread ();
1884 btrace_insn_begin (&begin, &tp->btrace);
1885 record_btrace_set_replay (tp, &begin);
1887 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1890 /* The to_goto_record_end method of target record-btrace. */
1893 record_btrace_goto_end (struct target_ops *ops)
1895 struct thread_info *tp;
1897 tp = require_btrace_thread ();
1899 record_btrace_set_replay (tp, NULL);
1901 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1904 /* The to_goto_record method of target record-btrace. */
1907 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1909 struct thread_info *tp;
1910 struct btrace_insn_iterator it;
1911 unsigned int number;
1916 /* Check for wrap-arounds. */
1918 error (_("Instruction number out of range."));
1920 tp = require_btrace_thread ();
1922 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1924 error (_("No such instruction."));
1926 record_btrace_set_replay (tp, &it);
1928 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1931 /* The to_execution_direction target method. */
1933 static enum exec_direction_kind
1934 record_btrace_execution_direction (struct target_ops *self)
1936 return record_btrace_resume_exec_dir;
1939 /* The to_prepare_to_generate_core target method. */
1942 record_btrace_prepare_to_generate_core (struct target_ops *self)
1944 record_btrace_generating_corefile = 1;
1947 /* The to_done_generating_core target method. */
1950 record_btrace_done_generating_core (struct target_ops *self)
1952 record_btrace_generating_corefile = 0;
1955 /* Initialize the record-btrace target ops. */
1958 init_record_btrace_ops (void)
1960 struct target_ops *ops;
1962 ops = &record_btrace_ops;
1963 ops->to_shortname = "record-btrace";
1964 ops->to_longname = "Branch tracing target";
1965 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1966 ops->to_open = record_btrace_open;
1967 ops->to_close = record_btrace_close;
1968 ops->to_async = record_btrace_async;
1969 ops->to_detach = record_detach;
1970 ops->to_disconnect = record_disconnect;
1971 ops->to_mourn_inferior = record_mourn_inferior;
1972 ops->to_kill = record_kill;
1973 ops->to_stop_recording = record_btrace_stop_recording;
1974 ops->to_info_record = record_btrace_info;
1975 ops->to_insn_history = record_btrace_insn_history;
1976 ops->to_insn_history_from = record_btrace_insn_history_from;
1977 ops->to_insn_history_range = record_btrace_insn_history_range;
1978 ops->to_call_history = record_btrace_call_history;
1979 ops->to_call_history_from = record_btrace_call_history_from;
1980 ops->to_call_history_range = record_btrace_call_history_range;
1981 ops->to_record_is_replaying = record_btrace_is_replaying;
1982 ops->to_xfer_partial = record_btrace_xfer_partial;
1983 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1984 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1985 ops->to_fetch_registers = record_btrace_fetch_registers;
1986 ops->to_store_registers = record_btrace_store_registers;
1987 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1988 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
1989 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
1990 ops->to_resume = record_btrace_resume;
1991 ops->to_wait = record_btrace_wait;
1992 ops->to_update_thread_list = record_btrace_update_thread_list;
1993 ops->to_thread_alive = record_btrace_thread_alive;
1994 ops->to_goto_record_begin = record_btrace_goto_begin;
1995 ops->to_goto_record_end = record_btrace_goto_end;
1996 ops->to_goto_record = record_btrace_goto;
1997 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1998 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1999 ops->to_execution_direction = record_btrace_execution_direction;
2000 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2001 ops->to_done_generating_core = record_btrace_done_generating_core;
2002 ops->to_stratum = record_stratum;
2003 ops->to_magic = OPS_MAGIC;
2006 /* Start recording in BTS format. */
2009 cmd_record_btrace_bts_start (char *args, int from_tty)
2011 volatile struct gdb_exception exception;
2013 if (args != NULL && *args != 0)
2014 error (_("Invalid argument."));
2016 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2018 TRY_CATCH (exception, RETURN_MASK_ALL)
2019 execute_command ("target record-btrace", from_tty);
2021 if (exception.error != 0)
2023 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2024 throw_exception (exception);
2028 /* Alias for "target record". */
2031 cmd_record_btrace_start (char *args, int from_tty)
2033 volatile struct gdb_exception exception;
2035 if (args != NULL && *args != 0)
2036 error (_("Invalid argument."));
2038 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2040 TRY_CATCH (exception, RETURN_MASK_ALL)
2041 execute_command ("target record-btrace", from_tty);
2043 if (exception.error == 0)
2046 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2047 throw_exception (exception);
2050 /* The "set record btrace" command. */
2053 cmd_set_record_btrace (char *args, int from_tty)
2055 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2058 /* The "show record btrace" command. */
2061 cmd_show_record_btrace (char *args, int from_tty)
2063 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2066 /* The "show record btrace replay-memory-access" command. */
2069 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2070 struct cmd_list_element *c, const char *value)
2072 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2073 replay_memory_access);
2076 void _initialize_record_btrace (void);
2078 /* Initialize btrace commands. */
2081 _initialize_record_btrace (void)
2083 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2084 _("Start branch trace recording."), &record_btrace_cmdlist,
2085 "record btrace ", 0, &record_cmdlist);
2086 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2088 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2090 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2091 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2092 This format may not be available on all processors."),
2093 &record_btrace_cmdlist);
2094 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2096 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2097 _("Set record options"), &set_record_btrace_cmdlist,
2098 "set record btrace ", 0, &set_record_cmdlist);
2100 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2101 _("Show record options"), &show_record_btrace_cmdlist,
2102 "show record btrace ", 0, &show_record_cmdlist);
2104 add_setshow_enum_cmd ("replay-memory-access", no_class,
2105 replay_memory_access_types, &replay_memory_access, _("\
2106 Set what memory accesses are allowed during replay."), _("\
2107 Show what memory accesses are allowed during replay."),
2108 _("Default is READ-ONLY.\n\n\
2109 The btrace record target does not trace data.\n\
2110 The memory therefore corresponds to the live target and not \
2111 to the current replay position.\n\n\
2112 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2113 When READ-WRITE, allow accesses to read-only and read-write memory during \
2115 NULL, cmd_show_replay_memory_access,
2116 &set_record_btrace_cmdlist,
2117 &show_record_btrace_cmdlist);
2119 init_record_btrace_ops ();
2120 add_target (&record_btrace_ops);
2122 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,