1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 /* The target_ops of record-btrace. */
40 static struct target_ops record_btrace_ops;
42 /* A new thread observer enabling branch tracing for the new thread. */
43 static struct observer *record_btrace_thread_observer;
45 /* Temporarily allow memory accesses. */
/* Non-zero while memory access must be permitted, e.g. in the breakpoint
   insert/remove paths below; see record_btrace_xfer_partial.  */
46 static int record_btrace_allow_memory_access;
48 /* Print a record-btrace debug message. Use do ... while (0) to avoid
49 ambiguities when used in if statements. */
51 #define DEBUG(msg, args...) \
54 if (record_debug != 0) \
55 fprintf_unfiltered (gdb_stdlog, \
56 "[record-btrace] " msg "\n", ##args); \
61 /* Update the branch trace for the current thread and return a pointer to its
64 Throws an error if there is no thread or no trace. This function never
67 static struct thread_info *
68 require_btrace_thread (void)
70 struct thread_info *tp;
/* Look up the thread for the current inferior ptid.  */
74 tp = find_thread_ptid (inferior_ptid);
76 error (_("No thread."));
/* An empty trace is treated the same as a missing one.  */
80 if (btrace_is_empty (tp))
81 error (_("No trace."));
86 /* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
89 Throws an error if there is no thread or no trace. This function never
92 static struct btrace_thread_info *
95 struct thread_info *tp;
/* Thread/trace validation is delegated to require_btrace_thread.  */
97 tp = require_btrace_thread ();
102 /* Enable branch tracing for one thread. Warn on errors. */
105 record_btrace_enable_warn (struct tp thread_info *tp)
107 volatile struct gdb_exception error;
/* Downgrade btrace errors to warnings; this runs from the new-thread
   observer (see record_btrace_auto_enable), where throwing is undesirable.  */
109 TRY_CATCH (error, RETURN_MASK_ERROR)
112 if (error.message != NULL)
113 warning ("%s", error.message);
116 /* Callback function to disable branch tracing for one thread. */
119 record_btrace_disable_callback (void *arg)
/* ARG is the struct thread_info * registered via make_cleanup in
   record_btrace_open.  */
121 struct thread_info *tp;
128 /* Enable automatic tracing of new threads. */
131 record_btrace_auto_enable (void)
133 DEBUG ("attach thread observer");
/* Remember the observer so record_btrace_auto_disable can detach it.  */
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn);
139 /* Disable automatic tracing of new threads. */
142 record_btrace_auto_disable (void)
144 /* The observer may have been detached, already. */
145 if (record_btrace_thread_observer == NULL)
148 DEBUG ("detach thread observer");
/* Detach and clear so a later call becomes a no-op.  */
150 observer_detach_new_thread (record_btrace_thread_observer);
151 record_btrace_thread_observer = NULL;
154 /* The to_open method of target record-btrace. */
157 record_btrace_open (char *args, int from_tty)
159 struct cleanup *disable_chain;
160 struct thread_info *tp;
166 if (!target_has_execution)
167 error (_("The program is not being run."));
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
173 error (_("Record btrace can't debug inferior in non-stop mode."))
175 gdb_assert (record_btrace_thread_observer == NULL);
/* Anchor for the per-thread disable cleanups registered below, so a
   failure mid-way disables tracing for already-enabled threads.  */
177 disable_chain = make_cleanup (null_cleanup, NULL);
/* Enable tracing for the threads selected by ARGS (all when empty).  */
179 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
183 make_cleanup (record_btrace_disable_callback, tp);
186 record_btrace_auto_enable ();
188 push_target (&record_btrace_ops);
190 observer_notify_record_changed (current_inferior (), 1);
/* Success - keep tracing enabled.  */
192 discard_cleanups (disable_chain);
195 /* The to_stop_recording method of target record-btrace. */
198 record_btrace_stop_recording (struct target_ops *self)
200 struct thread_info *tp;
202 DEBUG ("stop recording");
204 record_btrace_auto_disable ();
/* Disable tracing for every thread that is still being traced.  */
207 if (tp->btrace.target != NULL)
211 /* The to_close method of target record-btrace. */
214 record_btrace_close (struct target_ops *self)
216 struct thread_info *tp;
218 /* Make sure automatic recording gets disabled even if we did not stop
219 recording before closing the record-btrace target. */
220 record_btrace_auto_disable ();
222 /* We should have already stopped recording.
223 Tear down btrace in case we have not. */
/* NOTE(review): presumably iterates over all threads; the loop header is
   not visible in this listing -- confirm.  */
225 btrace_teardown (tp);
228 /* The to_info_record method of target record-btrace. */
231 record_btrace_info (struct target_ops *self)
233 struct btrace_thread_info *btinfo;
234 struct thread_info *tp;
235 unsigned int insns, calls;
239 tp = find_thread_ptid (inferior_ptid);
241 error (_("No thread."));
248 btinfo = &tp->btrace;
250 if (!btrace_is_empty (tp))
252 struct btrace_call_iterator call;
253 struct btrace_insn_iterator insn;
/* The number of the last call/instruction gives the totals recorded
   so far.  */
255 btrace_call_end (&call, btinfo);
256 btrace_call_prev (&call, 1);
257 calls = btrace_call_number (&call);
259 btrace_insn_end (&insn, btinfo);
260 btrace_insn_prev (&insn, 1);
261 insns = btrace_insn_number (&insn);
264 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
265 "%d (%s).\n"), insns, calls, tp->num,
266 target_pid_to_str (tp->ptid));
/* Also report the replay position, if any.  */
268 if (btrace_is_replaying (tp))
269 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
270 btrace_insn_number (btinfo->replay))
273 /* Print an unsigned int. */
276 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
/* ui-out has no dedicated unsigned field printer; format VAL manually.  */
278 ui_out_field_fmt (uiout, fld, "%u", val);
281 /* Disassemble a section of the recorded instruction trace. */
284 btrace_insn_history (struct ui_out *uiout,
285 const struct btrace_insn_iterator *begin,
286 const struct btrace_insn_iterator *end, int flags)
288 struct gdbarch *gdbarch;
289 struct btrace_insn_iterator it;
291 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
292 btrace_insn_number (end));
294 gdbarch = target_gdbarch ();
/* Print one line per instruction in the half-open range [BEGIN; END).  */
296 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
298 const struct btrace_insn *insn;
300 insn = btrace_insn_get (&it);
302 /* Print the instruction index. */
303 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
304 ui_out_text (uiout, "\t");
306 /* Disassembly with '/m' flag may not produce the expected result.
/* Disassemble exactly one instruction starting at INSN->pc.  */
308 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
312 /* The to_insn_history method of target record-btrace. */
315 record_btrace_insn_history (struct target_ops *self, int size, int flags)
317 struct btrace_thread_info *btinfo;
318 struct btrace_insn_history *history;
319 struct btrace_insn_iterator begin, end;
320 struct cleanup *uiout_cleanup;
321 struct ui_out *uiout;
322 unsigned int context, covered;
324 uiout = current_uiout;
325 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* SIZE's magnitude is the number of instructions to print.  */
327 context = abs (size);
329 error (_("Bad record instruction-history-size."));
331 btinfo = require_btrace ();
332 history = btinfo->insn_history;
335 struct btrace_insn_iterator *replay;
337 DEBUG ("insn-history (0x%x): %d", flags, size);
339 /* If we're replaying, we start at the replay position. Otherwise, we
340 start at the tail of the trace. */
341 replay = btinfo->replay;
345 btrace_insn_end (&begin, btinfo);
347 /* We start from here and expand in the requested direction. Then we
348 expand in the other direction, as well, to fill up any remaining
353 /* We want the current position covered, as well. */
354 covered = btrace_insn_next (&end, 1);
355 covered += btrace_insn_prev (&begin, context - covered);
356 covered += btrace_insn_next (&end, context - covered);
360 covered = btrace_insn_next (&end, context);
361 covered += btrace_insn_prev (&begin, context - covered);
/* Continue from where the previous insn-history request stopped.  */
366 begin = history->begin;
369 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
370 btrace_insn_number (&begin), btrace_insn_number (&end));
375 covered = btrace_insn_prev (&begin, context);
380 covered = btrace_insn_next (&end, context);
385 btrace_insn_history (uiout, &begin, &end, flags);
/* Tell the user when we reach either end of the trace.  */
389 printf_unfiltered (_("At the start of the branch trace record.\n"));
391 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the printed range for the next repeat of the command.  */
394 btrace_set_insn_history (btinfo, &begin, &end);
395 do_cleanups (uiout_cleanup);
398 /* The to_insn_history_range method of target record-btrace. */
401 record_btrace_insn_history_range (struct target_ops *self,
402 ULONGEST from, ULONGEST to, int flags)
404 struct btrace_thread_info *btinfo;
405 struct btrace_insn_history *history;
406 struct btrace_insn_iterator begin, end;
407 struct cleanup *uiout_cleanup;
408 struct ui_out *uiout;
409 unsigned int low, high;
412 uiout = current_uiout;
413 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* LOW/HIGH are FROM/TO narrowed to unsigned int (the assignments are not
   visible in this listing).  */
418 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
420 /* Check for wrap-arounds. */
421 if (low != from || high != to)
422 error (_("Bad range."));
425 error (_("Bad range."));
427 btinfo = require_btrace ();
/* BEGIN must exist in the trace; END past the trace is truncated below.  */
429 found = btrace_find_insn_by_number (&begin, btinfo, low);
431 error (_("Range out of bounds."));
433 found = btrace_find_insn_by_number (&end, btinfo, high);
436 /* Silently truncate the range. */
437 btrace_insn_end (&end, btinfo);
441 /* We want both begin and end to be inclusive. */
442 btrace_insn_next (&end, 1);
445 btrace_insn_history (uiout, &begin, &end, flags);
446 btrace_set_insn_history (btinfo, &begin, &end);
448 do_cleanups (uiout_cleanup);
451 /* The to_insn_history_from method of target record-btrace. */
454 record_btrace_insn_history_from (struct target_ops *self,
455 ULONGEST from, int size, int flags)
457 ULONGEST begin, end, context;
459 context = abs (size);
461 error (_("Bad record instruction-history-size."));
/* NOTE(review): SIZE's sign presumably selects whether FROM is the last or
   the first instruction printed; the branches are not visible in this
   listing -- confirm.  */
470 begin = from - context + 1;
475 end = from + context - 1;
477 /* Check for wrap-around. */
482 record_btrace_insn_history_range (self, begin, end, flags);
485 /* Print the instruction number range for a function call history line. */
488 btrace_call_history_insn_range (struct ui_out *uiout,
489 const struct btrace_function *bfun)
491 unsigned int begin, end, size;
493 size = VEC_length (btrace_insn_s, bfun->insn);
494 gdb_assert (size > 0);
/* The printed range is inclusive on both ends.  */
496 begin = bfun->insn_offset;
497 end = begin + size - 1;
499 ui_out_field_uint (uiout, "insn begin", begin);
500 ui_out_text (uiout, ",");
501 ui_out_field_uint (uiout, "insn end", end);
504 /* Print the source line information for a function call history line. */
507 btrace_call_history_src_line (struct ui_out *uiout,
508 const struct btrace_function *bfun)
517 ui_out_field_string (uiout, "file",
518 symtab_to_filename_for_display (sym->symtab));
520 begin = bfun->lbegin;
526 ui_out_text (uiout, ":");
527 ui_out_field_int (uiout, "min line", begin);
/* NOTE(review): the max line is presumably printed only when it differs
   from the min line; the guard is not visible in this listing.  */
532 ui_out_text (uiout, ",");
533 ui_out_field_int (uiout, "max line", end);
536 /* Get the name of a branch trace function. */
539 btrace_get_bfun_name (const struct btrace_function *bfun)
541 struct minimal_symbol *msym;
/* Prefer the full symbol's print name, falling back to the minimal
   symbol's.  */
551 return SYMBOL_PRINT_NAME (sym);
552 else if (msym != NULL)
553 return SYMBOL_PRINT_NAME (msym);
558 /* Disassemble a section of the recorded function trace. */
561 btrace_call_history (struct ui_out *uiout,
562 const struct btrace_thread_info *btinfo,
563 const struct btrace_call_iterator *begin,
564 const struct btrace_call_iterator *end,
565 enum record_print_flag flags)
567 struct btrace_call_iterator it;
569 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
570 btrace_call_number (end));
/* Print one line per function call in the half-open range [BEGIN; END).  */
572 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
574 const struct btrace_function *bfun;
575 struct minimal_symbol *msym;
578 bfun = btrace_call_get (&it);
582 /* Print the function index. */
583 ui_out_field_uint (uiout, "index", bfun->number);
584 ui_out_text (uiout, "\t");
586 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
588 int level = bfun->level + btinfo->level, i;
/* Indent according to the call nesting level.  */
590 for (i = 0; i < level; ++i)
591 ui_out_text (uiout, "  ");
/* Fall back from symbol to minimal symbol to "??" for the name.  */
595 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
596 else if (msym != NULL)
597 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
598 else if (!ui_out_is_mi_like_p (uiout))
599 ui_out_field_string (uiout, "function", "??");
601 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
603 ui_out_text (uiout, _("\tinst "));
604 btrace_call_history_insn_range (uiout, bfun);
607 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
609 ui_out_text (uiout, _("\tat "));
610 btrace_call_history_src_line (uiout, bfun);
613 ui_out_text (uiout, "\n");
617 /* The to_call_history method of target record-btrace. */
620 record_btrace_call_history (struct target_ops *self, int size, int flags)
622 struct btrace_thread_info *btinfo;
623 struct btrace_call_history *history;
624 struct btrace_call_iterator begin, end;
625 struct cleanup *uiout_cleanup;
626 struct ui_out *uiout;
627 unsigned int context, covered;
629 uiout = current_uiout;
630 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* SIZE's magnitude is the number of calls to print.  */
632 context = abs (size);
634 error (_("Bad record function-call-history-size."));
636 btinfo = require_btrace ();
637 history = btinfo->call_history;
640 struct btrace_insn_iterator *replay;
642 DEBUG ("call-history (0x%x): %d", flags, size);
644 /* If we're replaying, we start at the replay position. Otherwise, we
645 start at the tail of the trace. */
646 replay = btinfo->replay;
649 begin.function = replay->function;
650 begin.btinfo = btinfo;
653 btrace_call_end (&begin, btinfo);
655 /* We start from here and expand in the requested direction. Then we
656 expand in the other direction, as well, to fill up any remaining
661 /* We want the current position covered, as well. */
662 covered = btrace_call_next (&end, 1);
663 covered += btrace_call_prev (&begin, context - covered);
664 covered += btrace_call_next (&end, context - covered);
668 covered = btrace_call_next (&end, context);
669 covered += btrace_call_prev (&begin, context - covered);
/* Continue from where the previous call-history request stopped.  */
674 begin = history->begin;
677 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
678 btrace_call_number (&begin), btrace_call_number (&end));
683 covered = btrace_call_prev (&begin, context);
688 covered = btrace_call_next (&end, context);
693 btrace_call_history (uiout, btinfo, &begin, &end, flags);
/* Tell the user when we reach either end of the trace.  */
697 printf_unfiltered (_("At the start of the branch trace record.\n"));
699 printf_unfiltered (_("At the end of the branch trace record.\n"));
/* Remember the printed range for the next repeat of the command.  */
702 btrace_set_call_history (btinfo, &begin, &end);
703 do_cleanups (uiout_cleanup);
706 /* The to_call_history_range method of target record-btrace. */
709 record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
711 struct btrace_thread_info *btinfo;
712 struct btrace_call_history *history;
713 struct btrace_call_iterator begin, end;
714 struct cleanup *uiout_cleanup;
715 struct ui_out *uiout;
716 unsigned int low, high;
719 uiout = current_uiout;
720 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
/* LOW/HIGH are FROM/TO narrowed to unsigned int (the assignments are not
   visible in this listing).  */
725 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
727 /* Check for wrap-arounds. */
728 if (low != from || high != to)
729 error (_("Bad range."));
732 error (_("Bad range."));
734 btinfo = require_btrace ();
/* BEGIN must exist in the trace; END past the trace is truncated below.  */
736 found = btrace_find_call_by_number (&begin, btinfo, low);
738 error (_("Range out of bounds."));
740 found = btrace_find_call_by_number (&end, btinfo, high);
743 /* Silently truncate the range. */
744 btrace_call_end (&end, btinfo);
748 /* We want both begin and end to be inclusive. */
749 btrace_call_next (&end, 1);
752 btrace_call_history (uiout, btinfo, &begin, &end, flags);
753 btrace_set_call_history (btinfo, &begin, &end);
755 do_cleanups (uiout_cleanup);
758 /* The to_call_history_from method of target record-btrace. */
761 record_btrace_call_history_from (ULONGEST from, int size, int flags)
763 ULONGEST begin, end, context;
765 context = abs (size);
767 error (_("Bad record function-call-history-size."));
/* NOTE(review): SIZE's sign presumably selects whether FROM is the last or
   the first call printed; the branches are not visible in this listing --
   confirm.  */
776 begin = from - context + 1;
781 end = from + context - 1;
783 /* Check for wrap-around. */
788 record_btrace_call_history_range (begin, end, flags);
791 /* The to_record_is_replaying method of target record-btrace. */
794 record_btrace_is_replaying (struct target_ops *self)
796 struct thread_info *tp;
/* NOTE(review): the target counts as replaying if any thread is; the
   thread iteration is not visible in this listing -- confirm.  */
799 if (btrace_is_replaying (tp))
805 /* The to_xfer_partial method of target record-btrace. */
807 static enum target_xfer_status
808 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
809 const char *annex, gdb_byte *readbuf,
810 const gdb_byte *writebuf, ULONGEST offset,
811 ULONGEST len, ULONGEST *xfered_len)
813 struct target_ops *t;
815 /* Filter out requests that don't make sense during replay. */
816 if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
820 case TARGET_OBJECT_MEMORY:
822 struct target_section *section;
824 /* We do not allow writing memory in general. */
825 if (writebuf != NULL)
828 return TARGET_XFER_E_UNAVAILABLE;
831 /* We allow reading readonly memory. */
832 section = target_section_by_addr (ops, offset);
835 /* Check if the section we found is readonly. */
836 if ((bfd_get_section_flags (section->the_bfd_section->owner,
837 section->the_bfd_section)
838 & SEC_READONLY) != 0)
840 /* Truncate the request to fit into this section. */
841 len = min (len, section->endaddr - offset);
847 return TARGET_XFER_E_UNAVAILABLE;
852 /* Forward the request. */
853 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
854 if (ops->to_xfer_partial != NULL)
855 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
856 offset, len, xfered_len);
/* No target beneath could handle the request.  */
859 return TARGET_XFER_E_UNAVAILABLE;
862 /* The to_insert_breakpoint method of target record-btrace. */
865 record_btrace_insert_breakpoint (struct target_ops *ops,
866 struct gdbarch *gdbarch,
867 struct bp_target_info *bp_tgt)
869 volatile struct gdb_exception except;
872 /* Inserting breakpoints requires accessing memory. Allow it for the
873 duration of this function. */
874 old = record_btrace_allow_memory_access;
875 record_btrace_allow_memory_access = 1;
878 TRY_CATCH (except, RETURN_MASK_ALL)
879 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
/* Restore the previous setting before re-throwing any error.  */
881 record_btrace_allow_memory_access = old;
883 if (except.reason < 0)
884 throw_exception (except);
889 /* The to_remove_breakpoint method of target record-btrace. */
892 record_btrace_remove_breakpoint (struct target_ops *ops,
893 struct gdbarch *gdbarch,
894 struct bp_target_info *bp_tgt)
896 volatile struct gdb_exception except;
899 /* Removing breakpoints requires accessing memory. Allow it for the
900 duration of this function. */
901 old = record_btrace_allow_memory_access;
902 record_btrace_allow_memory_access = 1;
905 TRY_CATCH (except, RETURN_MASK_ALL)
906 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
/* Restore the previous setting before re-throwing any error.  */
908 record_btrace_allow_memory_access = old;
910 if (except.reason < 0)
911 throw_exception (except);
916 /* The to_fetch_registers method of target record-btrace. */
919 record_btrace_fetch_registers (struct target_ops *ops,
920 struct regcache *regcache, int regno)
922 struct btrace_insn_iterator *replay;
923 struct thread_info *tp;
925 tp = find_thread_ptid (inferior_ptid);
926 gdb_assert (tp != NULL);
928 replay = tp->btrace.replay;
/* While replaying, supply the PC of the current replay instruction.  */
931 const struct btrace_insn *insn;
932 struct gdbarch *gdbarch;
935 gdbarch = get_regcache_arch (regcache);
936 pcreg = gdbarch_pc_regnum (gdbarch);
940 /* We can only provide the PC register. */
941 if (regno >= 0 && regno != pcreg)
944 insn = btrace_insn_get (replay);
945 gdb_assert (insn != NULL);
947 regcache_raw_supply (regcache, regno, &insn->pc);
/* Not replaying: forward the request to the target beneath.  */
951 struct target_ops *t;
953 for (t = ops->beneath; t != NULL; t = t->beneath)
954 if (t->to_fetch_registers != NULL)
956 t->to_fetch_registers (t, regcache, regno);
962 /* The to_store_registers method of target record-btrace. */
965 record_btrace_store_registers (struct target_ops *ops,
966 struct regcache *regcache, int regno)
968 struct target_ops *t;
/* Registers are read-only while replaying recorded history.  */
970 if (record_btrace_is_replaying (ops))
971 error (_("This record target does not allow writing registers."));
973 gdb_assert (may_write_registers != 0);
/* Forward the store to the first target beneath that implements it.  */
975 for (t = ops->beneath; t != NULL; t = t->beneath)
976 if (t->to_store_registers != NULL)
978 t->to_store_registers (t, regcache, regno);
985 /* The to_prepare_to_store method of target record-btrace. */
988 record_btrace_prepare_to_store (struct target_ops *ops,
989 struct regcache *regcache)
991 struct target_ops *t;
/* Nothing to prepare while replaying; stores are rejected anyway.  */
993 if (record_btrace_is_replaying (ops))
/* Otherwise forward to the first target beneath that implements it.  */
996 for (t = ops->beneath; t != NULL; t = t->beneath)
997 if (t->to_prepare_to_store != NULL)
999 t->to_prepare_to_store (t, regcache);
1004 /* The branch trace frame cache. */
1006 struct btrace_frame_cache
/* The thread this frame belongs to (set by the frame sniffers).  */
1009 struct thread_info *tp;
1011 /* The frame info. */
1012 struct frame_info *frame;
1014 /* The branch trace function segment. */
1015 const struct btrace_function *bfun;
1018 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1020 static htab_t bfcache;
1022 /* hash_f for htab_create_alloc of bfcache. */
1025 bfcache_hash (const void *arg)
1027 const struct btrace_frame_cache *cache = arg;
/* Hash on the frame pointer; must agree with bfcache_eq.  */
1029 return htab_hash_pointer (cache->frame);
1032 /* eq_f for htab_create_alloc of bfcache. */
1035 bfcache_eq (const void *arg1, const void *arg2)
1037 const struct btrace_frame_cache *cache1 = arg1;
1038 const struct btrace_frame_cache *cache2 = arg2;
/* Two entries are equal when they describe the same frame.  */
1040 return cache1->frame == cache2->frame;
1043 /* Create a new btrace frame cache. */
1045 static struct btrace_frame_cache *
1046 bfcache_new (struct frame_info *frame)
1048 struct btrace_frame_cache *cache;
/* Allocate on the frame obstack so the cache lives as long as the frame.  */
1051 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1052 cache->frame = frame;
/* Register the new entry; FRAME must not already be in the table.  */
1054 slot = htab_find_slot (bfcache, cache, INSERT);
1055 gdb_assert (*slot == NULL);
1061 /* Extract the branch trace function from a branch trace frame. */
1063 static const struct btrace_function *
1064 btrace_get_frame_function (struct frame_info *frame)
1066 const struct btrace_frame_cache *cache;
1067 const struct btrace_function *bfun;
1068 struct btrace_frame_cache pattern;
1071 pattern.frame = frame;
/* Look FRAME up in the cache without inserting.  */
1073 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1081 /* Implement stop_reason method for record_btrace_frame_unwind. */
1083 static enum unwind_stop_reason
1084 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1087 const struct btrace_frame_cache *cache;
1088 const struct btrace_function *bfun;
1090 cache = *this_cache;
1092 gdb_assert (bfun != NULL);
/* Without a caller segment there is nothing to unwind to.  */
1094 if (bfun->up == NULL)
1095 return UNWIND_UNAVAILABLE;
1097 return UNWIND_NO_REASON;
1100 /* Implement this_id method for record_btrace_frame_unwind. */
1103 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1104 struct frame_id *this_id)
1106 const struct btrace_frame_cache *cache;
1107 const struct btrace_function *bfun;
1108 CORE_ADDR code, special;
1110 cache = *this_cache;
1113 gdb_assert (bfun != NULL);
/* Walk back to the first segment of this function call so the ID is
   stable across segments of the same call.  */
1115 while (bfun->segment.prev != NULL)
1116 bfun = bfun->segment.prev;
1118 code = get_frame_func (this_frame);
1119 special = bfun->number;
/* The stack is not available in recorded history; build the ID from the
   function address and the segment number.  */
1121 *this_id = frame_id_build_unavailable_stack_special (code, special);
1123 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1124 btrace_get_bfun_name (cache->bfun),
1125 core_addr_to_string_nz (this_id->code_addr),
1126 core_addr_to_string_nz (this_id->special_addr));
1129 /* Implement prev_register method for record_btrace_frame_unwind. */
1131 static struct value *
1132 record_btrace_frame_prev_register (struct frame_info *this_frame,
1136 const struct btrace_frame_cache *cache;
1137 const struct btrace_function *bfun, *caller;
1138 const struct btrace_insn *insn;
1139 struct gdbarch *gdbarch;
1143 gdbarch = get_frame_arch (this_frame);
1144 pcreg = gdbarch_pc_regnum (gdbarch);
/* Only the PC can be reconstructed from the branch trace.  */
1145 if (pcreg < 0 || regnum != pcreg)
1146 throw_error (NOT_AVAILABLE_ERROR,
1147 _("Registers are not available in btrace record history"));
1149 cache = *this_cache;
1151 gdb_assert (bfun != NULL);
1155 throw_error (NOT_AVAILABLE_ERROR,
1156 _("No caller in btrace record history"));
/* For a return link, the unwound PC is the first instruction of the
   caller segment ...  */
1158 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1160 insn = VEC_index (btrace_insn_s, caller->insn, 0);
/* ... otherwise it is the instruction after the caller's last (call)
   instruction.  */
1165 insn = VEC_last (btrace_insn_s, caller->insn);
1168 pc += gdb_insn_length (gdbarch, pc);
1171 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1172 btrace_get_bfun_name (bfun), bfun->level,
1173 core_addr_to_string_nz (pc));
1175 return frame_unwind_got_address (this_frame, regnum, pc);
1178 /* Implement sniffer method for record_btrace_frame_unwind. */
1181 record_btrace_frame_sniffer (const struct frame_unwind *self,
1182 struct frame_info *this_frame,
1185 const struct btrace_function *bfun;
1186 struct btrace_frame_cache *cache;
1187 struct thread_info *tp;
1188 struct frame_info *next;
1190 /* THIS_FRAME does not contain a reference to its thread. */
1191 tp = find_thread_ptid (inferior_ptid);
1192 gdb_assert (tp != NULL);
1195 next = get_next_frame (this_frame);
/* Innermost frame: take the function segment at the replay position
   (the surrounding NEXT == NULL guard is not visible in this listing).  */
1198 const struct btrace_insn_iterator *replay;
1200 replay = tp->btrace.replay;
1202 bfun = replay->function;
/* Otherwise, this frame is the caller of a known btrace callee frame.  */
1206 const struct btrace_function *callee;
1208 callee = btrace_get_frame_function (next);
1209 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1216 DEBUG ("[frame] sniffed frame for %s on level %d",
1217 btrace_get_bfun_name (bfun), bfun->level);
1219 /* This is our frame. Initialize the frame cache. */
1220 cache = bfcache_new (this_frame);
1224 *this_cache = cache;
1228 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1231 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1232 struct frame_info *this_frame,
1235 const struct btrace_function *bfun, *callee;
1236 struct btrace_frame_cache *cache;
1237 struct frame_info *next;
1239 next = get_next_frame (this_frame);
1243 callee = btrace_get_frame_function (next);
/* This sniffer only handles frames whose callee was reached via a tail
   call.  */
1247 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1254 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1255 btrace_get_bfun_name (bfun), bfun->level);
1257 /* This is our frame. Initialize the frame cache. */
1258 cache = bfcache_new (this_frame);
1259 cache->tp = find_thread_ptid (inferior_ptid);
1262 *this_cache = cache;
/* Implement the dealloc_cache method for record_btrace_frame_unwind;
   removes the cache entry for this frame from the bfcache hash table.  */
1267 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1269 struct btrace_frame_cache *cache;
1274 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1275 gdb_assert (slot != NULL);
1277 htab_remove_elt (bfcache, cache);
1280 /* btrace recording does not store previous memory content, nor the stack
1281 frame contents. Any unwinding would return erroneous results as the stack
1282 contents no longer match the changed PC value restored from history.
1283 Therefore this unwinder reports any possibly unwound registers as
1286 const struct frame_unwind record_btrace_frame_unwind =
/* Methods: stop_reason, this_id, prev_register, sniffer, dealloc_cache.  */
1289 record_btrace_frame_unwind_stop_reason,
1290 record_btrace_frame_this_id,
1291 record_btrace_frame_prev_register,
1293 record_btrace_frame_sniffer,
1294 record_btrace_frame_dealloc_cache
1297 const struct frame_unwind record_btrace_tailcall_frame_unwind =
/* Same methods as record_btrace_frame_unwind except for the tailcall
   sniffer.  */
1300 record_btrace_frame_unwind_stop_reason,
1301 record_btrace_frame_this_id,
1302 record_btrace_frame_prev_register,
1304 record_btrace_tailcall_frame_sniffer,
1305 record_btrace_frame_dealloc_cache
1308 /* Indicate that TP should be resumed according to FLAG. */
1311 record_btrace_resume_thread (struct thread_info *tp,
1312 enum btrace_thread_flag flag)
1314 struct btrace_thread_info *btinfo;
1316 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1318 btinfo = &tp->btrace;
/* Reject a second move request while one is still pending.  */
1320 if ((btinfo->flags & BTHR_MOVE) != 0)
1321 error (_("Thread already moving."));
1323 /* Fetch the latest branch trace. */
/* Record the resume intent; acted upon in to_wait.  */
1326 btinfo->flags |= flag;
1329 /* Find the thread to resume given a PTID. */
1331 static struct thread_info *
1332 record_btrace_find_resume_thread (ptid_t ptid)
1334 struct thread_info *tp;
1336 /* When asked to resume everything, we pick the current thread. */
1337 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1338 ptid = inferior_ptid;
/* May return NULL if no such thread exists.  */
1340 return find_thread_ptid (ptid);
1343 /* Start replaying a thread. */
1345 static struct btrace_insn_iterator *
1346 record_btrace_start_replaying (struct thread_info *tp)
1348 volatile struct gdb_exception except;
1349 struct btrace_insn_iterator *replay;
1350 struct btrace_thread_info *btinfo;
1353 btinfo = &tp->btrace;
1356 /* We can't start replaying without trace. */
1357 if (btinfo->begin == NULL)
1360 /* Clear the executing flag to allow changes to the current frame.
1361 We are not actually running, yet. We just started a reverse execution
1362 command or a record goto command.
1363 For the latter, EXECUTING is false and this has no effect.
1364 For the former, EXECUTING is true and we're in to_wait, about to
1365 move the thread. Since we need to recompute the stack, we temporarily
1366 set EXECUTING to false. */
1367 executing = is_executing (tp->ptid);
1368 set_executing (tp->ptid, 0);
1370 /* GDB stores the current frame_id when stepping in order to detect steps
1372 Since frames are computed differently when we're replaying, we need to
1373 recompute those stored frames and fix them up so we can still detect
1374 subroutines after we started replaying. */
1375 TRY_CATCH (except, RETURN_MASK_ALL)
1377 struct frame_info *frame;
1378 struct frame_id frame_id;
1379 int upd_step_frame_id, upd_step_stack_frame_id;
1381 /* The current frame without replaying - computed via normal unwind. */
1382 frame = get_current_frame ();
1383 frame_id = get_frame_id (frame);
1385 /* Check if we need to update any stepping-related frame id's. */
1386 upd_step_frame_id = frame_id_eq (frame_id,
1387 tp->control.step_frame_id);
1388 upd_step_stack_frame_id = frame_id_eq (frame_id,
1389 tp->control.step_stack_frame_id);
1391 /* We start replaying at the end of the branch trace. This corresponds
1392 to the current instruction. */
1393 replay = xmalloc (sizeof (*replay));
1394 btrace_insn_end (replay, btinfo);
1396 /* We're not replaying, yet. */
1397 gdb_assert (btinfo->replay == NULL);
1398 btinfo->replay = replay;
1400 /* Make sure we're not using any stale registers. */
1401 registers_changed_ptid (tp->ptid);
1403 /* The current frame with replaying - computed via btrace unwind. */
1404 frame = get_current_frame ();
1405 frame_id = get_frame_id (frame);
1407 /* Replace stepping related frames where necessary. */
1408 if (upd_step_frame_id)
1409 tp->control.step_frame_id = frame_id;
1410 if (upd_step_stack_frame_id)
1411 tp->control.step_stack_frame_id = frame_id;
1414 /* Restore the previous execution state. */
1415 set_executing (tp->ptid, executing);
/* On error, undo the partial replay setup before re-throwing.  */
1417 if (except.reason < 0)
1419 xfree (btinfo->replay);
1420 btinfo->replay = NULL;
1422 registers_changed_ptid (tp->ptid);
1424 throw_exception (except);
1430 /* Stop replaying a thread. */
1433 record_btrace_stop_replaying (struct thread_info *tp)
1435 struct btrace_thread_info *btinfo;
1437 btinfo = &tp->btrace;
/* Discard the replay iterator; a NULL replay pointer means "not
   replaying".  */
1439 xfree (btinfo->replay);
1440 btinfo->replay = NULL;
1442 /* Make sure we're not leaving any stale registers. */
1443 registers_changed_ptid (tp->ptid);
1446 /* The to_resume method of target record-btrace. */
1449 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1450 enum gdb_signal signal)
1452 struct thread_info *tp, *other;
1453 enum btrace_thread_flag flag;
1455 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1457 tp = record_btrace_find_resume_thread (ptid);
1459 error (_("Cannot find thread to resume."));
1461 /* Stop replaying other threads if the thread to resume is not replaying. */
1462 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1464 record_btrace_stop_replaying (other);
1466 /* As long as we're not replaying, just forward the request. */
1467 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1469 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1470 if (ops->to_resume != NULL)
1471 return ops->to_resume (ops, ptid, step, signal);
1473 error (_("Cannot find target for stepping."));
1476 /* Compute the btrace thread flag for the requested move. */
/* STEP selects between the continue and the single-step variants; the
   direction selects forward vs. reverse.  */
1478 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1480 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1482 /* At the moment, we only move a single thread. We could also move
1483 all threads in parallel by single-stepping each resumed thread
1484 until the first runs into an event.
1485 When we do that, we would want to continue all other threads.
1486 For now, just resume one thread to not confuse to_wait. */
1487 record_btrace_resume_thread (tp, flag);
1489 /* We just indicate the resume intent here. The actual stepping happens in
1490 record_btrace_wait below. */
1493 /* Find a thread to move. */
1495 static struct thread_info *
1496 record_btrace_find_thread_to_move (ptid_t ptid)
1498 struct thread_info *tp;
1500 /* First check the parameter thread. */
1501 tp = find_thread_ptid (ptid);
1502 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1505 /* Otherwise, find one other thread that has been resumed. */
1507 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1513 /* Return a target_waitstatus indicating that we ran out of history. */
1515 static struct target_waitstatus
1516 btrace_step_no_history (void)
1518 struct target_waitstatus status;
1520 status.kind = TARGET_WAITKIND_NO_HISTORY;
1525 /* Return a target_waitstatus indicating that a step finished. */
1527 static struct target_waitstatus
1528 btrace_step_stopped (void)
1530 struct target_waitstatus status;
1532 status.kind = TARGET_WAITKIND_STOPPED;
1533 status.value.sig = GDB_SIGNAL_TRAP;
1538 /* Clear the record histories. */
1541 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1543 xfree (btinfo->insn_history);
1544 xfree (btinfo->call_history);
1546 btinfo->insn_history = NULL;
1547 btinfo->call_history = NULL;
1550 /* Step a single thread. */
1552 static struct target_waitstatus
1553 record_btrace_step_thread (struct thread_info *tp)
1555 struct btrace_insn_iterator *replay, end;
1556 struct btrace_thread_info *btinfo;
1557 struct address_space *aspace;
1558 struct inferior *inf;
1559 enum btrace_thread_flag flags;
1562 btinfo = &tp->btrace;
1563 replay = btinfo->replay;
1565 flags = btinfo->flags & BTHR_MOVE;
1566 btinfo->flags &= ~BTHR_MOVE;
1568 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1573 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1576 /* We're done if we're not replaying. */
1578 return btrace_step_no_history ();
1580 /* We are always able to step at least once. */
1581 steps = btrace_insn_next (replay, 1);
1582 gdb_assert (steps == 1);
1584 /* Determine the end of the instruction trace. */
1585 btrace_insn_end (&end, btinfo);
1587 /* We stop replaying if we reached the end of the trace. */
1588 if (btrace_insn_cmp (replay, &end) == 0)
1589 record_btrace_stop_replaying (tp);
1591 return btrace_step_stopped ();
1594 /* Start replaying if we're not already doing so. */
1596 replay = record_btrace_start_replaying (tp);
1598 /* If we can't step any further, we reached the end of the history. */
1599 steps = btrace_insn_prev (replay, 1);
1601 return btrace_step_no_history ();
1603 return btrace_step_stopped ();
1606 /* We're done if we're not replaying. */
1608 return btrace_step_no_history ();
1610 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1611 aspace = inf->aspace;
1613 /* Determine the end of the instruction trace. */
1614 btrace_insn_end (&end, btinfo);
1618 const struct btrace_insn *insn;
1620 /* We are always able to step at least once. */
1621 steps = btrace_insn_next (replay, 1);
1622 gdb_assert (steps == 1);
1624 /* We stop replaying if we reached the end of the trace. */
1625 if (btrace_insn_cmp (replay, &end) == 0)
1627 record_btrace_stop_replaying (tp);
1628 return btrace_step_no_history ();
1631 insn = btrace_insn_get (replay);
1634 DEBUG ("stepping %d (%s) ... %s", tp->num,
1635 target_pid_to_str (tp->ptid),
1636 core_addr_to_string_nz (insn->pc));
1638 if (breakpoint_here_p (aspace, insn->pc))
1639 return btrace_step_stopped ();
1643 /* Start replaying if we're not already doing so. */
1645 replay = record_btrace_start_replaying (tp);
1647 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1648 aspace = inf->aspace;
1652 const struct btrace_insn *insn;
1654 /* If we can't step any further, we're done. */
1655 steps = btrace_insn_prev (replay, 1);
1657 return btrace_step_no_history ();
1659 insn = btrace_insn_get (replay);
1662 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1663 target_pid_to_str (tp->ptid),
1664 core_addr_to_string_nz (insn->pc));
1666 if (breakpoint_here_p (aspace, insn->pc))
1667 return btrace_step_stopped ();
1672 /* The to_wait method of target record-btrace. */
1675 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1676 struct target_waitstatus *status, int options)
1678 struct thread_info *tp, *other;
1680 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1682 /* As long as we're not replaying, just forward the request. */
1683 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1685 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1686 if (ops->to_wait != NULL)
1687 return ops->to_wait (ops, ptid, status, options);
1689 error (_("Cannot find target for waiting."));
1692 /* Let's find a thread to move. */
1693 tp = record_btrace_find_thread_to_move (ptid);
1696 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1698 status->kind = TARGET_WAITKIND_IGNORE;
1699 return minus_one_ptid;
1702 /* We only move a single thread. We're not able to correlate threads. */
1703 *status = record_btrace_step_thread (tp);
1705 /* Stop all other threads. */
1708 other->btrace.flags &= ~BTHR_MOVE;
1710 /* Start record histories anew from the current position. */
1711 record_btrace_clear_histories (&tp->btrace);
1713 /* We moved the replay position but did not update registers. */
1714 registers_changed_ptid (tp->ptid);
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Replaying recorded branch trace always supports reverse execution.  */
  return 1;
}
1727 /* The to_decr_pc_after_break method of target record-btrace. */
1730 record_btrace_decr_pc_after_break (struct target_ops *ops,
1731 struct gdbarch *gdbarch)
1733 /* When replaying, we do not actually execute the breakpoint instruction
1734 so there is no need to adjust the PC after hitting a breakpoint. */
1735 if (record_btrace_is_replaying (ops))
1738 return forward_target_decr_pc_after_break (ops->beneath, gdbarch);
1741 /* The to_find_new_threads method of target record-btrace. */
1744 record_btrace_find_new_threads (struct target_ops *ops)
1746 /* Don't expect new threads if we're replaying. */
1747 if (record_btrace_is_replaying (ops))
1750 /* Forward the request. */
1751 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1752 if (ops->to_find_new_threads != NULL)
1754 ops->to_find_new_threads (ops);
1759 /* The to_thread_alive method of target record-btrace. */
1762 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1764 /* We don't add or remove threads during replay. */
1765 if (record_btrace_is_replaying (ops))
1766 return find_thread_ptid (ptid) != NULL;
1768 /* Forward the request. */
1769 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1770 if (ops->to_thread_alive != NULL)
1771 return ops->to_thread_alive (ops, ptid);
1776 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1780 record_btrace_set_replay (struct thread_info *tp,
1781 const struct btrace_insn_iterator *it)
1783 struct btrace_thread_info *btinfo;
1785 btinfo = &tp->btrace;
1787 if (it == NULL || it->function == NULL)
1788 record_btrace_stop_replaying (tp);
1791 if (btinfo->replay == NULL)
1792 record_btrace_start_replaying (tp);
1793 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1796 *btinfo->replay = *it;
1797 registers_changed_ptid (tp->ptid);
1800 /* Start anew from the new replay position. */
1801 record_btrace_clear_histories (btinfo);
1804 /* The to_goto_record_begin method of target record-btrace. */
1807 record_btrace_goto_begin (struct target_ops *self)
1809 struct thread_info *tp;
1810 struct btrace_insn_iterator begin;
1812 tp = require_btrace_thread ();
1814 btrace_insn_begin (&begin, &tp->btrace);
1815 record_btrace_set_replay (tp, &begin);
1817 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1820 /* The to_goto_record_end method of target record-btrace. */
1823 record_btrace_goto_end (struct target_ops *ops)
1825 struct thread_info *tp;
1827 tp = require_btrace_thread ();
1829 record_btrace_set_replay (tp, NULL);
1831 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1834 /* The to_goto_record method of target record-btrace. */
1837 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1839 struct thread_info *tp;
1840 struct btrace_insn_iterator it;
1841 unsigned int number;
1846 /* Check for wrap-arounds. */
1848 error (_("Instruction number out of range."));
1850 tp = require_btrace_thread ();
1852 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1854 error (_("No such instruction."));
1856 record_btrace_set_replay (tp, &it);
1858 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1861 /* Initialize the record-btrace target ops. */
1864 init_record_btrace_ops (void)
1866 struct target_ops *ops;
1868 ops = &record_btrace_ops;
1869 ops->to_shortname = "record-btrace";
1870 ops->to_longname = "Branch tracing target";
1871 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1872 ops->to_open = record_btrace_open;
1873 ops->to_close = record_btrace_close;
1874 ops->to_detach = record_detach;
1875 ops->to_disconnect = record_disconnect;
1876 ops->to_mourn_inferior = record_mourn_inferior;
1877 ops->to_kill = record_kill;
1878 ops->to_create_inferior = find_default_create_inferior;
1879 ops->to_stop_recording = record_btrace_stop_recording;
1880 ops->to_info_record = record_btrace_info;
1881 ops->to_insn_history = record_btrace_insn_history;
1882 ops->to_insn_history_from = record_btrace_insn_history_from;
1883 ops->to_insn_history_range = record_btrace_insn_history_range;
1884 ops->to_call_history = record_btrace_call_history;
1885 ops->to_call_history_from = record_btrace_call_history_from;
1886 ops->to_call_history_range = record_btrace_call_history_range;
1887 ops->to_record_is_replaying = record_btrace_is_replaying;
1888 ops->to_xfer_partial = record_btrace_xfer_partial;
1889 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1890 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1891 ops->to_fetch_registers = record_btrace_fetch_registers;
1892 ops->to_store_registers = record_btrace_store_registers;
1893 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1894 ops->to_get_unwinder = &record_btrace_frame_unwind;
1895 ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
1896 ops->to_resume = record_btrace_resume;
1897 ops->to_wait = record_btrace_wait;
1898 ops->to_find_new_threads = record_btrace_find_new_threads;
1899 ops->to_thread_alive = record_btrace_thread_alive;
1900 ops->to_goto_record_begin = record_btrace_goto_begin;
1901 ops->to_goto_record_end = record_btrace_goto_end;
1902 ops->to_goto_record = record_btrace_goto;
1903 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1904 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1905 ops->to_stratum = record_stratum;
1906 ops->to_magic = OPS_MAGIC;
1909 /* Alias for "target record". */
1912 cmd_record_btrace_start (char *args, int from_tty)
1914 if (args != NULL && *args != 0)
1915 error (_("Invalid argument."));
1917 execute_command ("target record-btrace", from_tty);
1920 void _initialize_record_btrace (void);
1922 /* Initialize btrace commands. */
1925 _initialize_record_btrace (void)
1927 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
1928 _("Start branch trace recording."),
1930 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
1932 init_record_btrace_ops ();
1933 add_target (&record_btrace_ops);
1935 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,