1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 /* The target_ops of record-btrace. */
40 static struct target_ops record_btrace_ops;
42 /* A new thread observer enabling branch tracing for the new thread. */
43 static struct observer *record_btrace_thread_observer;
45 /* Temporarily allow memory accesses.
      Non-zero while breakpoint insertion/removal needs to read/write memory
      during replay; see record_btrace_insert_breakpoint and
      record_btrace_remove_breakpoint, and the check in
      record_btrace_xfer_partial.  */
46 static int record_btrace_allow_memory_access;
48 /* Print a record-btrace debug message. Use do ... while (0) to avoid
49 ambiguities when used in if statements. */
51 #define DEBUG(msg, args...) \
54 if (record_debug != 0) \
55 fprintf_unfiltered (gdb_stdlog, \
56 "[record-btrace] " msg "\n", ##args); \
61 /* Update the branch trace for the current thread and return a pointer to its
64 Throws an error if there is no thread or no trace. This function never
   returns NULL (NOTE(review): interior lines are elided in this excerpt;
   the btrace_fetch call and return statement are not visible here).  */
67 static struct thread_info *
68 require_btrace_thread (void)
70 struct thread_info *tp;
   /* Look up the current thread; error out rather than return NULL.  */
74 tp = find_thread_ptid (inferior_ptid);
76 error (_("No thread."));
   /* Likewise require a non-empty trace.  */
80 if (btrace_is_empty (tp))
81 error (_("No trace."));
86 /* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
89 Throws an error if there is no thread or no trace. This function never
   returns NULL; errors are raised by require_btrace_thread below.  */
92 static struct btrace_thread_info *
95 struct thread_info *tp;
   /* Delegate the thread/trace validation, then hand back tp->btrace
      (NOTE(review): the return statement is elided in this excerpt).  */
97 tp = require_btrace_thread ();
102 /* Enable branch tracing for one thread. Warn on errors.
    Used as the new-thread observer callback; see
    record_btrace_auto_enable.  */
105 record_btrace_enable_warn (struct thread_info *tp)
107 volatile struct gdb_exception error;
    /* Enable tracing inside TRY_CATCH so a failure only warns instead of
       aborting thread creation.  */
109 TRY_CATCH (error, RETURN_MASK_ERROR)
112 if (error.message != NULL)
113 warning ("%s", error.message);
116 /* Callback function to disable branch tracing for one thread.
    ARG is the thread_info pointer; registered as a cleanup in
    record_btrace_open.  */
119 record_btrace_disable_callback (void *arg)
121 struct thread_info *tp;
128 /* Enable automatic tracing of new threads.
    Attaches record_btrace_enable_warn as a new-thread observer and
    remembers the observer so it can be detached later.  */
131 record_btrace_auto_enable (void)
133 DEBUG ("attach thread observer");
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn);
139 /* Disable automatic tracing of new threads.
    Safe to call repeatedly; a NULL observer means it was already
    detached.  */
142 record_btrace_auto_disable (void)
144 /* The observer may have been detached, already. */
145 if (record_btrace_thread_observer == NULL)
148 DEBUG ("detach thread observer");
150 observer_detach_new_thread (record_btrace_thread_observer);
151 record_btrace_thread_observer = NULL;
154 /* The to_open method of target record-btrace.
    Validates preconditions (running inferior, btrace support, all-stop),
    enables tracing on the threads selected by ARGS, pushes the target and
    notifies observers.  The cleanup chain disables tracing again if any
    step errors out before discard_cleanups.  */
157 record_btrace_open (char *args, int from_tty)
159 struct cleanup *disable_chain;
160 struct thread_info *tp;
166 if (!target_has_execution)
167 error (_("The program is not being run."));
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
173 error (_("Record btrace can't debug inferior in non-stop mode."));
175 gdb_assert (record_btrace_thread_observer == NULL);
177 disable_chain = make_cleanup (null_cleanup, NULL);
    /* Empty ARGS means "all threads"; otherwise ARGS is a thread-number
       list filter.  */
179 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
183 make_cleanup (record_btrace_disable_callback, tp);
186 record_btrace_auto_enable ();
188 push_target (&record_btrace_ops);
190 observer_notify_record_changed (current_inferior (), 1);
192 discard_cleanups (disable_chain);
195 /* The to_stop_recording method of target record-btrace.
    Stops auto-enabling on new threads, then disables tracing on every
    thread that is currently being traced (tp->btrace.target != NULL).
    NOTE(review): the thread-iteration lines are elided in this
    excerpt.  */
198 record_btrace_stop_recording (struct target_ops *self)
200 struct thread_info *tp;
202 DEBUG ("stop recording");
204 record_btrace_auto_disable ();
207 if (tp->btrace.target != NULL)
211 /* The to_close method of target record-btrace.
    Defensive teardown: recording should normally already be stopped, but
    make sure both the observer and per-thread btrace state are torn
    down.  */
214 record_btrace_close (struct target_ops *self)
216 struct thread_info *tp;
218 /* Make sure automatic recording gets disabled even if we did not stop
219 recording before closing the record-btrace target. */
220 record_btrace_auto_disable ();
222 /* We should have already stopped recording.
223 Tear down btrace in case we have not. */
225 btrace_teardown (tp);
228 /* The to_info_record method of target record-btrace.
    Prints instruction/function counts for the current thread and, when
    replaying, the current replay position.  */
231 record_btrace_info (struct target_ops *self)
233 struct btrace_thread_info *btinfo;
234 struct thread_info *tp;
235 unsigned int insns, calls;
239 tp = find_thread_ptid (inferior_ptid);
241 error (_("No thread."));
248 btinfo = &tp->btrace;
250 if (!btrace_is_empty (tp))
252 struct btrace_call_iterator call;
253 struct btrace_insn_iterator insn;
    /* The last call/insn numbers give the totals: step back one from the
       one-past-the-end iterators and read their numbers.  */
255 btrace_call_end (&call, btinfo);
256 btrace_call_prev (&call, 1);
257 calls = btrace_call_number (&call);
259 btrace_insn_end (&insn, btinfo);
260 btrace_insn_prev (&insn, 1);
261 insns = btrace_insn_number (&insn);
264 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
265 "%d (%s).\n"), insns, calls, tp->num,
266 target_pid_to_str (tp->ptid));
268 if (btrace_is_replaying (tp))
269 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
270 btrace_insn_number (btinfo->replay));
273 /* Print an unsigned int.  Convenience wrapper around ui_out_field_fmt
    for the "%u" case, used by the history printers below.  */
276 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
278 ui_out_field_fmt (uiout, fld, "%u", val);
281 /* Disassemble a section of the recorded instruction trace.
    Iterates over the half-open range [BEGIN; END), printing the trace
    index followed by a one-instruction disassembly per line.  */
284 btrace_insn_history (struct ui_out *uiout,
285 const struct btrace_insn_iterator *begin,
286 const struct btrace_insn_iterator *end, int flags)
288 struct gdbarch *gdbarch;
289 struct btrace_insn_iterator it;
291 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
292 btrace_insn_number (end));
294 gdbarch = target_gdbarch ();
296 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
298 const struct btrace_insn *insn;
300 insn = btrace_insn_get (&it);
302 /* Print the instruction index. */
303 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
304 ui_out_text (uiout, "\t");
306 /* Disassembly with '/m' flag may not produce the expected result.
    Disassemble exactly one instruction at insn->pc.  */
308 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
312 /* The to_insn_history method of target record-btrace.
    SIZE is a signed context: negative means "before the current
    position", positive "after".  The chosen window is remembered via
    btrace_set_insn_history so a repeated command continues from it.  */
315 record_btrace_insn_history (int size, int flags)
317 struct btrace_thread_info *btinfo;
318 struct btrace_insn_history *history;
319 struct btrace_insn_iterator begin, end;
320 struct cleanup *uiout_cleanup;
321 struct ui_out *uiout;
322 unsigned int context, covered;
324 uiout = current_uiout;
325 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
327 context = abs (size);
329 error (_("Bad record instruction-history-size."));
331 btinfo = require_btrace ();
332 history = btinfo->insn_history;
    /* No previous history request: pick a fresh starting point.  */
335 struct btrace_insn_iterator *replay;
337 DEBUG ("insn-history (0x%x): %d", flags, size);
339 /* If we're replaying, we start at the replay position. Otherwise, we
340 start at the tail of the trace. */
341 replay = btinfo->replay;
345 btrace_insn_end (&begin, btinfo);
347 /* We start from here and expand in the requested direction. Then we
348 expand in the other direction, as well, to fill up any remaining
    context.  */
353 /* We want the current position covered, as well. */
354 covered = btrace_insn_next (&end, 1);
355 covered += btrace_insn_prev (&begin, context - covered);
356 covered += btrace_insn_next (&end, context - covered);
360 covered = btrace_insn_next (&end, context);
361 covered += btrace_insn_prev (&begin, context - covered);
    /* Continue from the window of the previous history command.  */
366 begin = history->begin;
369 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
370 btrace_insn_number (&begin), btrace_insn_number (&end));
375 covered = btrace_insn_prev (&begin, context);
380 covered = btrace_insn_next (&end, context);
385 btrace_insn_history (uiout, &begin, &end, flags);
    /* COVERED < CONTEXT means we hit an end of the trace.  */
389 printf_unfiltered (_("At the start of the branch trace record.\n"));
391 printf_unfiltered (_("At the end of the branch trace record.\n"));
394 btrace_set_insn_history (btinfo, &begin, &end);
395 do_cleanups (uiout_cleanup);
398 /* The to_insn_history_range method of target record-btrace.
    Prints instructions with numbers in [FROM; TO], validating the range
    against ULONGEST->unsigned int truncation and trace bounds.  */
401 record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
403 struct btrace_thread_info *btinfo;
404 struct btrace_insn_history *history;
405 struct btrace_insn_iterator begin, end;
406 struct cleanup *uiout_cleanup;
407 struct ui_out *uiout;
408 unsigned int low, high;
411 uiout = current_uiout;
412 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
417 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
419 /* Check for wrap-arounds. */
420 if (low != from || high != to)
421 error (_("Bad range."));
424 error (_("Bad range."));
426 btinfo = require_btrace ();
    /* BEGIN must lie within the trace; END may not (we truncate).  */
428 found = btrace_find_insn_by_number (&begin, btinfo, low);
430 error (_("Range out of bounds."));
432 found = btrace_find_insn_by_number (&end, btinfo, high);
435 /* Silently truncate the range. */
436 btrace_insn_end (&end, btinfo);
440 /* We want both begin and end to be inclusive. */
441 btrace_insn_next (&end, 1);
444 btrace_insn_history (uiout, &begin, &end, flags);
445 btrace_set_insn_history (btinfo, &begin, &end);
447 do_cleanups (uiout_cleanup);
450 /* The to_insn_history_from method of target record-btrace.
    Translates (FROM, signed SIZE) into an inclusive [BEGIN; END] range
    and delegates to record_btrace_insn_history_range.  */
453 record_btrace_insn_history_from (ULONGEST from, int size, int flags)
455 ULONGEST begin, end, context;
457 context = abs (size);
459 error (_("Bad record instruction-history-size."));
    /* Negative SIZE: the CONTEXT instructions ending at FROM.  */
468 begin = from - context + 1;
    /* Positive SIZE: the CONTEXT instructions starting at FROM.  */
473 end = from + context - 1;
475 /* Check for wrap-around. */
480 record_btrace_insn_history_range (begin, end, flags);
483 /* Print the instruction number range for a function call history line.
    Prints BFUN's inclusive instruction range as "begin,end"; BFUN must
    contain at least one instruction.  */
486 btrace_call_history_insn_range (struct ui_out *uiout,
487 const struct btrace_function *bfun)
489 unsigned int begin, end, size;
491 size = VEC_length (btrace_insn_s, bfun->insn);
492 gdb_assert (size > 0);
494 begin = bfun->insn_offset;
495 end = begin + size - 1;
497 ui_out_field_uint (uiout, "insn begin", begin);
498 ui_out_text (uiout, ",");
499 ui_out_field_uint (uiout, "insn end", end);
502 /* Print the source line information for a function call history line.
    Prints "file:min[,max]" for BFUN's source range.  NOTE(review): the
    symbol lookup and the end/begin comparison lines are elided in this
    excerpt.  */
505 btrace_call_history_src_line (struct ui_out *uiout,
506 const struct btrace_function *bfun)
515 ui_out_field_string (uiout, "file",
516 symtab_to_filename_for_display (sym->symtab));
518 begin = bfun->lbegin;
524 ui_out_text (uiout, ":");
525 ui_out_field_int (uiout, "min line", begin);
    /* Only print the upper bound when the range spans several lines.  */
530 ui_out_text (uiout, ",");
531 ui_out_field_int (uiout, "max line", end);
534 /* Get the name of a branch trace function.
    Prefers the full symbol's print name, falls back to the minimal
    symbol's.  NOTE(review): the SYM lookup and final fallback return are
    elided in this excerpt.  */
537 btrace_get_bfun_name (const struct btrace_function *bfun)
539 struct minimal_symbol *msym;
549 return SYMBOL_PRINT_NAME (sym);
550 else if (msym != NULL)
551 return SYMBOL_PRINT_NAME (msym);
556 /* Disassemble a section of the recorded function trace.
    For each function segment in [BEGIN; END) prints its index, optional
    call-depth indentation, the function name (or "??"), and optional
    instruction-range and source-line fields controlled by FLAGS.  */
559 btrace_call_history (struct ui_out *uiout,
560 const struct btrace_thread_info *btinfo,
561 const struct btrace_call_iterator *begin,
562 const struct btrace_call_iterator *end,
563 enum record_print_flag flags)
565 struct btrace_call_iterator it;
567 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
568 btrace_call_number (end));
570 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
572 const struct btrace_function *bfun;
573 struct minimal_symbol *msym;
576 bfun = btrace_call_get (&it);
580 /* Print the function index. */
581 ui_out_field_uint (uiout, "index", bfun->number);
582 ui_out_text (uiout, "\t");
584 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
    /* Indent proportional to the call depth; BTINFO->LEVEL normalizes
       BFUN->LEVEL to be non-negative.  */
586 int level = bfun->level + btinfo->level, i;
588 for (i = 0; i < level; ++i)
589 ui_out_text (uiout, " ");
593 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
594 else if (msym != NULL)
595 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
596 else if (!ui_out_is_mi_like_p (uiout))
597 ui_out_field_string (uiout, "function", "??");
599 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
601 ui_out_text (uiout, _("\tinst "));
602 btrace_call_history_insn_range (uiout, bfun);
605 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
607 ui_out_text (uiout, _("\tat "));
608 btrace_call_history_src_line (uiout, bfun);
611 ui_out_text (uiout, "\n");
615 /* The to_call_history method of target record-btrace.
    Function-call analogue of record_btrace_insn_history: SIZE is a
    signed context window; the chosen range is stored with
    btrace_set_call_history for repeat commands.  */
618 record_btrace_call_history (int size, int flags)
620 struct btrace_thread_info *btinfo;
621 struct btrace_call_history *history;
622 struct btrace_call_iterator begin, end;
623 struct cleanup *uiout_cleanup;
624 struct ui_out *uiout;
625 unsigned int context, covered;
627 uiout = current_uiout;
628 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
630 context = abs (size);
632 error (_("Bad record function-call-history-size."));
634 btinfo = require_btrace ();
635 history = btinfo->call_history;
    /* No previous history request: pick a fresh starting point.  */
638 struct btrace_insn_iterator *replay;
640 DEBUG ("call-history (0x%x): %d", flags, size);
642 /* If we're replaying, we start at the replay position. Otherwise, we
643 start at the tail of the trace. */
644 replay = btinfo->replay;
    /* Build a call iterator from the replay position's function.  */
647 begin.function = replay->function;
648 begin.btinfo = btinfo;
651 btrace_call_end (&begin, btinfo);
653 /* We start from here and expand in the requested direction. Then we
654 expand in the other direction, as well, to fill up any remaining
    context.  */
659 /* We want the current position covered, as well. */
660 covered = btrace_call_next (&end, 1);
661 covered += btrace_call_prev (&begin, context - covered);
662 covered += btrace_call_next (&end, context - covered);
666 covered = btrace_call_next (&end, context);
667 covered += btrace_call_prev (&begin, context- covered);
    /* Continue from the window of the previous history command.  */
672 begin = history->begin;
675 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
676 btrace_call_number (&begin), btrace_call_number (&end));
681 covered = btrace_call_prev (&begin, context);
686 covered = btrace_call_next (&end, context);
691 btrace_call_history (uiout, btinfo, &begin, &end, flags);
    /* COVERED < CONTEXT means we hit an end of the trace.  */
695 printf_unfiltered (_("At the start of the branch trace record.\n"));
697 printf_unfiltered (_("At the end of the branch trace record.\n"));
700 btrace_set_call_history (btinfo, &begin, &end);
701 do_cleanups (uiout_cleanup);
704 /* The to_call_history_range method of target record-btrace.
    Prints function segments numbered [FROM; TO], validating truncation
    and trace bounds; mirrors record_btrace_insn_history_range.  */
707 record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
709 struct btrace_thread_info *btinfo;
710 struct btrace_call_history *history;
711 struct btrace_call_iterator begin, end;
712 struct cleanup *uiout_cleanup;
713 struct ui_out *uiout;
714 unsigned int low, high;
717 uiout = current_uiout;
718 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
723 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
725 /* Check for wrap-arounds. */
726 if (low != from || high != to)
727 error (_("Bad range."));
730 error (_("Bad range."));
732 btinfo = require_btrace ();
    /* BEGIN must lie within the trace; END may not (we truncate).  */
734 found = btrace_find_call_by_number (&begin, btinfo, low)
736 error (_("Range out of bounds."));
738 found = btrace_find_call_by_number (&end, btinfo, high);
741 /* Silently truncate the range. */
742 btrace_call_end (&end, btinfo);
746 /* We want both begin and end to be inclusive. */
747 btrace_call_next (&end, 1);
750 btrace_call_history (uiout, btinfo, &begin, &end, flags);
751 btrace_set_call_history (btinfo, &begin, &end);
753 do_cleanups (uiout_cleanup);
756 /* The to_call_history_from method of target record-btrace.
    Translates (FROM, signed SIZE) into an inclusive [BEGIN; END] range
    and delegates to record_btrace_call_history_range.  */
759 record_btrace_call_history_from (ULONGEST from, int size, int flags)
761 ULONGEST begin, end, context;
763 context = abs (size);
765 error (_("Bad record function-call-history-size."));
    /* Negative SIZE: the CONTEXT calls ending at FROM.  */
774 begin = from - context + 1;
    /* Positive SIZE: the CONTEXT calls starting at FROM.  */
779 end = from + context - 1;
781 /* Check for wrap-around. */
786 record_btrace_call_history_range (begin, end, flags);
789 /* The to_record_is_replaying method of target record-btrace.
    Returns non-zero if any thread is currently replaying.
    NOTE(review): the thread-iteration and return lines are elided in
    this excerpt.  */
792 record_btrace_is_replaying (struct target_ops *self)
794 struct thread_info *tp;
797 if (btrace_is_replaying (tp))
803 /* The to_xfer_partial method of target record-btrace.
    During replay, memory writes are rejected and reads are limited to
    read-only sections unless record_btrace_allow_memory_access is set
    (breakpoint insertion/removal).  Otherwise the request is forwarded
    to the target beneath.  */
805 static enum target_xfer_status
806 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
807 const char *annex, gdb_byte *readbuf,
808 const gdb_byte *writebuf, ULONGEST offset,
809 ULONGEST len, ULONGEST *xfered_len)
811 struct target_ops *t;
813 /* Filter out requests that don't make sense during replay. */
814 if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
818 case TARGET_OBJECT_MEMORY:
820 struct target_section *section;
822 /* We do not allow writing memory in general. */
823 if (writebuf != NULL)
826 return TARGET_XFER_E_UNAVAILABLE;
829 /* We allow reading readonly memory. */
830 section = target_section_by_addr (ops, offset);
833 /* Check if the section we found is readonly. */
834 if ((bfd_get_section_flags (section->the_bfd_section->owner,
835 section->the_bfd_section)
836 & SEC_READONLY) != 0)
838 /* Truncate the request to fit into this section. */
839 len = min (len, section->endaddr - offset);
845 return TARGET_XFER_E_UNAVAILABLE;
850 /* Forward the request. */
851 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
852 if (ops->to_xfer_partial != NULL)
853 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
854 offset, len, xfered_len);
    /* No target beneath could handle the request.  */
857 return TARGET_XFER_E_UNAVAILABLE;
860 /* The to_insert_breakpoint method of target record-btrace.
    Temporarily sets record_btrace_allow_memory_access so the target
    beneath can write the breakpoint, restoring the flag even on error
    before rethrowing.  */
863 record_btrace_insert_breakpoint (struct target_ops *ops,
864 struct gdbarch *gdbarch,
865 struct bp_target_info *bp_tgt)
867 volatile struct gdb_exception except;
870 /* Inserting breakpoints requires accessing memory. Allow it for the
871 duration of this function. */
872 old = record_btrace_allow_memory_access;
873 record_btrace_allow_memory_access = 1;
876 TRY_CATCH (except, RETURN_MASK_ALL)
877 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    /* Restore the flag before propagating any exception.  */
879 record_btrace_allow_memory_access = old;
881 if (except.reason < 0)
882 throw_exception (except);
887 /* The to_remove_breakpoint method of target record-btrace.
    Mirror of record_btrace_insert_breakpoint: temporarily allow memory
    access while the target beneath removes the breakpoint.  */
890 record_btrace_remove_breakpoint (struct target_ops *ops,
891 struct gdbarch *gdbarch,
892 struct bp_target_info *bp_tgt)
894 volatile struct gdb_exception except;
897 /* Removing breakpoints requires accessing memory. Allow it for the
898 duration of this function. */
899 old = record_btrace_allow_memory_access;
900 record_btrace_allow_memory_access = 1;
903 TRY_CATCH (except, RETURN_MASK_ALL)
904 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    /* Restore the flag before propagating any exception.  */
906 record_btrace_allow_memory_access = old;
908 if (except.reason < 0)
909 throw_exception (except);
914 /* The to_fetch_registers method of target record-btrace.
    While replaying, only the PC register is available: it is supplied
    from the current replay instruction.  When not replaying, forward to
    the target beneath.  */
917 record_btrace_fetch_registers (struct target_ops *ops,
918 struct regcache *regcache, int regno)
920 struct btrace_insn_iterator *replay;
921 struct thread_info *tp;
923 tp = find_thread_ptid (inferior_ptid);
924 gdb_assert (tp != NULL);
    /* Non-NULL REPLAY means this thread is replaying.  */
926 replay = tp->btrace.replay;
929 const struct btrace_insn *insn;
930 struct gdbarch *gdbarch;
933 gdbarch = get_regcache_arch (regcache);
934 pcreg = gdbarch_pc_regnum (gdbarch);
938 /* We can only provide the PC register. */
939 if (regno >= 0 && regno != pcreg)
942 insn = btrace_insn_get (replay);
943 gdb_assert (insn != NULL);
945 regcache_raw_supply (regcache, regno, &insn->pc);
949 struct target_ops *t;
951 for (t = ops->beneath; t != NULL; t = t->beneath)
952 if (t->to_fetch_registers != NULL)
954 t->to_fetch_registers (t, regcache, regno);
960 /* The to_store_registers method of target record-btrace.
    Register writes are refused during replay; otherwise forwarded to the
    first target beneath that implements to_store_registers.  */
963 record_btrace_store_registers (struct target_ops *ops,
964 struct regcache *regcache, int regno)
966 struct target_ops *t;
968 if (record_btrace_is_replaying (ops))
969 error (_("This record target does not allow writing registers."));
971 gdb_assert (may_write_registers != 0);
973 for (t = ops->beneath; t != NULL; t = t->beneath)
974 if (t->to_store_registers != NULL)
976 t->to_store_registers (t, regcache, regno);
983 /* The to_prepare_to_store method of target record-btrace.
    A no-op during replay (stores are refused anyway); otherwise
    forwarded to the target beneath.  */
986 record_btrace_prepare_to_store (struct target_ops *ops,
987 struct regcache *regcache)
989 struct target_ops *t;
991 if (record_btrace_is_replaying (ops))
994 for (t = ops->beneath; t != NULL; t = t->beneath)
995 if (t->to_prepare_to_store != NULL)
997 t->to_prepare_to_store (t, regcache);
1002 /* The branch trace frame cache.
     Per-frame state for the btrace unwinders below, allocated on the
     frame obstack and registered in BFCACHE keyed by the frame.  */
1004 struct btrace_frame_cache
1007 struct thread_info *tp;
1009 /* The frame info. */
1010 struct frame_info *frame;
1012 /* The branch trace function segment. */
1013 const struct btrace_function *bfun;
1016 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1018 static htab_t bfcache;
1020 /* hash_f for htab_create_alloc of bfcache.
     Hashes a cache entry by its frame pointer identity.  */
1023 bfcache_hash (const void *arg)
1025 const struct btrace_frame_cache *cache = arg;
1027 return htab_hash_pointer (cache->frame);
1030 /* eq_f for htab_create_alloc of bfcache.
     Two entries are equal iff they describe the same frame.  */
1033 bfcache_eq (const void *arg1, const void *arg2)
1035 const struct btrace_frame_cache *cache1 = arg1;
1036 const struct btrace_frame_cache *cache2 = arg2;
1038 return cache1->frame == cache2->frame;
1041 /* Create a new btrace frame cache.
     Allocates the cache on FRAME's obstack and inserts it into BFCACHE;
     asserts FRAME was not already cached.  */
1043 static struct btrace_frame_cache *
1044 bfcache_new (struct frame_info *frame)
1046 struct btrace_frame_cache *cache;
1049 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1050 cache->frame = frame;
1052 slot = htab_find_slot (bfcache, cache, INSERT);
1053 gdb_assert (*slot == NULL);
1059 /* Extract the branch trace function from a branch trace frame.
     Looks FRAME up in BFCACHE; NOTE(review): the NULL-slot handling and
     return are elided in this excerpt.  */
1061 static const struct btrace_function *
1062 btrace_get_frame_function (struct frame_info *frame)
1064 const struct btrace_frame_cache *cache;
1065 const struct btrace_function *bfun;
1066 struct btrace_frame_cache pattern;
1069 pattern.frame = frame;
1071 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1079 /* Implement stop_reason method for record_btrace_frame_unwind.
     The unwind stops (UNWIND_UNAVAILABLE) once the function segment has
     no caller (BFUN->UP == NULL).  */
1081 static enum unwind_stop_reason
1082 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1085 const struct btrace_frame_cache *cache;
1086 const struct btrace_function *bfun;
1088 cache = *this_cache;
1090 gdb_assert (bfun != NULL);
1092 if (bfun->up == NULL)
1093 return UNWIND_UNAVAILABLE;
1095 return UNWIND_NO_REASON;
1098 /* Implement this_id method for record_btrace_frame_unwind.
     Builds an unavailable-stack frame id from the frame's function start
     address and the first segment's number, so all segments of one
     function instance share the same id.  */
1101 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1102 struct frame_id *this_id)
1104 const struct btrace_frame_cache *cache;
1105 const struct btrace_function *bfun;
1106 CORE_ADDR code, special;
1108 cache = *this_cache;
1111 gdb_assert (bfun != NULL);
     /* Walk back to the first segment of this function instance.  */
1113 while (bfun->segment.prev != NULL)
1114 bfun = bfun->segment.prev;
1116 code = get_frame_func (this_frame);
1117 special = bfun->number;
1119 *this_id = frame_id_build_unavailable_stack_special (code, special);
1121 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1122 btrace_get_bfun_name (cache->bfun),
1123 core_addr_to_string_nz (this_id->code_addr),
1124 core_addr_to_string_nz (this_id->special_addr));
1127 /* Implement prev_register method for record_btrace_frame_unwind.
     Only the PC can be unwound: for a RET link it is the first
     instruction of the caller segment; otherwise the instruction after
     the call.  Any other register is reported unavailable.  */
1129 static struct value *
1130 record_btrace_frame_prev_register (struct frame_info *this_frame,
1134 const struct btrace_frame_cache *cache;
1135 const struct btrace_function *bfun, *caller;
1136 const struct btrace_insn *insn;
1137 struct gdbarch *gdbarch;
1141 gdbarch = get_frame_arch (this_frame);
1142 pcreg = gdbarch_pc_regnum (gdbarch);
1143 if (pcreg < 0 || regnum != pcreg)
1144 throw_error (NOT_AVAILABLE_ERROR,
1145 _("Registers are not available in btrace record history"));
1147 cache = *this_cache;
1149 gdb_assert (bfun != NULL);
1153 throw_error (NOT_AVAILABLE_ERROR,
1154 _("No caller in btrace record history"));
1156 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
     /* Returning: resume at the caller's first recorded instruction.  */
1158 insn = VEC_index (btrace_insn_s, caller->insn, 0)
     /* Called: resume after the caller's last (the call) instruction.  */
1163 insn = VEC_last (btrace_insn_s, caller->insn);
1166 pc += gdb_insn_length (gdbarch, pc);
1169 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1170 btrace_get_bfun_name (bfun), bfun->level,
1171 core_addr_to_string_nz (pc));
1173 return frame_unwind_got_address (this_frame, regnum, pc);
1176 /* Implement sniffer method for record_btrace_frame_unwind.
     Claims the innermost frame while replaying (using the replay
     position's function segment), and any frame whose NEXT frame is a
     btrace frame reached via a normal (non-tailcall) up link.  */
1179 record_btrace_frame_sniffer (const struct frame_unwind *self,
1180 struct frame_info *this_frame,
1183 const struct btrace_function *bfun;
1184 struct btrace_frame_cache *cache;
1185 struct thread_info *tp;
1186 struct frame_info *next;
1188 /* THIS_FRAME does not contain a reference to its thread. */
1189 tp = find_thread_ptid (inferior_ptid);
1190 gdb_assert (tp != NULL);
1193 next = get_next_frame (this_frame);
     /* No NEXT frame: this is the innermost frame; use the replay
        position if we are replaying.  */
1196 const struct btrace_insn_iterator *replay;
1198 replay = tp->btrace.replay;
1200 bfun = replay->function;
1204 const struct btrace_function *callee;
1206 callee = btrace_get_frame_function (next);
1207 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1214 DEBUG ("[frame] sniffed frame for %s on level %d",
1215 btrace_get_bfun_name (bfun), bfun->level);
1217 /* This is our frame. Initialize the frame cache. */
1218 cache = bfcache_new (this_frame);
1222 *this_cache = cache;
1226 /* Implement sniffer method for record_btrace_tailcall_frame_unwind.
     Claims a frame only when the NEXT frame is a btrace frame whose up
     link is marked as a tail call.  */
1229 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1230 struct frame_info *this_frame,
1233 const struct btrace_function *bfun, *callee;
1234 struct btrace_frame_cache *cache;
1235 struct frame_info *next;
1237 next = get_next_frame (this_frame);
1241 callee = btrace_get_frame_function (next);
1245 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1252 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1253 btrace_get_bfun_name (bfun), bfun->level);
1255 /* This is our frame. Initialize the frame cache. */
1256 cache = bfcache_new (this_frame);
1257 cache->tp = find_thread_ptid (inferior_ptid);
1260 *this_cache = cache;
     /* dealloc_cache method shared by both btrace unwinders: remove the
        frame's entry from BFCACHE (the cache itself lives on the frame
        obstack and is freed with it).  */
1265 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1267 struct btrace_frame_cache *cache;
1272 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1273 gdb_assert (slot != NULL);
1275 htab_remove_elt (bfcache, cache);
1278 /* btrace recording does not store previous memory content, neither the stack
1279 frames content. Any unwinding would return erroneous results as the stack
1280 contents no longer matches the changed PC value restored from history.
1281 Therefore this unwinder reports any possibly unwound registers as
     unavailable (only the PC is provided; see
     record_btrace_frame_prev_register).  */
1284 const struct frame_unwind record_btrace_frame_unwind =
1287 record_btrace_frame_unwind_stop_reason,
1288 record_btrace_frame_this_id,
1289 record_btrace_frame_prev_register,
1291 record_btrace_frame_sniffer,
1292 record_btrace_frame_dealloc_cache
     /* Same unwinder methods, but with the tailcall sniffer, so
        tail-called segments get their own frame.  */
1295 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1298 record_btrace_frame_unwind_stop_reason,
1299 record_btrace_frame_this_id,
1300 record_btrace_frame_prev_register,
1302 record_btrace_tailcall_frame_sniffer,
1303 record_btrace_frame_dealloc_cache
1306 /* Indicate that TP should be resumed according to FLAG.
     Only records the intent in TP->BTRACE.FLAGS; the actual stepping
     happens later in to_wait.  Errors if TP is already marked to
     move.  */
1309 record_btrace_resume_thread (struct thread_info *tp,
1310 enum btrace_thread_flag flag)
1312 struct btrace_thread_info *btinfo;
1314 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1316 btinfo = &tp->btrace;
1318 if ((btinfo->flags & BTHR_MOVE) != 0)
1319 error (_("Thread already moving."));
1321 /* Fetch the latest branch trace. */
1324 btinfo->flags |= flag;
1327 /* Find the thread to resume given a PTID.
     A wildcard or process-wide PTID resolves to the current thread.  May
     return NULL if no such thread exists.  */
1329 static struct thread_info *
1330 record_btrace_find_resume_thread (ptid_t ptid)
1332 struct thread_info *tp;
1334 /* When asked to resume everything, we pick the current thread. */
1335 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1336 ptid = inferior_ptid;
1338 return find_thread_ptid (ptid);
1341 /* Start replaying a thread.
     Allocates TP's replay iterator positioned at the end of the trace
     (the current instruction), fixing up stepping-related frame ids so
     step detection keeps working under the btrace unwinder.  On error,
     replay state is rolled back and the exception rethrown.  Returns the
     new iterator (NULL if there is no trace).  */
1343 static struct btrace_insn_iterator *
1344 record_btrace_start_replaying (struct thread_info *tp)
1346 volatile struct gdb_exception except;
1347 struct btrace_insn_iterator *replay;
1348 struct btrace_thread_info *btinfo;
1351 btinfo = &tp->btrace;
1354 /* We can't start replaying without trace. */
1355 if (btinfo->begin == NULL)
1358 /* Clear the executing flag to allow changes to the current frame.
1359 We are not actually running, yet. We just started a reverse execution
1360 command or a record goto command.
1361 For the latter, EXECUTING is false and this has no effect.
1362 For the former, EXECUTING is true and we're in to_wait, about to
1363 move the thread. Since we need to recompute the stack, we temporarily
1364 set EXECUTING to false. */
1365 executing = is_executing (tp->ptid);
1366 set_executing (tp->ptid, 0);
1368 /* GDB stores the current frame_id when stepping in order to detect steps
1370 Since frames are computed differently when we're replaying, we need to
1371 recompute those stored frames and fix them up so we can still detect
1372 subroutines after we started replaying. */
1373 TRY_CATCH (except, RETURN_MASK_ALL)
1375 struct frame_info *frame;
1376 struct frame_id frame_id;
1377 int upd_step_frame_id, upd_step_stack_frame_id;
1379 /* The current frame without replaying - computed via normal unwind. */
1380 frame = get_current_frame ();
1381 frame_id = get_frame_id (frame);
1383 /* Check if we need to update any stepping-related frame id's. */
1384 upd_step_frame_id = frame_id_eq (frame_id,
1385 tp->control.step_frame_id);
1386 upd_step_stack_frame_id = frame_id_eq (frame_id,
1387 tp->control.step_stack_frame_id);
1389 /* We start replaying at the end of the branch trace. This corresponds
1390 to the current instruction. */
1391 replay = xmalloc (sizeof (*replay));
1392 btrace_insn_end (replay, btinfo);
1394 /* We're not replaying, yet. */
1395 gdb_assert (btinfo->replay == NULL);
1396 btinfo->replay = replay;
1398 /* Make sure we're not using any stale registers. */
1399 registers_changed_ptid (tp->ptid);
1401 /* The current frame with replaying - computed via btrace unwind. */
1402 frame = get_current_frame ();
1403 frame_id = get_frame_id (frame);
1405 /* Replace stepping related frames where necessary. */
1406 if (upd_step_frame_id)
1407 tp->control.step_frame_id = frame_id;
1408 if (upd_step_stack_frame_id)
1409 tp->control.step_stack_frame_id = frame_id;
1412 /* Restore the previous execution state. */
1413 set_executing (tp->ptid, executing);
     /* On failure, undo the replay setup before rethrowing.  */
1415 if (except.reason < 0)
1417 xfree (btinfo->replay);
1418 btinfo->replay = NULL;
1420 registers_changed_ptid (tp->ptid);
1422 throw_exception (except);
1428 /* Stop replaying a thread.
     Frees and clears TP's replay iterator; a NULL replay iterator is the
     "not replaying" state, so this is safe to call unconditionally.  */
1431 record_btrace_stop_replaying (struct thread_info *tp)
1433 struct btrace_thread_info *btinfo;
1435 btinfo = &tp->btrace;
1437 xfree (btinfo->replay);
1438 btinfo->replay = NULL;
1440 /* Make sure we're not leaving any stale registers. */
1441 registers_changed_ptid (tp->ptid);
1444 /* The to_resume method of target record-btrace.
     When not replaying (and not reversing), forwards the resume to the
     target beneath.  Otherwise only records the move intent as a
     BTHR_* flag; record_btrace_wait performs the actual stepping.  */
1447 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1448 enum gdb_signal signal)
1450 struct thread_info *tp, *other;
1451 enum btrace_thread_flag flag;
1453 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1455 tp = record_btrace_find_resume_thread (ptid);
1457 error (_("Cannot find thread to resume."));
1459 /* Stop replaying other threads if the thread to resume is not replaying. */
1460 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1462 record_btrace_stop_replaying (other);
1464 /* As long as we're not replaying, just forward the request. */
1465 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1467 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1468 if (ops->to_resume != NULL)
1469 return ops->to_resume (ops, ptid, step, signal);
1471 error (_("Cannot find target for stepping."));
1474 /* Compute the btrace thread flag for the requested move. */
1476 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1478 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1480 /* At the moment, we only move a single thread. We could also move
1481 all threads in parallel by single-stepping each resumed thread
1482 until the first runs into an event.
1483 When we do that, we would want to continue all other threads.
1484 For now, just resume one thread to not confuse to_wait. */
1485 record_btrace_resume_thread (tp, flag);
1487 /* We just indicate the resume intent here. The actual stepping happens in
1488 record_btrace_wait below. */
1491 /* Find a thread to move. */
1493 static struct thread_info *
1494 record_btrace_find_thread_to_move (ptid_t ptid)
1496 struct thread_info *tp;
1498 /* First check the parameter thread. */
1499 tp = find_thread_ptid (ptid);
1500 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1503 /* Otherwise, find one other thread that has been resumed. */
1505 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1511 /* Return a target_waitstatus indicating that we ran out of history. */
1513 static struct target_waitstatus
1514 btrace_step_no_history (void)
1516 struct target_waitstatus status;
1518 status.kind = TARGET_WAITKIND_NO_HISTORY;
1523 /* Return a target_waitstatus indicating that a step finished. */
1525 static struct target_waitstatus
1526 btrace_step_stopped (void)
1528 struct target_waitstatus status;
1530 status.kind = TARGET_WAITKIND_STOPPED;
1531 status.value.sig = GDB_SIGNAL_TRAP;
1536 /* Clear the record histories. */
1539 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1541 xfree (btinfo->insn_history);
1542 xfree (btinfo->call_history);
1544 btinfo->insn_history = NULL;
1545 btinfo->call_history = NULL;
1548 /* Step a single thread. */
1550 static struct target_waitstatus
1551 record_btrace_step_thread (struct thread_info *tp)
1553 struct btrace_insn_iterator *replay, end;
1554 struct btrace_thread_info *btinfo;
1555 struct address_space *aspace;
1556 struct inferior *inf;
1557 enum btrace_thread_flag flags;
1560 btinfo = &tp->btrace;
1561 replay = btinfo->replay;
1563 flags = btinfo->flags & BTHR_MOVE;
1564 btinfo->flags &= ~BTHR_MOVE;
1566 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1571 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1574 /* We're done if we're not replaying. */
1576 return btrace_step_no_history ();
1578 /* We are always able to step at least once. */
1579 steps = btrace_insn_next (replay, 1);
1580 gdb_assert (steps == 1);
1582 /* Determine the end of the instruction trace. */
1583 btrace_insn_end (&end, btinfo);
1585 /* We stop replaying if we reached the end of the trace. */
1586 if (btrace_insn_cmp (replay, &end) == 0)
1587 record_btrace_stop_replaying (tp);
1589 return btrace_step_stopped ();
1592 /* Start replaying if we're not already doing so. */
1594 replay = record_btrace_start_replaying (tp);
1596 /* If we can't step any further, we reached the end of the history. */
1597 steps = btrace_insn_prev (replay, 1);
1599 return btrace_step_no_history ();
1601 return btrace_step_stopped ();
1604 /* We're done if we're not replaying. */
1606 return btrace_step_no_history ();
1608 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1609 aspace = inf->aspace;
1611 /* Determine the end of the instruction trace. */
1612 btrace_insn_end (&end, btinfo);
1616 const struct btrace_insn *insn;
1618 /* We are always able to step at least once. */
1619 steps = btrace_insn_next (replay, 1);
1620 gdb_assert (steps == 1);
1622 /* We stop replaying if we reached the end of the trace. */
1623 if (btrace_insn_cmp (replay, &end) == 0)
1625 record_btrace_stop_replaying (tp);
1626 return btrace_step_no_history ();
1629 insn = btrace_insn_get (replay);
1632 DEBUG ("stepping %d (%s) ... %s", tp->num,
1633 target_pid_to_str (tp->ptid),
1634 core_addr_to_string_nz (insn->pc));
1636 if (breakpoint_here_p (aspace, insn->pc))
1637 return btrace_step_stopped ();
1641 /* Start replaying if we're not already doing so. */
1643 replay = record_btrace_start_replaying (tp);
1645 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1646 aspace = inf->aspace;
1650 const struct btrace_insn *insn;
1652 /* If we can't step any further, we're done. */
1653 steps = btrace_insn_prev (replay, 1);
1655 return btrace_step_no_history ();
1657 insn = btrace_insn_get (replay);
1660 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1661 target_pid_to_str (tp->ptid),
1662 core_addr_to_string_nz (insn->pc));
1664 if (breakpoint_here_p (aspace, insn->pc))
1665 return btrace_step_stopped ();
1670 /* The to_wait method of target record-btrace. */
1673 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1674 struct target_waitstatus *status, int options)
1676 struct thread_info *tp, *other;
1678 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1680 /* As long as we're not replaying, just forward the request. */
1681 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1683 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1684 if (ops->to_wait != NULL)
1685 return ops->to_wait (ops, ptid, status, options);
1687 error (_("Cannot find target for waiting."));
1690 /* Let's find a thread to move. */
1691 tp = record_btrace_find_thread_to_move (ptid);
1694 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1696 status->kind = TARGET_WAITKIND_IGNORE;
1697 return minus_one_ptid;
1700 /* We only move a single thread. We're not able to correlate threads. */
1701 *status = record_btrace_step_thread (tp);
1703 /* Stop all other threads. */
1706 other->btrace.flags &= ~BTHR_MOVE;
1708 /* Start record histories anew from the current position. */
1709 record_btrace_clear_histories (&tp->btrace);
1711 /* We moved the replay position but did not update registers. */
1712 registers_changed_ptid (tp->ptid);
/* The to_can_execute_reverse method of target record-btrace.  Branch
   tracing always supports reverse execution within the recorded trace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1725 /* The to_decr_pc_after_break method of target record-btrace. */
1728 record_btrace_decr_pc_after_break (struct target_ops *ops,
1729 struct gdbarch *gdbarch)
1731 /* When replaying, we do not actually execute the breakpoint instruction
1732 so there is no need to adjust the PC after hitting a breakpoint. */
1733 if (record_btrace_is_replaying (ops))
1736 return forward_target_decr_pc_after_break (ops->beneath, gdbarch);
1739 /* The to_find_new_threads method of target record-btrace. */
1742 record_btrace_find_new_threads (struct target_ops *ops)
1744 /* Don't expect new threads if we're replaying. */
1745 if (record_btrace_is_replaying (ops))
1748 /* Forward the request. */
1749 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1750 if (ops->to_find_new_threads != NULL)
1752 ops->to_find_new_threads (ops);
1757 /* The to_thread_alive method of target record-btrace. */
1760 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1762 /* We don't add or remove threads during replay. */
1763 if (record_btrace_is_replaying (ops))
1764 return find_thread_ptid (ptid) != NULL;
1766 /* Forward the request. */
1767 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1768 if (ops->to_thread_alive != NULL)
1769 return ops->to_thread_alive (ops, ptid);
1774 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1778 record_btrace_set_replay (struct thread_info *tp,
1779 const struct btrace_insn_iterator *it)
1781 struct btrace_thread_info *btinfo;
1783 btinfo = &tp->btrace;
1785 if (it == NULL || it->function == NULL)
1786 record_btrace_stop_replaying (tp);
1789 if (btinfo->replay == NULL)
1790 record_btrace_start_replaying (tp);
1791 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1794 *btinfo->replay = *it;
1795 registers_changed_ptid (tp->ptid);
1798 /* Start anew from the new replay position. */
1799 record_btrace_clear_histories (btinfo);
1802 /* The to_goto_record_begin method of target record-btrace. */
1805 record_btrace_goto_begin (struct target_ops *self)
1807 struct thread_info *tp;
1808 struct btrace_insn_iterator begin;
1810 tp = require_btrace_thread ();
1812 btrace_insn_begin (&begin, &tp->btrace);
1813 record_btrace_set_replay (tp, &begin);
1815 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1818 /* The to_goto_record_end method of target record-btrace. */
1821 record_btrace_goto_end (struct target_ops *ops)
1823 struct thread_info *tp;
1825 tp = require_btrace_thread ();
1827 record_btrace_set_replay (tp, NULL);
1829 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1832 /* The to_goto_record method of target record-btrace. */
1835 record_btrace_goto (ULONGEST insn)
1837 struct thread_info *tp;
1838 struct btrace_insn_iterator it;
1839 unsigned int number;
1844 /* Check for wrap-arounds. */
1846 error (_("Instruction number out of range."));
1848 tp = require_btrace_thread ();
1850 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1852 error (_("No such instruction."));
1854 record_btrace_set_replay (tp, &it);
1856 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1859 /* Initialize the record-btrace target ops. */
1862 init_record_btrace_ops (void)
1864 struct target_ops *ops;
1866 ops = &record_btrace_ops;
1867 ops->to_shortname = "record-btrace";
1868 ops->to_longname = "Branch tracing target";
1869 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1870 ops->to_open = record_btrace_open;
1871 ops->to_close = record_btrace_close;
1872 ops->to_detach = record_detach;
1873 ops->to_disconnect = record_disconnect;
1874 ops->to_mourn_inferior = record_mourn_inferior;
1875 ops->to_kill = record_kill;
1876 ops->to_create_inferior = find_default_create_inferior;
1877 ops->to_stop_recording = record_btrace_stop_recording;
1878 ops->to_info_record = record_btrace_info;
1879 ops->to_insn_history = record_btrace_insn_history;
1880 ops->to_insn_history_from = record_btrace_insn_history_from;
1881 ops->to_insn_history_range = record_btrace_insn_history_range;
1882 ops->to_call_history = record_btrace_call_history;
1883 ops->to_call_history_from = record_btrace_call_history_from;
1884 ops->to_call_history_range = record_btrace_call_history_range;
1885 ops->to_record_is_replaying = record_btrace_is_replaying;
1886 ops->to_xfer_partial = record_btrace_xfer_partial;
1887 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1888 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1889 ops->to_fetch_registers = record_btrace_fetch_registers;
1890 ops->to_store_registers = record_btrace_store_registers;
1891 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1892 ops->to_get_unwinder = &record_btrace_frame_unwind;
1893 ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
1894 ops->to_resume = record_btrace_resume;
1895 ops->to_wait = record_btrace_wait;
1896 ops->to_find_new_threads = record_btrace_find_new_threads;
1897 ops->to_thread_alive = record_btrace_thread_alive;
1898 ops->to_goto_record_begin = record_btrace_goto_begin;
1899 ops->to_goto_record_end = record_btrace_goto_end;
1900 ops->to_goto_record = record_btrace_goto;
1901 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1902 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1903 ops->to_stratum = record_stratum;
1904 ops->to_magic = OPS_MAGIC;
1907 /* Alias for "target record". */
1910 cmd_record_btrace_start (char *args, int from_tty)
1912 if (args != NULL && *args != 0)
1913 error (_("Invalid argument."));
1915 execute_command ("target record-btrace", from_tty);
1918 void _initialize_record_btrace (void);
1920 /* Initialize btrace commands. */
1923 _initialize_record_btrace (void)
1925 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
1926 _("Start branch trace recording."),
1928 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
1930 init_record_btrace_ops ();
1931 add_target (&record_btrace_ops);
1933 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,