/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2014 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "exceptions.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                     \
  do                                                            \
    {                                                           \
      if (record_debug != 0)                                    \
        fprintf_unfiltered (gdb_stdlog,                         \
                            "[btrace] " msg "\n", ##args);      \
    }                                                           \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

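/* Illustrative note (not part of the original source): with record debugging
   enabled (e.g. via "set debug record 1"), a call such as

     DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

   prints a "[btrace] "-prefixed line to gdb_stdlog.  DEBUG_FTRACE adds an
   extra "[ftrace] " prefix for messages about function trace construction.  */
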
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return SYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (sym->symtab);
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int lbegin, lend, level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  lbegin = bfun->lbegin;
  lend = bfun->lend;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
                "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
                ibegin, iend);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (SYMBOL_LINKAGE_NAME (mfun), SYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (sym->symtab);
      fname = symtab_to_fullname (fun->symtab);
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Return non-zero if we should skip this file when generating the function
   call history, zero otherwise.
   We would want to do that if, say, a macro that is defined in another file
   is expanded in this function.  */

static int
ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
{
  struct symbol *sym;
  const char *bfile;

  sym = bfun->sym;
  if (sym == NULL)
    return 1;

  bfile = symtab_to_fullname (sym->symtab);

  return (filename_cmp (bfile, fullname) != 0);
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  /* We start with the identities of min and max, respectively.  */
  bfun->lbegin = INT_MAX;
  bfun->lend = INT_MIN;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
    }

  return bfun;
}

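/* Illustrative note (not part of the original source): function segments are
   numbered 1, 2, 3, ... in chronological order, and INSN_OFFSET is the
   number of the segment's first instruction.  For example, if segment 1
   holds four instructions starting at instruction number 1, segment 2
   starts at instruction number 5.  */
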
/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;
      CORE_ADDR pc;

      /* We do not allow empty function segments.  */
      gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));

      last = VEC_last (btrace_insn_s, bfun->insn);
      pc = last->pc;

      if (gdbarch_insn_is_call (gdbarch, pc))
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct gdbarch *gdbarch,
                   struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (gdbarch, prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost call function - this skips tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly.  */
          bfun->level = min (0, prev->level) - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned.  Let's remain at this level.  */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

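/* Note (added for clarity, not in the original source): in the "no caller"
   case above, the new segment becomes the caller of PREV via
   ftrace_fixup_caller and its level drops below PREV's, so levels can become
   negative.  The global level offset computed in btrace_compute_ftrace later
   compensates for this.  */
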
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  /* We keep the function level.  */
  bfun->level = prev->level;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct gdbarch *gdbarch,
                        struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function before, we create one.  */
  if (bfun == NULL)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      CORE_ADDR lpc;

      lpc = last->pc;

      /* Check for returns.  */
      if (gdbarch_insn_is_ret (gdbarch, lpc))
        return ftrace_new_return (gdbarch, bfun, mfun, fun);

      /* Check for calls.  */
      if (gdbarch_insn_is_call (gdbarch, lpc))
        {
          int size;

          size = gdb_insn_length (gdbarch, lpc);

          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (lpc + size != pc)
            return ftrace_new_call (bfun, mfun, fun);
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      if (last != NULL)
        {
          CORE_ADDR start, lpc;

          start = get_pc_function_start (pc);

          /* If we can't determine the function for PC, we treat a jump at
             the end of the block as tail call.  */
          if (start == 0)
            start = pc;

          lpc = last->pc;

          /* Jumps indicate optimized tail calls.  */
          if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
            return ftrace_new_tailcall (bfun, mfun, fun);
        }

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

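/* Summary note (added for clarity, not in the original source): the last
   instruction of the current segment decides how a new segment links up.  A
   return instruction produces a "return" segment, a call instruction (other
   than a call to the next instruction) produces a "call" segment, a jump
   that lands at the start of a different function produces a "tail call"
   segment, and any other unexplained change of symbol information produces
   a "switch" segment.  */
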
/* Update BFUN's source range with respect to the instruction at PC.  */

static void
ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct symtab_and_line sal;
  const char *fullname;

  sal = find_pc_line (pc, 0);
  if (sal.symtab == NULL || sal.line == 0)
    {
      DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
      return;
    }

  /* Check if we switched files.  This could happen if, say, a macro that
     is defined in another file is expanded here.  */
  fullname = symtab_to_fullname (sal.symtab);
  if (ftrace_skip_file (bfun, fullname))
    {
      DEBUG_FTRACE ("ignoring file at %s, file=%s",
                    core_addr_to_string_nz (pc), fullname);
      return;
    }

  /* Update the line range.  */
  bfun->lbegin = min (bfun->lbegin, sal.line);
  bfun->lend = max (bfun->lend, sal.line);

  if (record_debug > 1)
    ftrace_debug (bfun, "update lines");
}

/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct btrace_insn *insn;

  insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
  insn->pc = pc;

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  */

static void
btrace_compute_ftrace (struct btrace_thread_info *btinfo,
                       VEC (btrace_block_s) *btrace)
{
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  DEBUG ("compute ftrace");

  gdbarch = target_gdbarch ();
  begin = NULL;
  end = NULL;
  level = INT_MAX;
  blk = VEC_length (btrace_block_s, btrace);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace, blk);
      pc = block->begin;

      for (;;)
        {
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              warning (_("Recorded trace may be corrupted around %s."),
                       core_addr_to_string_nz (pc));
              break;
            }

          end = ftrace_update_function (gdbarch, end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = min (level, end->level);

          ftrace_update_insns (end, pc);
          ftrace_update_lines (end, pc);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          size = gdb_insn_length (gdbarch, pc);

          /* Make sure we terminate if we fail to compute the size.  */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));
              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

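/* Worked example (added for illustration, not in the original source):
   suppose the trace begins inside bar at level 0, bar returns to foo
   (level -1), and foo returns to main (level -2).  Then LEVEL is -2 and
   btinfo->level becomes 2, so adding the offset to each segment's level
   normalizes the recorded call depths to main = 0, foo = 1, bar = 2.  */
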
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp)
{
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  VEC (btrace_block_s) *btrace;
  struct cleanup *cleanup;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  if (btinfo->target == NULL)
    return;

  btrace = target_read_btrace (btinfo->target, BTRACE_READ_NEW);
  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);

  if (!VEC_empty (btrace_block_s, btrace))
    {
      btrace_clear (tp);
      btrace_compute_ftrace (btinfo, btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;

  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version = xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  VEC (btrace_block_s) **btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;
  block = VEC_safe_push (btrace_block_s, *btrace, NULL);

  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block->begin = *begin;
  block->end = *end;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

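/* Example (for illustration; derived from the element tables above, not part
   of the original source): the parser accepts documents of the form

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
       <block begin="0x400530" end="0x400540"/>
     </btrace>

   where each block describes a range of addresses that was executed
   sequentially.  btrace_compute_ftrace above walks the resulting vector from
   its last element to its first, so the chronologically oldest block is
   expected at the end of the vector.  */
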
/* See btrace.h.  */

VEC (btrace_block_s) *
parse_xml_btrace (const char *buffer)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, &btrace);
  if (errcode != 0)
    {
      do_cleanups (cleanup);
      return NULL;
    }

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */

  return btrace;
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  bfun = it->function;
  index = it->index;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;
  return bfun->insn_offset + it->index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  /* The last instruction in the last function is the current instruction.
     We point to it - it is one past the end of the execution trace.  */
  length = VEC_length (btrace_insn_s, bfun->insn);

  it->function = bfun;
  it->index = length - 1;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

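/* Usage sketch (illustrative only, not part of the original source): walking
   the recorded instruction history of a thread TP one instruction at a time.

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, &tp->btrace);
     btrace_insn_end (&end, &tp->btrace);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         printf_unfiltered ("%u  %s\n", btrace_insn_number (&it),
                            ftrace_print_insn_addr (insn));

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }
  */
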
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new
             function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* There is at least one instruction in this function segment.  */
          gdb_assert (index > 0);
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);
      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  return (int) (lnum - rnum);
}

/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    if (bfun->insn_offset <= number)
      break;

  if (bfun == NULL)
    return 0;

  end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}

/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}

/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we're looking for.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}

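/* Example (added for clarity, not in the original source): if the last
   recorded segment is number 7 and contains only the current instruction,
   the end iterator reports 7, matching btrace_call_next, which skips that
   segment.  If the last segment contains more instructions, the end iterator
   reports 8, i.e. one past the last segment.  */
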
/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}

/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}

/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction.  */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}

/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively.  We could bail out
         earlier.  On the other hand, it is very unlikely that we search for
         a nonexistent function.  */
    }

  return 0;
}

/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

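/* Usage sketch (illustrative only, not part of the original source):
   remembering the complete call history of a thread TP, e.g. before printing
   it in chunks.

     struct btrace_call_iterator begin, end;

     btrace_call_begin (&begin, &tp->btrace);
     btrace_call_end (&end, &tp->btrace);

     btrace_set_call_history (&tp->btrace, &begin, &end);
  */
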
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}