1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "gdbthread.h"
24 #include "exceptions.h"
31 #include "filenames.h"
32 #include "xml-support.h"
34 /* Print a record debug message.  Use do ... while (0) to avoid ambiguities
35 when used in if statements.  */
/* NOTE(review): GNU-style named variadic macro (args...) with ##args to
   swallow the comma when no arguments are given — GCC extension, not ISO C.
   The do/while(0) wrapper lines appear elided in this excerpt; confirm
   against the complete file.  */
37 #define DEBUG(msg, args...) \
40 if (record_debug != 0) \
41 fprintf_unfiltered (gdb_stdlog, \
42 "[btrace] " msg "\n", ##args); \
/* Same as DEBUG but additionally tags the message with "[ftrace]".  */
46 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
48 /* Return the function name of a recorded function segment for printing.
49 This function never returns NULL.  */
/* NOTE(review): interior lines (return type, braces, the sym/msym NULL
   checks and the "<unknown>" fallback implied by the never-NULL contract)
   are elided in this excerpt — verify against the full source.  */
52 ftrace_print_function_name (const struct btrace_function *bfun)
54 struct minimal_symbol *msym;
/* Prefer the full symbol's print name when available ...  */
61 return SYMBOL_PRINT_NAME (sym);
/* ... otherwise fall back to the minimal symbol's print name.  */
64 return SYMBOL_PRINT_NAME (msym);
69 /* Return the file name of a recorded function segment for printing.
70 This function never returns NULL.  */
73 ftrace_print_filename (const struct btrace_function *bfun)
/* Use the user-visible file name of the segment's symtab when we have
   symbol information; "<unknown>" otherwise (satisfies the never-NULL
   contract).  */
81 filename = symtab_to_filename_for_display (sym->symtab);
83 filename = "<unknown>";
88 /* Return a string representation of the address of an instruction.
89 This function never returns NULL.  */
92 ftrace_print_insn_addr (const struct btrace_insn *insn)
/* core_addr_to_string_nz prints without leading zeros.  */
97 return core_addr_to_string_nz (insn->pc);
100 /* Print an ftrace debug status message.  */
/* BFUN is the function segment to describe; PREFIX tags the message
   (e.g. "new call", "update lines").  NOTE(review): several lines are
   elided in this excerpt (lend/level assignments, closing of the DEBUG
   call); confirm against the full source.  */
103 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
105 const char *fun, *file;
106 unsigned int ibegin, iend;
107 int lbegin, lend, level;
109 fun = ftrace_print_function_name (bfun);
110 file = ftrace_print_filename (bfun);
113 lbegin = bfun->lbegin;
/* The instruction range is half-open: [ibegin; iend), per the format
   string below.  */
116 ibegin = bfun->insn_offset;
117 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
119 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
120 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
124 /* Return non-zero if BFUN does not match MFUN and FUN,
125 return zero otherwise.  */
/* BFUN is the current function segment; MFUN/FUN are the (possibly NULL)
   minimal and full symbols for the instruction being processed.  The four
   checks below detect: changed minimal symbol, changed full symbol (name
   or defining file), lost symbol info, and gained symbol info.
   NOTE(review): msym/sym are presumably taken from BFUN on elided lines —
   confirm against the full source.  */
128 ftrace_function_switched (const struct btrace_function *bfun,
129 const struct minimal_symbol *mfun,
130 const struct symbol *fun)
132 struct minimal_symbol *msym;
138 /* If the minimal symbol changed, we certainly switched functions.  */
139 if (mfun != NULL && msym != NULL
140 && strcmp (SYMBOL_LINKAGE_NAME (mfun), SYMBOL_LINKAGE_NAME (msym)) != 0)
143 /* If the symbol changed, we certainly switched functions.  */
144 if (fun != NULL && sym != NULL)
146 const char *bfname, *fname;
148 /* Check the function name.  */
149 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
152 /* Check the location of those functions, as well.  */
153 bfname = symtab_to_fullname (sym->symtab);
154 fname = symtab_to_fullname (fun->symtab);
155 if (filename_cmp (fname, bfname) != 0)
159 /* If we lost symbol information, we switched functions.  */
160 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
163 /* If we gained symbol information, we switched functions.  */
164 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
170 /* Return non-zero if we should skip this file when generating the function
171 call history, zero otherwise.
172 We would want to do that if, say, a macro that is defined in another file
173 is expanded in this function.  */
/* FULLNAME is the fully-resolved path of the file the current line came
   from; it is compared against BFUN's own defining file.  NOTE(review):
   the guard for missing symbol info is elided in this excerpt.  */
176 ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
185 bfile = symtab_to_fullname (sym->symtab);
/* Non-zero (skip) when the file differs from the segment's own file.  */
187 return (filename_cmp (bfile, fullname) != 0);
190 /* Allocate and initialize a new branch trace function segment.
191 PREV is the chronologically preceding function segment.
192 MFUN and FUN are the symbol information we have for this function.  */
194 static struct btrace_function *
195 ftrace_new_function (struct btrace_function *prev,
196 struct minimal_symbol *mfun,
199 struct btrace_function *bfun;
/* xzalloc zero-fills, so all links/flags start out NULL/0.  */
201 bfun = xzalloc (sizeof (*bfun));
205 bfun->flow.prev = prev;
207 /* We start with the identities of min and max, respectively.  */
208 bfun->lbegin = INT_MAX;
209 bfun->lend = INT_MIN;
/* First segment of the trace (PREV == NULL): instruction numbering
   starts at one.  */
213 /* Start counting at one.  */
215 bfun->insn_offset = 1;
/* Otherwise link into the chronological flow chain after PREV and
   continue its numbering.  */
219 gdb_assert (prev->flow.next == NULL);
220 prev->flow.next = bfun;
222 bfun->number = prev->number + 1;
223 bfun->insn_offset = (prev->insn_offset
224 + VEC_length (btrace_insn_s, prev->insn));
230 /* Update the UP field of a function segment.  */
/* Sets BFUN's caller link to CALLER with link-type FLAGS; logs whether
   this overwrites an existing caller or sets it fresh.  NOTE(review): the
   actual assignments to bfun->up / bfun->flags are on elided lines.  */
233 ftrace_update_caller (struct btrace_function *bfun,
234 struct btrace_function *caller,
235 enum btrace_function_flag flags)
237 if (bfun->up != NULL)
238 ftrace_debug (bfun, "updating caller");
243 ftrace_debug (bfun, "set caller");
246 /* Fix up the caller for all segments of a function.  */
/* Walks the segment chain (same-function instances linked via
   bfun->segment) in both directions and applies the same caller/flags to
   every segment.  */
249 ftrace_fixup_caller (struct btrace_function *bfun,
250 struct btrace_function *caller,
251 enum btrace_function_flag flags)
253 struct btrace_function *prev, *next;
255 ftrace_update_caller (bfun, caller, flags);
257 /* Update all function segments belonging to the same function.  */
258 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
259 ftrace_update_caller (prev, caller, flags);
261 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
262 ftrace_update_caller (next, caller, flags);
265 /* Add a new function segment for a call.
266 CALLER is the chronologically preceding function segment.
267 MFUN and FUN are the symbol information we have for this function.  */
/* NOTE(review): the bfun->up assignment implied by the call semantics is
   on elided lines — confirm against the full source.  */
269 static struct btrace_function *
270 ftrace_new_call (struct btrace_function *caller,
271 struct minimal_symbol *mfun,
274 struct btrace_function *bfun;
276 bfun = ftrace_new_function (caller, mfun, fun);
/* A call descends one level in the call stack.  */
278 bfun->level = caller->level + 1;
280 ftrace_debug (bfun, "new call");
285 /* Add a new function segment for a tail call.
286 CALLER is the chronologically preceding function segment.
287 MFUN and FUN are the symbol information we have for this function.  */
289 static struct btrace_function *
290 ftrace_new_tailcall (struct btrace_function *caller,
291 struct minimal_symbol *mfun,
294 struct btrace_function *bfun;
296 bfun = ftrace_new_function (caller, mfun, fun);
298 bfun->level = caller->level + 1;
/* Mark the up-link so unwinding knows it crosses a tail call.  */
299 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
301 ftrace_debug (bfun, "new tail call");
306 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
307 symbol information.  */
/* Walks the caller chain (bfun->up); returns the first segment whose
   symbol info matches, or NULL when the chain is exhausted.  */
309 static struct btrace_function *
310 ftrace_find_caller (struct btrace_function *bfun,
311 struct minimal_symbol *mfun,
314 for (; bfun != NULL; bfun = bfun->up)
316 /* Skip functions with incompatible symbol information.  */
317 if (ftrace_function_switched (bfun, mfun, fun))
320 /* This is the function segment we're looking for.  */
327 /* Find the innermost caller in the back trace of BFUN, skipping all
328 function segments that do not end with a call instruction (e.g.
329 tail calls ending with a jump).  */
331 static struct btrace_function *
332 ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
334 for (; bfun != NULL; bfun = bfun->up)
336 struct btrace_insn *last;
339 /* We do not allow empty function segments.  */
340 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
/* Check whether the segment's final instruction is an actual call
   instruction for this architecture.  */
342 last = VEC_last (btrace_insn_s, bfun->insn);
345 if (gdbarch_insn_is_call (gdbarch, pc))
352 /* Add a continuation segment for a function into which we return.
353 PREV is the chronologically preceding function segment.
354 MFUN and FUN are the symbol information we have for this function.  */
/* Three cases are handled below: (1) a matching caller segment exists —
   link the new segment as its continuation; (2) no caller and no call in
   the back trace — assume the call fell outside the trace and put the
   segment below everything seen so far; (3) no matching caller but some
   call exists — an unknown caller; stay at PREV's level.  */
356 static struct btrace_function *
357 ftrace_new_return (struct gdbarch *gdbarch,
358 struct btrace_function *prev,
359 struct minimal_symbol *mfun,
362 struct btrace_function *bfun, *caller;
364 bfun = ftrace_new_function (prev, mfun, fun);
366 /* It is important to start at PREV's caller.  Otherwise, we might find
367 PREV itself, if PREV is a recursive function.  */
368 caller = ftrace_find_caller (prev->up, mfun, fun);
371 /* The caller of PREV is the preceding btrace function segment in this
372 function instance.  */
373 gdb_assert (caller->segment.next == NULL);
375 caller->segment.next = bfun;
376 bfun->segment.prev = caller;
378 /* Maintain the function level.  */
379 bfun->level = caller->level;
381 /* Maintain the call stack.  */
382 bfun->up = caller->up;
383 bfun->flags = caller->flags;
385 ftrace_debug (bfun, "new return");
389 /* We did not find a caller.  This could mean that something went
390 wrong or that the call is simply not included in the trace.  */
392 /* Let's search for some actual call.  */
393 caller = ftrace_find_call (gdbarch, prev->up);
396 /* There is no call in PREV's back trace.  We assume that the
397 branch trace did not include it.  */
399 /* Let's find the topmost call function - this skips tail calls.  */
400 while (prev->up != NULL)
403 /* We maintain levels for a series of returns for which we have
405 We start at the preceding function's level in case this has
406 already been a return for which we have not seen the call.
407 We start at level 0 otherwise, to handle tail calls correctly.  */
408 bfun->level = min (0, prev->level) - 1;
410 /* Fix up the call stack for PREV.  */
411 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
413 ftrace_debug (bfun, "new return - no caller");
417 /* There is a call in PREV's back trace to which we should have
418 returned.  Let's remain at this level.  */
419 bfun->level = prev->level;
421 ftrace_debug (bfun, "new return - unknown caller");
428 /* Add a new function segment for a function switch.
429 PREV is the chronologically preceding function segment.
430 MFUN and FUN are the symbol information we have for this function.  */
432 static struct btrace_function *
433 ftrace_new_switch (struct btrace_function *prev,
434 struct minimal_symbol *mfun,
437 struct btrace_function *bfun;
439 /* This is an unexplained function switch.  The call stack will likely
440 be wrong at this point.  */
441 bfun = ftrace_new_function (prev, mfun, fun);
443 /* We keep the function level.  */
444 bfun->level = prev->level;
446 ftrace_debug (bfun, "new switch");
451 /* Update BFUN with respect to the instruction at PC.  This may create new
453 Return the chronologically latest function segment, never NULL.  */
/* Dispatcher: classifies the transition to PC (return, call, tail call,
   or unexplained switch) and delegates to the matching ftrace_new_*
   constructor; returns BFUN unchanged when no transition is detected.
   NOTE(review): several guard/brace lines are elided in this excerpt.  */
455 static struct btrace_function *
456 ftrace_update_function (struct gdbarch *gdbarch,
457 struct btrace_function *bfun, CORE_ADDR pc)
459 struct bound_minimal_symbol bmfun;
460 struct minimal_symbol *mfun;
462 struct btrace_insn *last;
464 /* Try to determine the function we're in.  We use both types of symbols
465 to avoid surprises when we sometimes get a full symbol and sometimes
466 only a minimal symbol.  */
467 fun = find_pc_function (pc);
468 bmfun = lookup_minimal_symbol_by_pc (pc);
471 if (fun == NULL && mfun == NULL)
472 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
474 /* If we didn't have a function before, we create one.  */
476 return ftrace_new_function (bfun, mfun, fun);
478 /* Check the last instruction, if we have one.
479 We do this check first, since it allows us to fill in the call stack
480 links in addition to the normal flow links.  */
482 if (!VEC_empty (btrace_insn_s, bfun->insn))
483 last = VEC_last (btrace_insn_s, bfun->insn);
491 /* Check for returns.  */
492 if (gdbarch_insn_is_ret (gdbarch, lpc))
493 return ftrace_new_return (gdbarch, bfun, mfun, fun);
495 /* Check for calls.  */
496 if (gdbarch_insn_is_call (gdbarch, lpc))
500 size = gdb_insn_length (gdbarch, lpc);
502 /* Ignore calls to the next instruction.  They are used for PIC.  */
503 if (lpc + size != pc)
504 return ftrace_new_call (bfun, mfun, fun);
508 /* Check if we're switching functions for some other reason.  */
509 if (ftrace_function_switched (bfun, mfun, fun))
511 DEBUG_FTRACE ("switching from %s in %s at %s",
512 ftrace_print_insn_addr (last),
513 ftrace_print_function_name (bfun),
514 ftrace_print_filename (bfun))
518 CORE_ADDR start, lpc;
520 start = get_pc_function_start (pc);
522 /* If we can't determine the function for PC, we treat a jump at
523 the end of the block as tail call.  */
529 /* Jumps indicate optimized tail calls.  */
530 if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
531 return ftrace_new_tailcall (bfun, mfun, fun);
534 return ftrace_new_switch (bfun, mfun, fun);
540 /* Update BFUN's source range with respect to the instruction at PC.  */
/* Widens [lbegin; lend] to include the source line of PC; lines from
   other files (macro expansions) and unknown lines are ignored.  */
543 ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
545 struct symtab_and_line sal;
546 const char *fullname;
548 sal = find_pc_line (pc, 0);
549 if (sal.symtab == NULL || sal.line == 0)
551 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
555 /* Check if we switched files.  This could happen if, say, a macro that
556 is defined in another file is expanded here.  */
557 fullname = symtab_to_fullname (sal.symtab);
558 if (ftrace_skip_file (bfun, fullname))
560 DEBUG_FTRACE ("ignoring file at %s, file=%s",
561 core_addr_to_string_nz (pc), fullname);
565 /* Update the line range.  */
566 bfun->lbegin = min (bfun->lbegin, sal.line);
567 bfun->lend = max (bfun->lend, sal.line);
569 if (record_debug > 1)
570 ftrace_debug (bfun, "update lines");
573 /* Add the instruction at PC to BFUN's instructions.  */
/* VEC_safe_push with NULL reserves a slot and returns a pointer to it;
   the PC assignment is on an elided line.  */
576 ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
578 struct btrace_insn *insn;
580 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
583 if (record_debug > 1)
584 ftrace_debug (bfun, "update insn");
587 /* Compute the function branch trace from a block branch trace BTRACE for
588 a thread given by BTINFO.  */
/* Iterates the raw block trace back-to-front (blocks are stored newest
   first), disassembling each block instruction by instruction and feeding
   every PC through ftrace_update_function / ftrace_update_insns /
   ftrace_update_lines.  Tracks the minimal function level so that
   btinfo->level normalizes all levels to start at zero.
   NOTE(review): the inner instruction loop and several declarations are
   elided in this excerpt.  */
591 btrace_compute_ftrace (struct btrace_thread_info *btinfo,
592 VEC (btrace_block_s) *btrace)
594 struct btrace_function *begin, *end;
595 struct gdbarch *gdbarch;
599 DEBUG ("compute ftrace");
601 gdbarch = target_gdbarch ();
605 blk = VEC_length (btrace_block_s, btrace);
609 btrace_block_s *block;
614 block = VEC_index (btrace_block_s, btrace, blk);
621 /* We should hit the end of the block.  Warn if we went too far.  */
624 warning (_("Recorded trace may be corrupted around %s."),
625 core_addr_to_string_nz (pc));
629 end = ftrace_update_function (gdbarch, end, pc);
633 /* Maintain the function level offset.
634 For all but the last block, we do it here.  */
636 level = min (level, end->level);
638 ftrace_update_insns (end, pc);
639 ftrace_update_lines (end, pc);
641 /* We're done once we pushed the instruction at the end.  */
642 if (block->end == pc)
645 size = gdb_insn_length (gdbarch, pc);
647 /* Make sure we terminate if we fail to compute the size.  */
650 warning (_("Recorded trace may be incomplete around %s."),
651 core_addr_to_string_nz (pc));
657 /* Maintain the function level offset.
658 For the last block, we do it here to not consider the last
660 Since the last instruction corresponds to the current instruction
661 and is not really part of the execution history, it shouldn't
664 level = min (level, end->level);
668 btinfo->begin = begin;
671 /* LEVEL is the minimal function level of all btrace function segments.
672 Define the global level offset to -LEVEL so all function levels are
673 normalized to start at zero.  */
674 btinfo->level = -level;
/* Enable branch tracing for thread TP.  No-op if already enabled; errors
   out if the target cannot do branch tracing.  */
680 btrace_enable (struct thread_info *tp)
682 if (tp->btrace.target != NULL)
685 if (!target_supports_btrace ())
686 error (_("Target does not support branch tracing."))
688 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
690 tp->btrace.target = target_enable_btrace (tp->ptid);
/* Disable branch tracing for thread TP and drop its trace state.
   No-op if tracing was never enabled.  */
696 btrace_disable (struct thread_info *tp)
698 struct btrace_thread_info *btp = &tp->btrace;
701 if (btp->target == NULL)
704 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
706 target_disable_btrace (btp->target);
/* Tear down branch tracing for TP — like disable, but used when the
   target is going away and errors must be ignored.  */
715 btrace_teardown (struct thread_info *tp)
717 struct btrace_thread_info *btp = &tp->btrace;
720 if (btp->target == NULL)
723 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
725 target_teardown_btrace (btp->target);
/* Read new branch trace data for thread TP and fold it into the
   function-level trace.  No-op when tracing is not enabled for TP.  */
734 btrace_fetch (struct thread_info *tp)
736 struct btrace_thread_info *btinfo;
737 VEC (btrace_block_s) *btrace;
738 struct cleanup *cleanup;
740 DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
742 btinfo = &tp->btrace;
743 if (btinfo->target == NULL)
/* Read only the delta since the last fetch; the cleanup frees the raw
   block vector on every exit path.  */
746 btrace = target_read_btrace (btinfo->target, BTRACE_READ_NEW);
747 cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
749 if (!VEC_empty (btrace_block_s, btrace))
752 btrace_compute_ftrace (btinfo, btrace);
755 do_cleanups (cleanup);
/* Discard the branch trace state of thread TP: the function segment
   chain, cached histories, and any replay position.  */
761 btrace_clear (struct thread_info *tp)
763 struct btrace_thread_info *btinfo;
764 struct btrace_function *it, *trash;
766 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
768 /* Make sure btrace frames that may hold a pointer into the branch
769 trace data are destroyed.  */
770 reinit_frame_cache ();
772 btinfo = &tp->btrace;
/* NOTE(review): the loop freeing each function segment (via it/trash)
   is on elided lines — confirm against the full source.  */
783 btinfo->begin = NULL;
/* xfree (NULL) is a no-op, so unconditional frees are safe here.  */
786 xfree (btinfo->insn_history);
787 xfree (btinfo->call_history);
788 xfree (btinfo->replay);
790 btinfo->insn_history = NULL;
791 btinfo->call_history = NULL;
792 btinfo->replay = NULL;
/* Objfile-removal observer: OBJFILE is going away, so any cached symbol
   pointers in per-thread branch traces would dangle.  NOTE(review): the
   per-thread iteration (presumably clearing each thread's trace) is on
   elided lines — confirm against the full source.  */
798 btrace_free_objfile (struct objfile *objfile)
800 struct thread_info *tp;
802 DEBUG ("free objfile");
#if defined (HAVE_LIBEXPAT)
810 /* Check the btrace document version.  */
/* gdb_xml start-element handler for <btrace>: rejects anything but
   version "1.0".  xml_find_attribute cannot return NULL here because
   "version" is declared GDB_XML_AF_NONE (required) below.  */
813 check_xml_btrace_version (struct gdb_xml_parser *parser,
814 const struct gdb_xml_element *element,
815 void *user_data, VEC (gdb_xml_value_s) *attributes)
817 const char *version = xml_find_attribute (attributes, "version")->value;
819 if (strcmp (version, "1.0") != 0)
820 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
823 /* Parse a btrace "block" xml record.  */
/* Handler for <block begin=... end=...>: appends one btrace_block to the
   vector passed through USER_DATA.  NOTE(review): the user_data cast and
   the block->end assignment are on elided lines.  */
826 parse_xml_btrace_block (struct gdb_xml_parser *parser,
827 const struct gdb_xml_element *element,
828 void *user_data, VEC (gdb_xml_value_s) *attributes)
830 VEC (btrace_block_s) **btrace;
831 struct btrace_block *block;
832 ULONGEST *begin, *end;
835 block = VEC_safe_push (btrace_block_s, *btrace, NULL);
837 begin = xml_find_attribute (attributes, "begin")->value;
838 end = xml_find_attribute (attributes, "end")->value;
840 block->begin = *begin;
/* Attribute/element tables describing the btrace XML document for
   gdb_xml_parse_quick; all tables are NULL-terminated.  */
844 static const struct gdb_xml_attribute block_attributes[] = {
845 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
846 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
847 { NULL, GDB_XML_AF_NONE, NULL, NULL }
850 static const struct gdb_xml_attribute btrace_attributes[] = {
851 { "version", GDB_XML_AF_NONE, NULL, NULL },
852 { NULL, GDB_XML_AF_NONE, NULL, NULL }
855 static const struct gdb_xml_element btrace_children[] = {
856 { "block", block_attributes, NULL,
857 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
858 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
861 static const struct gdb_xml_element btrace_elements[] = {
862 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
863 check_xml_btrace_version, NULL },
864 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
867 #endif /* defined (HAVE_LIBEXPAT) */
/* Parse branch trace data in XML format from BUFFER.  Returns the parsed
   block vector (ownership transfers to the caller); errors out when XML
   support was compiled out or (on elided lines) when parsing fails.  */
871 VEC (btrace_block_s) *
872 parse_xml_btrace (const char *buffer)
874 VEC (btrace_block_s) *btrace = NULL;
875 struct cleanup *cleanup;
878 #if defined (HAVE_LIBEXPAT)
/* The cleanup frees the partially-built vector on the error path;
   discard_cleanups below keeps it on success.  */
880 cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
881 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
885 do_cleanups (cleanup);
889 /* Keep parse results.  */
890 discard_cleanups (cleanup);
892 #else  /* !defined (HAVE_LIBEXPAT) */
894 error (_("Cannot process branch trace.  XML parsing is not supported."));
896 #endif  /* !defined (HAVE_LIBEXPAT) */
/* Return the instruction the iterator IT points at.  */
903 const struct btrace_insn *
904 btrace_insn_get (const struct btrace_insn_iterator *it)
906 const struct btrace_function *bfun;
907 unsigned int index, end;
912 /* The index is within the bounds of this function's instruction vector.  */
913 end = VEC_length (btrace_insn_s, bfun->insn);
914 gdb_assert (0 < end);
915 gdb_assert (index < end);
917 return VEC_index (btrace_insn_s, bfun->insn, index);
/* Return the trace-wide (1-based, see ftrace_new_function) number of the
   instruction IT points at.  */
923 btrace_insn_number (const struct btrace_insn_iterator *it)
925 const struct btrace_function *bfun;
928 return bfun->insn_offset + it->index;
/* Initialize IT to the first instruction of the trace in BTINFO;
   errors out when there is no trace.  */
934 btrace_insn_begin (struct btrace_insn_iterator *it,
935 const struct btrace_thread_info *btinfo)
937 const struct btrace_function *bfun;
939 bfun = btinfo->begin;
941 error (_("No trace."));
/* Initialize IT to the one-past-the-end position of the trace.  */
950 btrace_insn_end (struct btrace_insn_iterator *it,
951 const struct btrace_thread_info *btinfo)
953 const struct btrace_function *bfun;
958 error (_("No trace."));
960 /* The last instruction in the last function is the current instruction.
961 We point to it - it is one past the end of the execution trace.  */
962 length = VEC_length (btrace_insn_s, bfun->insn);
965 it->index = length - 1;
/* Advance IT by up to STRIDE instructions; returns (on an elided line,
   presumably) the number of steps actually taken.  Walks forward through
   the flow chain of function segments.  */
971 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
973 const struct btrace_function *bfun;
974 unsigned int index, steps;
982 unsigned int end, space, adv;
984 end = VEC_length (btrace_insn_s, bfun->insn);
986 gdb_assert (0 < end);
987 gdb_assert (index < end);
989 /* Compute the number of instructions remaining in this segment.  */
992 /* Advance the iterator as far as possible within this segment.  */
993 adv = min (space, stride);
998 /* Move to the next function if we're at the end of this one.  */
1001 const struct btrace_function *next;
1003 next = bfun->flow.next;
1006 /* We stepped past the last function.
1008 Let's adjust the index to point to the last instruction in
1009 the previous function.  */
1015 /* We now point to the first instruction in the new function.  */
1020 /* We did make progress.  */
1021 gdb_assert (adv > 0);
1024 /* Update the iterator.  */
1025 it->function = bfun;
/* Move IT backwards by up to STRIDE instructions; mirror image of
   btrace_insn_next, walking the flow chain via flow.prev.  */
1034 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1036 const struct btrace_function *bfun;
1037 unsigned int index, steps;
1039 bfun = it->function;
1047 /* Move to the previous function if we're at the start of this one.  */
1050 const struct btrace_function *prev;
1052 prev = bfun->flow.prev;
1056 /* We point to one after the last instruction in the new function.  */
1058 index = VEC_length (btrace_insn_s, bfun->insn);
1060 /* There is at least one instruction in this function segment.  */
1061 gdb_assert (index > 0);
1064 /* Advance the iterator as far as possible within this segment.  */
1065 adv = min (index, stride);
1070 /* We did make progress.  */
1071 gdb_assert (adv > 0);
1074 /* Update the iterator.  */
1075 it->function = bfun;
/* Three-way compare two instruction iterators by their trace-wide
   instruction number: negative/zero/positive as LHS is before/equal
   to/after RHS.  */
1084 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1085 const struct btrace_insn_iterator *rhs)
1087 unsigned int lnum, rnum;
1089 lnum = btrace_insn_number (lhs);
1090 rnum = btrace_insn_number (rhs);
/* Relies on unsigned wraparound converted to int for the ordering.  */
1092 return (int) (lnum - rnum);
/* Position IT at the instruction with trace-wide number NUMBER; searches
   backwards from the last segment for the segment covering NUMBER.
   NOTE(review): the failure return for an out-of-range NUMBER is on
   elided lines.  */
1098 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1099 const struct btrace_thread_info *btinfo,
1100 unsigned int number)
1102 const struct btrace_function *bfun;
1105 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1106 if (bfun->insn_offset <= number)
1112 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1116 it->function = bfun;
1117 it->index = number - bfun->insn_offset;
/* Return the function segment the call iterator IT points at.  */
1124 const struct btrace_function *
1125 btrace_call_get (const struct btrace_call_iterator *it)
1127 return it->function;
/* Return the number of the function IT points at; the end iterator
   (function == NULL) yields one past the last function's number.  */
1133 btrace_call_number (const struct btrace_call_iterator *it)
1135 const struct btrace_thread_info *btinfo;
1136 const struct btrace_function *bfun;
1139 btinfo = it->btinfo;
1140 bfun = it->function;
1142 return bfun->number;
1144 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1145 number of the last function.  */
1147 insns = VEC_length (btrace_insn_s, bfun->insn);
1149 /* If the function contains only a single instruction (i.e. the current
1150 instruction), it will be skipped and its number is already the number
1153 return bfun->number;
1155 /* Otherwise, return one more than the number of the last function.  */
1156 return bfun->number + 1;
/* Initialize IT to the first function of the call history; errors out
   when there is no trace.  */
1162 btrace_call_begin (struct btrace_call_iterator *it,
1163 const struct btrace_thread_info *btinfo)
1165 const struct btrace_function *bfun;
1167 bfun = btinfo->begin;
1169 error (_("No trace."));
1171 it->btinfo = btinfo;
1172 it->function = bfun;
/* Initialize IT to the end position (function == NULL) of the call
   history; errors out when there is no trace.  */
1178 btrace_call_end (struct btrace_call_iterator *it,
1179 const struct btrace_thread_info *btinfo)
1181 const struct btrace_function *bfun;
1185 error (_("No trace."));
1187 it->btinfo = btinfo;
1188 it->function = NULL;
/* Advance the call iterator IT by up to STRIDE functions along the flow
   chain; the trailing single-instruction segment (the current
   instruction) is not counted as a step.  */
1194 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
1196 const struct btrace_function *bfun;
1199 bfun = it->function;
1201 while (bfun != NULL)
1203 const struct btrace_function *next;
1206 next = bfun->flow.next;
1209 /* Ignore the last function if it only contains a single
1210 (i.e. the current) instruction.  */
1211 insns = VEC_length (btrace_insn_s, bfun->insn);
1216 if (stride == steps)
1223 it->function = bfun;
/* Move the call iterator IT backwards by up to STRIDE functions; mirror
   image of btrace_call_next, handling the end iterator by first backing
   up from btinfo (elided lines).  */
1230 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
1232 const struct btrace_thread_info *btinfo;
1233 const struct btrace_function *bfun;
1236 bfun = it->function;
1243 btinfo = it->btinfo;
1248 /* Ignore the last function if it only contains a single
1249 (i.e. the current) instruction.  */
1250 insns = VEC_length (btrace_insn_s, bfun->insn);
1252 bfun = bfun->flow.prev;
1260 while (steps < stride)
1262 const struct btrace_function *prev;
1264 prev = bfun->flow.prev;
1272 it->function = bfun;
/* Three-way compare two call iterators by function number:
   negative/zero/positive as LHS is before/equal to/after RHS.  */
1279 btrace_call_cmp (const struct btrace_call_iterator *lhs,
1280 const struct btrace_call_iterator *rhs)
1282 unsigned int lnum, rnum;
1284 lnum = btrace_call_number (lhs);
1285 rnum = btrace_call_number (rhs);
1287 return (int) (lnum - rnum);
/* Position IT at the function segment numbered NUMBER, searching
   backwards from the last segment.  NOTE(review): the comparison against
   bnum and the not-found return are on elided lines.  */
1293 btrace_find_call_by_number (struct btrace_call_iterator *it,
1294 const struct btrace_thread_info *btinfo,
1295 unsigned int number)
1297 const struct btrace_function *bfun;
1299 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1303 bnum = bfun->number;
1306 it->btinfo = btinfo;
1307 it->function = bfun;
1311 /* Functions are ordered and numbered consecutively.  We could bail out
1312 earlier.  On the other hand, it is very unlikely that we search for
1313 a nonexistent function.  */
/* Record the instruction-history range [BEGIN; END] in BTINFO,
   lazily allocating the history object on first use.  */
1322 btrace_set_insn_history (struct btrace_thread_info *btinfo,
1323 const struct btrace_insn_iterator *begin,
1324 const struct btrace_insn_iterator *end)
1326 if (btinfo->insn_history == NULL)
1327 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1329 btinfo->insn_history->begin = *begin;
1330 btinfo->insn_history->end = *end;
/* Record the call-history range [BEGIN; END] in BTINFO; both iterators
   must refer to the same thread's trace.  */
1336 btrace_set_call_history (struct btrace_thread_info *btinfo,
1337 const struct btrace_call_iterator *begin,
1338 const struct btrace_call_iterator *end)
1340 gdb_assert (begin->btinfo == end->btinfo);
1342 if (btinfo->call_history == NULL)
1343 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1345 btinfo->call_history->begin = *begin;
1346 btinfo->call_history->end = *end;
/* Return non-zero if TP is currently replaying its recorded trace.  */
1352 btrace_is_replaying (struct thread_info *tp)
1354 return tp->btrace.replay != NULL;