1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "gdbthread.h"
24 #include "exceptions.h"
31 #include "filenames.h"
32 #include "xml-support.h"
/* Debug-output helpers.  Messages go to gdb_stdlog only when the user has
   enabled record debugging ("set debug record"); DEBUG_FTRACE additionally
   tags messages with "[ftrace]" for the function-trace computation.  */
34 /* Print a record debug message.  Use do ... while (0) to avoid ambiguities
35    when used in if statements.  */
37 #define DEBUG(msg, args...) \
40     if (record_debug != 0) \
41       fprintf_unfiltered (gdb_stdlog, \
42 			  "[btrace] " msg "\n", ##args); \
46 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
48 /* Return the function name of a recorded function segment for printing.
49    This function never returns NULL.  */
52 ftrace_print_function_name (const struct btrace_function *bfun)
54   struct minimal_symbol *msym;
/* Prefer the debug symbol's print name; fall back to the minimal symbol.  */
61     return SYMBOL_PRINT_NAME (sym);
64     return SYMBOL_PRINT_NAME (msym);
69 /* Return the file name of a recorded function segment for printing.
70    This function never returns NULL.  */
73 ftrace_print_filename (const struct btrace_function *bfun)
/* Use the symtab's display name when symbol information is available;
   otherwise return a fixed placeholder so callers never see NULL.  */
81     filename = symtab_to_filename_for_display (sym->symtab);
83     filename = "<unknown>";
88 /* Return a string representation of the address of an instruction.
89    This function never returns NULL.  */
92 ftrace_print_insn_addr (const struct btrace_insn *insn)
/* Format the PC without leading zeros for compact debug output.  */
97   return core_addr_to_string_nz (insn->pc);
100 /* Print an ftrace debug status message.  */
103 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
105   const char *fun, *file;
106   unsigned int ibegin, iend;
107   int lbegin, lend, level;
109   fun = ftrace_print_function_name (bfun);
110   file = ftrace_print_filename (bfun);
113   lbegin = bfun->lbegin;
/* The instruction range is half-open: [insn_offset, insn_offset + count),
   matching the "[%u; %u)" format below.  */
116   ibegin = bfun->insn_offset;
117   iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
119   DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
120 		"insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
124 /* Return non-zero if BFUN does not match MFUN and FUN,
125    return zero otherwise.  */
128 ftrace_function_switched (const struct btrace_function *bfun,
129 			  const struct minimal_symbol *mfun,
130 			  const struct symbol *fun)
132   struct minimal_symbol *msym;
/* Compare symbols by linkage name rather than pointer identity — the
   symbol objects may differ even for the same function.  */
138   /* If the minimal symbol changed, we certainly switched functions.  */
139   if (mfun != NULL && msym != NULL
140       && strcmp (SYMBOL_LINKAGE_NAME (mfun), SYMBOL_LINKAGE_NAME (msym)) != 0)
143   /* If the symbol changed, we certainly switched functions.  */
144   if (fun != NULL && sym != NULL)
146       const char *bfname, *fname;
148       /* Check the function name.  */
149       if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
152       /* Check the location of those functions, as well.  */
153       bfname = symtab_to_fullname (sym->symtab);
154       fname = symtab_to_fullname (fun->symtab);
155       if (filename_cmp (fname, bfname) != 0)
159   /* If we lost symbol information, we switched functions.  */
160   if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
163   /* If we gained symbol information, we switched functions.  */
164   if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
170 /* Return non-zero if we should skip this file when generating the function
171    call history, zero otherwise.
172    We would want to do that if, say, a macro that is defined in another file
173    is expanded in this function.  */
176 ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
/* FULLNAME is compared against the full name of BFUN's symtab; a mismatch
   means the line came from a different file (e.g. a macro expansion).  */
185       bfile = symtab_to_fullname (sym->symtab);
187   return (filename_cmp (bfile, fullname) != 0);
190 /* Allocate and initialize a new branch trace function segment.
191    PREV is the chronologically preceding function segment.
192    MFUN and FUN are the symbol information we have for this function.  */
194 static struct btrace_function *
195 ftrace_new_function (struct btrace_function *prev,
196 		     struct minimal_symbol *mfun,
199   struct btrace_function *bfun;
/* xzalloc zero-initializes the segment; only non-zero fields are set
   explicitly below.  */
201   bfun = xzalloc (sizeof (*bfun));
205   bfun->flow.prev = prev;
207   /* We start with the identities of min and max, respectively.  */
208   bfun->lbegin = INT_MAX;
209   bfun->lend = INT_MIN;
/* First segment of the trace: instruction numbering starts at one.  */
213       /* Start counting at one.  */
215       bfun->insn_offset = 1;
/* Otherwise link this segment after PREV in the chronological (flow) list
   and continue the function numbering and instruction numbering from it.  */
219       gdb_assert (prev->flow.next == NULL);
220       prev->flow.next = bfun;
222       bfun->number = prev->number + 1;
223       bfun->insn_offset = (prev->insn_offset
224 			   + VEC_length (btrace_insn_s, prev->insn));
230 /* Update the UP field of a function segment.  */
233 ftrace_update_caller (struct btrace_function *bfun,
234 		      struct btrace_function *caller,
235 		      enum btrace_function_flag flags)
/* An already-set caller being overwritten is unusual; log it.  */
237   if (bfun->up != NULL)
238     ftrace_debug (bfun, "updating caller");
243   ftrace_debug (bfun, "set caller");
246 /* Fix up the caller for all segments of a function.  */
249 ftrace_fixup_caller (struct btrace_function *bfun,
250 		     struct btrace_function *caller,
251 		     enum btrace_function_flag flags)
253   struct btrace_function *prev, *next;
255   ftrace_update_caller (bfun, caller, flags);
257   /* Update all function segments belonging to the same function.  */
/* Walk the segment list (same-function instances) in both directions from
   BFUN so every segment gets the same caller and flags.  */
258   for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
259     ftrace_update_caller (prev, caller, flags);
261   for (next = bfun->segment.next; next != NULL; next = next->segment.next)
262     ftrace_update_caller (next, caller, flags);
265 /* Add a new function segment for a call.
266    CALLER is the chronologically preceding function segment.
267    MFUN and FUN are the symbol information we have for this function.  */
269 static struct btrace_function *
270 ftrace_new_call (struct btrace_function *caller,
271 		 struct minimal_symbol *mfun,
274   struct btrace_function *bfun;
276   bfun = ftrace_new_function (caller, mfun, fun);
/* A call descends one level in the call stack.  */
278   bfun->level = caller->level + 1;
280   ftrace_debug (bfun, "new call");
285 /* Add a new function segment for a tail call.
286    CALLER is the chronologically preceding function segment.
287    MFUN and FUN are the symbol information we have for this function.  */
289 static struct btrace_function *
290 ftrace_new_tailcall (struct btrace_function *caller,
291 		     struct minimal_symbol *mfun,
294   struct btrace_function *bfun;
296   bfun = ftrace_new_function (caller, mfun, fun);
/* Like a normal call, a tail call descends one level, but the UP link is
   flagged so a later return can skip over it.  */
298   bfun->level = caller->level + 1;
299   bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
301   ftrace_debug (bfun, "new tail call");
306 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
307    symbol information.  */
309 static struct btrace_function *
310 ftrace_find_caller (struct btrace_function *bfun,
311 		    struct minimal_symbol *mfun,
/* Walk up the call stack via the UP links until the symbol information
   matches; returns NULL when no such caller exists.  */
314   for (; bfun != NULL; bfun = bfun->up)
316       /* Skip functions with incompatible symbol information.  */
317       if (ftrace_function_switched (bfun, mfun, fun))
320       /* This is the function segment we're looking for.  */
327 /* Find the innermost caller in the back trace of BFUN, skipping all
328    function segments that do not end with a call instruction (e.g.
329    tail calls ending with a jump).  */
331 static struct btrace_function *
332 ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
334   for (; bfun != NULL; bfun = bfun->up)
336       struct btrace_insn *last;
339       /* We do not allow empty function segments.  */
340       gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
/* Only a segment whose last instruction is an actual call qualifies.  */
342       last = VEC_last (btrace_insn_s, bfun->insn);
345       if (gdbarch_insn_is_call (gdbarch, pc))
352 /* Add a continuation segment for a function into which we return.
353    PREV is the chronologically preceding function segment.
354    MFUN and FUN are the symbol information we have for this function.  */
356 static struct btrace_function *
357 ftrace_new_return (struct gdbarch *gdbarch,
358 		   struct btrace_function *prev,
359 		   struct minimal_symbol *mfun,
362   struct btrace_function *bfun, *caller;
364   bfun = ftrace_new_function (prev, mfun, fun);
366   /* It is important to start at PREV's caller.  Otherwise, we might find
367      PREV itself, if PREV is a recursive function.  */
368   caller = ftrace_find_caller (prev->up, mfun, fun);
/* Case 1: we found the matching caller — this segment is a continuation
   of that function instance.  */
371       /* The caller of PREV is the preceding btrace function segment in this
372 	 function instance.  */
373       gdb_assert (caller->segment.next == NULL);
375       caller->segment.next = bfun;
376       bfun->segment.prev = caller;
378       /* Maintain the function level.  */
379       bfun->level = caller->level;
381       /* Maintain the call stack.  */
382       bfun->up = caller->up;
383       bfun->flags = caller->flags;
385       ftrace_debug (bfun, "new return");
389       /* We did not find a caller.  This could mean that something went
390 	 wrong or that the call is simply not included in the trace.  */
392       /* Let's search for some actual call.  */
393       caller = ftrace_find_call (gdbarch, prev->up);
/* Case 2: no call at all in the back trace — assume the trace simply
   does not include it and treat this as returning past the trace start.  */
396 	  /* There is no call in PREV's back trace.  We assume that the
397 	     branch trace did not include it.  */
399 	  /* Let's find the topmost call function - this skips tail calls.  */
400 	  while (prev->up != NULL)
403 	  /* We maintain levels for a series of returns for which we have
405 	     We start at the preceding function's level in case this has
406 	     already been a return for which we have not seen the call.
407 	     We start at level 0 otherwise, to handle tail calls correctly.  */
408 	  bfun->level = min (0, prev->level) - 1;
410 	  /* Fix up the call stack for PREV.  */
411 	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
413 	  ftrace_debug (bfun, "new return - no caller");
/* Case 3: there was a call, but not to MFUN/FUN — stay at PREV's level.  */
417 	  /* There is a call in PREV's back trace to which we should have
418 	     returned.  Let's remain at this level.  */
419 	  bfun->level = prev->level;
421 	  ftrace_debug (bfun, "new return - unknown caller")
428 /* Add a new function segment for a function switch.
429    PREV is the chronologically preceding function segment.
430    MFUN and FUN are the symbol information we have for this function.  */
432 static struct btrace_function *
433 ftrace_new_switch (struct btrace_function *prev,
434 		   struct minimal_symbol *mfun,
437   struct btrace_function *bfun;
439   /* This is an unexplained function switch.  The call stack will likely
440      be wrong at this point.  */
441   bfun = ftrace_new_function (prev, mfun, fun);
443   /* We keep the function level.  */
444   bfun->level = prev->level;
446   ftrace_debug (bfun, "new switch");
451 /* Update BFUN with respect to the instruction at PC.  This may create new
453    Return the chronologically latest function segment, never NULL.  */
455 static struct btrace_function *
456 ftrace_update_function (struct gdbarch *gdbarch,
457 			struct btrace_function *bfun, CORE_ADDR pc)
459   struct bound_minimal_symbol bmfun;
460   struct minimal_symbol *mfun;
462   struct btrace_insn *last;
464   /* Try to determine the function we're in.  We use both types of symbols
465      to avoid surprises when we sometimes get a full symbol and sometimes
466      only a minimal symbol.  */
467   fun = find_pc_function (pc);
468   bmfun = lookup_minimal_symbol_by_pc (pc);
471   if (fun == NULL && mfun == NULL)
472     DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
474   /* If we didn't have a function before, we create one.  */
476     return ftrace_new_function (bfun, mfun, fun);
478   /* Check the last instruction, if we have one.
479      We do this check first, since it allows us to fill in the call stack
480      links in addition to the normal flow links.  */
482   if (!VEC_empty (btrace_insn_s, bfun->insn))
483     last = VEC_last (btrace_insn_s, bfun->insn);
491       /* Check for returns.  */
492       if (gdbarch_insn_is_ret (gdbarch, lpc))
493 	return ftrace_new_return (gdbarch, bfun, mfun, fun);
495       /* Check for calls.  */
496       if (gdbarch_insn_is_call (gdbarch, lpc))
500 	  size = gdb_insn_length (gdbarch, lpc);
502 	  /* Ignore calls to the next instruction.  They are used for PIC.  */
503 	  if (lpc + size != pc)
504 	    return ftrace_new_call (bfun, mfun, fun);
508   /* Check if we're switching functions for some other reason.  */
509   if (ftrace_function_switched (bfun, mfun, fun))
511       DEBUG_FTRACE ("switching from %s in %s at %s",
512 		    ftrace_print_insn_addr (last),
513 		    ftrace_print_function_name (bfun),
514 		    ftrace_print_filename (bfun));
518 	  CORE_ADDR start, lpc;
520 	  start = get_pc_function_start (pc);
522 	  /* If we can't determine the function for PC, we treat a jump at
523 	     the end of the block as tail call.  */
529 	  /* Jumps indicate optimized tail calls.  */
530 	  if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
531 	    return ftrace_new_tailcall (bfun, mfun, fun);
/* Otherwise it is an unexplained switch (e.g. signal or longjmp).  */
534       return ftrace_new_switch (bfun, mfun, fun);
540 /* Update BFUN's source range with respect to the instruction at PC.  */
543 ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
545   struct symtab_and_line sal;
546   const char *fullname;
548   sal = find_pc_line (pc, 0);
/* No line info for PC — nothing to record.  */
549   if (sal.symtab == NULL || sal.line == 0)
551       DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
555   /* Check if we switched files.  This could happen if, say, a macro that
556      is defined in another file is expanded here.  */
557   fullname = symtab_to_fullname (sal.symtab);
558   if (ftrace_skip_file (bfun, fullname))
560       DEBUG_FTRACE ("ignoring file at %s, file=%s",
561 		    core_addr_to_string_nz (pc), fullname);
565   /* Update the line range.  */
/* lbegin/lend start at INT_MAX/INT_MIN (the min/max identities set in
   ftrace_new_function), so the first line seen initializes both.  */
566   bfun->lbegin = min (bfun->lbegin, sal.line);
567   bfun->lend = max (bfun->lend, sal.line);
569   if (record_debug > 1)
570     ftrace_debug (bfun, "update lines");
573 /* Add the instruction at PC to BFUN's instructions.  */
576 ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
578   struct btrace_insn *insn;
/* Push a new (uninitialized) slot and fill it in.  */
580   insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
583   if (record_debug > 1)
584     ftrace_debug (bfun, "update insn");
587 /* Compute the function branch trace from a block branch trace BTRACE for
588    a thread given by BTINFO.  */
591 btrace_compute_ftrace (struct btrace_thread_info *btinfo,
592 		       VEC (btrace_block_s) *btrace)
594   struct btrace_function *begin, *end;
595   struct gdbarch *gdbarch;
599   DEBUG ("compute ftrace");
601   gdbarch = target_gdbarch ();
602   begin = btinfo->begin;
/* Start from the existing normalized level when extending a trace;
   INT_MAX acts as the min-identity for a fresh trace.  */
604   level = begin != NULL ? -btinfo->level : INT_MAX;
/* The block vector is ordered most-recent-first, so iterate it from the
   back to process blocks chronologically.  */
605   blk = VEC_length (btrace_block_s, btrace);
609       btrace_block_s *block;
614       block = VEC_index (btrace_block_s, btrace, blk);
/* Disassemble the block instruction by instruction from block->begin.  */
621 	  /* We should hit the end of the block.  Warn if we went too far.  */
624 	      warning (_("Recorded trace may be corrupted around %s."),
625 		       core_addr_to_string_nz (pc));
629 	  end = ftrace_update_function (gdbarch, end, pc);
633 	  /* Maintain the function level offset.
634 	     For all but the last block, we do it here.  */
636 	    level = min (level, end->level);
638 	  ftrace_update_insns (end, pc);
639 	  ftrace_update_lines (end, pc);
641 	  /* We're done once we pushed the instruction at the end.  */
642 	  if (block->end == pc)
645 	  size = gdb_insn_length (gdbarch, pc);
647 	  /* Make sure we terminate if we fail to compute the size.  */
650 	      warning (_("Recorded trace may be incomplete around %s."),
651 		       core_addr_to_string_nz (pc));
657 	  /* Maintain the function level offset.
658 	     For the last block, we do it here to not consider the last
660 	     Since the last instruction corresponds to the current instruction
661 	     and is not really part of the execution history, it shouldn't
664 	    level = min (level, end->level);
668   btinfo->begin = begin;
671   /* LEVEL is the minimal function level of all btrace function segments.
672      Define the global level offset to -LEVEL so all function levels are
673      normalized to start at zero.  */
674   btinfo->level = -level;
/* Enable branch tracing for thread TP.  No-op if already enabled; errors
   out if the target does not support branch tracing.  */
680 btrace_enable (struct thread_info *tp)
682   if (tp->btrace.target != NULL)
685   if (!target_supports_btrace ())
686     error (_("Target does not support branch tracing."));
688   DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
690   tp->btrace.target = target_enable_btrace (tp->ptid);
/* Disable branch tracing for thread TP and release the target handle.
   No-op if tracing is not enabled.  */
696 btrace_disable (struct thread_info *tp)
698   struct btrace_thread_info *btp = &tp->btrace;
701   if (btp->target == NULL)
704   DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
706   target_disable_btrace (btp->target);
/* Tear down branch tracing for TP without reading remaining trace data —
   used when the thread is going away.  No-op if tracing is not enabled.  */
715 btrace_teardown (struct thread_info *tp)
717   struct btrace_thread_info *btp = &tp->btrace;
720   if (btp->target == NULL)
723   DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
725   target_teardown_btrace (btp->target);
731 /* Adjust the block trace in order to stitch old and new trace together.
732    BTRACE is the new delta trace between the last and the current stop.
733    BTINFO is the old branch trace until the last stop.
734    May modify BTRACE as well as the existing trace in BTINFO.
735    Return 0 on success, -1 otherwise.  */
738 btrace_stitch_trace (VEC (btrace_block_s) **btrace,
739 		     const struct btrace_thread_info *btinfo)
741   struct btrace_function *last_bfun;
742   struct btrace_insn *last_insn;
743   btrace_block_s *first_new_block;
745   /* If we don't have trace, there's nothing to do.  */
746   if (VEC_empty (btrace_block_s, *btrace))
749   last_bfun = btinfo->end;
750   gdb_assert (last_bfun != NULL);
752   /* Beware that block trace starts with the most recent block, so the
753      chronologically first block in the new trace is the last block in
754      the new trace's block vector.  */
755   first_new_block = VEC_last (btrace_block_s, *btrace);
756   last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
758   /* If the current PC at the end of the block is the same as in our current
759      trace, there are two explanations:
760        1. we executed the instruction and some branch brought us back.
761        2. we have not made any progress.
762      In the first case, the delta trace vector should contain at least two
764      In the second case, the delta trace vector should contain exactly one
765      entry for the partial block containing the current PC.  Remove it.  */
766   if (first_new_block->end == last_insn->pc
767       && VEC_length (btrace_block_s, *btrace) == 1)
769       VEC_pop (btrace_block_s, *btrace);
773   DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
774 	 core_addr_to_string_nz (first_new_block->end));
776   /* Do a simple sanity check to make sure we don't accidentally end up
777      with a bad block.  This should not occur in practice.  */
/* A delta block ending before our last recorded PC cannot follow our
   current trace — report failure so the caller falls back to a full read.  */
778   if (first_new_block->end < last_insn->pc)
780       warning (_("Error while trying to read delta trace.  Falling back to "
785   /* We adjust the last block to start at the end of our current trace.  */
786   gdb_assert (first_new_block->begin == 0);
787   first_new_block->begin = last_insn->pc;
789   /* We simply pop the last insn so we can insert it again as part of
790      the normal branch trace computation.
791      Since instruction iterators are based on indices in the instructions
792      vector, we don't leave any pointers dangling.  */
793   DEBUG ("pruning insn at %s for stitching",
794 	 ftrace_print_insn_addr (last_insn));
796   VEC_pop (btrace_insn_s, last_bfun->insn);
798   /* The instructions vector may become empty temporarily if this has
799      been the only instruction in this function segment.
800      This violates the invariant but will be remedied shortly by
801      btrace_compute_ftrace when we add the new trace.  */
805 /* Clear the branch trace histories in BTINFO.  */
808 btrace_clear_history (struct btrace_thread_info *btinfo)
/* Free the cached iterator state and reset the pointers so stale history
   is never reused after the trace changes.  xfree (NULL) is a no-op.  */
810   xfree (btinfo->insn_history);
811   xfree (btinfo->call_history);
812   xfree (btinfo->replay);
814   btinfo->insn_history = NULL;
815   btinfo->call_history = NULL;
816   btinfo->replay = NULL;
/* Fetch the branch trace for TP from the target and (re)compute the
   function-level trace.  Tries a delta read first, then a full re-read.  */
822 btrace_fetch (struct thread_info *tp)
824   struct btrace_thread_info *btinfo;
825   struct btrace_target_info *tinfo;
826   VEC (btrace_block_s) *btrace;
827   struct cleanup *cleanup;
830   DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
833   btinfo = &tp->btrace;
834   tinfo = btinfo->target;
838   /* There's no way we could get new trace while replaying.
839      On the other hand, delta trace would return a partial record with the
840      current PC, which is the replay PC, not the last PC, as expected.  */
841   if (btinfo->replay != NULL)
/* Ensure the block vector is freed on any exit path below.  */
844   cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
846   /* Let's first try to extend the trace we already have.  */
847   if (btinfo->end != NULL)
849       errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
852 	  /* Success.  Let's try to stitch the traces together.  */
853 	  errcode = btrace_stitch_trace (&btrace, btinfo);
857 	  /* We failed to read delta trace.  Let's try to read new trace.  */
858 	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
860 	  /* If we got any new trace, discard what we have.  */
861 	  if (errcode == 0 && !VEC_empty (btrace_block_s, btrace))
865       /* If we were not able to read the trace, we start over.  */
869 	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
/* No previous trace — read everything from scratch.  */
873     errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
875   /* If we were not able to read the branch trace, signal an error.  */
877     error (_("Failed to read branch trace."));
879   /* Compute the trace, provided we have any.  */
880   if (!VEC_empty (btrace_block_s, btrace))
/* New trace invalidates any cached history before recomputation.  */
882       btrace_clear_history (btinfo);
883       btrace_compute_ftrace (btinfo, btrace);
886   do_cleanups (cleanup);
/* Discard the branch trace (and its histories) recorded for TP.  */
892 btrace_clear (struct thread_info *tp)
894   struct btrace_thread_info *btinfo;
895   struct btrace_function *it, *trash;
897   DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
899   /* Make sure btrace frames that may hold a pointer into the branch
900      trace data are destroyed.  */
901   reinit_frame_cache ();
903   btinfo = &tp->btrace;
/* The function segment list is freed (elided here) before resetting.  */
914   btinfo->begin = NULL;
917   btrace_clear_history (btinfo);
/* Called when OBJFILE is freed: symbol pointers held by recorded traces
   would become stale, so per-thread traces are discarded (body elided).  */
923 btrace_free_objfile (struct objfile *objfile)
925   struct thread_info *tp;
927   DEBUG ("free objfile");
933 #if defined (HAVE_LIBEXPAT)
935 /* Check the btrace document version.  */
/* gdb_xml start-element handler for <btrace>: only version "1.0" is
   accepted; anything else aborts the parse via gdb_xml_error.  */
938 check_xml_btrace_version (struct gdb_xml_parser *parser,
939 			  const struct gdb_xml_element *element,
940 			  void *user_data, VEC (gdb_xml_value_s) *attributes)
942   const char *version = xml_find_attribute (attributes, "version")->value;
944   if (strcmp (version, "1.0") != 0)
945     gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
948 /* Parse a btrace "block" xml record.  */
/* gdb_xml start-element handler for <block>: appends a new block with the
   "begin"/"end" attribute values to the vector passed via USER_DATA.  */
951 parse_xml_btrace_block (struct gdb_xml_parser *parser,
952 			const struct gdb_xml_element *element,
953 			void *user_data, VEC (gdb_xml_value_s) *attributes)
955   VEC (btrace_block_s) **btrace;
956   struct btrace_block *block;
957   ULONGEST *begin, *end;
960   block = VEC_safe_push (btrace_block_s, *btrace, NULL);
/* Both attributes are mandatory (GDB_XML_AF_NONE), so the lookups
   cannot return NULL here.  */
962   begin = xml_find_attribute (attributes, "begin")->value;
963   end = xml_find_attribute (attributes, "end")->value;
965   block->begin = *begin;
/* XML schema tables for the btrace document: a <btrace version="1.0">
   element containing zero or more <block begin=... end=...> children.  */
969 static const struct gdb_xml_attribute block_attributes[] = {
970   { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
971   { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
972   { NULL, GDB_XML_AF_NONE, NULL, NULL }
975 static const struct gdb_xml_attribute btrace_attributes[] = {
976   { "version", GDB_XML_AF_NONE, NULL, NULL },
977   { NULL, GDB_XML_AF_NONE, NULL, NULL }
980 static const struct gdb_xml_element btrace_children[] = {
981   { "block", block_attributes, NULL,
982     GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
983   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
986 static const struct gdb_xml_element btrace_elements[] = {
987   { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
988     check_xml_btrace_version, NULL },
989   { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
992 #endif /* defined (HAVE_LIBEXPAT) */
/* Parse BUFFER as a btrace XML document and return the block vector.
   Ownership of the vector transfers to the caller.  Errors out on parse
   failure or when GDB was built without expat.  */
996 VEC (btrace_block_s) *
997 parse_xml_btrace (const char *buffer)
999   VEC (btrace_block_s) *btrace = NULL;
1000   struct cleanup *cleanup;
1003 #if defined (HAVE_LIBEXPAT)
1005   cleanup = make_cleanup (VEC_cleanup (btrace_block_s), &btrace);
1006   errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
1009     error (_("Error parsing branch trace."));
1011   /* Keep parse results.  */
/* On success the cleanup must not free the vector we hand back.  */
1012   discard_cleanups (cleanup);
1014 #else  /* !defined (HAVE_LIBEXPAT) */
1016   error (_("Cannot process branch trace.  XML parsing is not supported."));
1018 #endif  /* !defined (HAVE_LIBEXPAT) */
/* Return the instruction the iterator IT points at.  The iterator must be
   valid: its index must lie inside the segment's instruction vector.  */
1025 const struct btrace_insn *
1026 btrace_insn_get (const struct btrace_insn_iterator *it)
1028   const struct btrace_function *bfun;
1029   unsigned int index, end;
1032   bfun = it->function;
1034   /* The index is within the bounds of this function's instruction vector.  */
1035   end = VEC_length (btrace_insn_s, bfun->insn);
1036   gdb_assert (0 < end);
1037   gdb_assert (index < end);
1039   return VEC_index (btrace_insn_s, bfun->insn, index);
/* Return the trace-wide (1-based) number of the instruction at IT.  */
1045 btrace_insn_number (const struct btrace_insn_iterator *it)
1047   const struct btrace_function *bfun;
1049   bfun = it->function;
/* Segment-relative index plus the segment's running instruction offset.  */
1050   return bfun->insn_offset + it->index;
/* Initialize IT to point at the first instruction of the recorded trace
   in BTINFO.  Errors out when there is no trace.  */
1056 btrace_insn_begin (struct btrace_insn_iterator *it,
1057 		   const struct btrace_thread_info *btinfo)
1059   const struct btrace_function *bfun;
1061   bfun = btinfo->begin;
1063     error (_("No trace."));
1065   it->function = bfun;
/* Initialize IT to the one-past-the-end position of the recorded trace in
   BTINFO.  Errors out when there is no trace.  */
1072 btrace_insn_end (struct btrace_insn_iterator *it,
1073 		 const struct btrace_thread_info *btinfo)
1075   const struct btrace_function *bfun;
1076   unsigned int length;
1080     error (_("No trace."));
1082   /* The last instruction in the last function is the current instruction.
1083      We point to it - it is one past the end of the execution trace.  */
1084   length = VEC_length (btrace_insn_s, bfun->insn);
1086   it->function = bfun;
1087   it->index = length - 1;
/* Advance IT by up to STRIDE instructions, crossing segment boundaries via
   the flow links.  Returns the number of steps actually taken (elided).  */
1093 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1095   const struct btrace_function *bfun;
1096   unsigned int index, steps;
1098   bfun = it->function;
1104       unsigned int end, space, adv;
1106       end = VEC_length (btrace_insn_s, bfun->insn);
1108       gdb_assert (0 < end);
1109       gdb_assert (index < end);
1111       /* Compute the number of instructions remaining in this segment.  */
1112       space = end - index;
1114       /* Advance the iterator as far as possible within this segment.  */
1115       adv = min (space, stride);
1120       /* Move to the next function if we're at the end of this one.  */
1123 	  const struct btrace_function *next;
1125 	  next = bfun->flow.next;
1128 	      /* We stepped past the last function.
1130 		 Let's adjust the index to point to the last instruction in
1131 		 the previous function.  */
1137 	  /* We now point to the first instruction in the new function.  */
1142       /* We did make progress.  */
1143       gdb_assert (adv > 0);
1146   /* Update the iterator.  */
1147   it->function = bfun;
/* Step IT backwards by up to STRIDE instructions, crossing segment
   boundaries via the flow links.  Returns the steps taken (elided).  */
1156 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1158   const struct btrace_function *bfun;
1159   unsigned int index, steps;
1161   bfun = it->function;
1169       /* Move to the previous function if we're at the start of this one.  */
1172 	  const struct btrace_function *prev;
1174 	  prev = bfun->flow.prev;
1178 	  /* We point to one after the last instruction in the new function.  */
1180 	  index = VEC_length (btrace_insn_s, bfun->insn);
1182 	  /* There is at least one instruction in this function segment.  */
1183 	  gdb_assert (index > 0);
1186       /* Advance the iterator as far as possible within this segment.  */
1187       adv = min (index, stride);
1192       /* We did make progress.  */
1193       gdb_assert (adv > 0);
1196   /* Update the iterator.  */
1197   it->function = bfun;
/* Three-way compare two instruction iterators by their trace-wide
   instruction numbers: negative, zero, or positive like strcmp.  */
1206 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
1207 		 const struct btrace_insn_iterator *rhs)
1209   unsigned int lnum, rnum;
1211   lnum = btrace_insn_number (lhs);
1212   rnum = btrace_insn_number (rhs);
/* The unsigned difference is cast to int; valid for iterators into the
   same trace where the distance fits in an int.  */
1214   return (int) (lnum - rnum);
/* Position IT at the instruction with trace-wide number NUMBER.
   Returns non-zero on success (elided), zero when NUMBER is out of range.  */
1220 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1221 			    const struct btrace_thread_info *btinfo,
1222 			    unsigned int number)
1224   const struct btrace_function *bfun;
/* Search backwards for the first segment whose offset does not exceed
   NUMBER; that segment contains the instruction if any does.  */
1227   for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1228     if (bfun->insn_offset <= number)
1234   end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1238   it->function = bfun;
1239   it->index = number - bfun->insn_offset;
/* Return the function segment the call iterator IT points at, or NULL for
   the end iterator.  */
1246 const struct btrace_function *
1247 btrace_call_get (const struct btrace_call_iterator *it)
1249   return it->function;
/* Return the number of the function segment at IT.  For the end iterator,
   return one past the number of the last listed function.  */
1255 btrace_call_number (const struct btrace_call_iterator *it)
1257   const struct btrace_thread_info *btinfo;
1258   const struct btrace_function *bfun;
1261   btinfo = it->btinfo;
1262   bfun = it->function;
1264     return bfun->number;
1266   /* For the end iterator, i.e. bfun == NULL, we return one more than the
1267      number of the last function.  */
1269   insns = VEC_length (btrace_insn_s, bfun->insn);
1271   /* If the function contains only a single instruction (i.e. the current
1272      instruction), it will be skipped and its number is already the number
1275     return bfun->number;
1277   /* Otherwise, return one more than the number of the last function.  */
1278   return bfun->number + 1;
/* Initialize IT to the first function segment of the recorded trace in
   BTINFO.  Errors out when there is no trace.  */
1284 btrace_call_begin (struct btrace_call_iterator *it,
1285 		   const struct btrace_thread_info *btinfo)
1287   const struct btrace_function *bfun;
1289   bfun = btinfo->begin;
1291     error (_("No trace."));
1293   it->btinfo = btinfo;
1294   it->function = bfun;
/* Initialize IT to the end position of the call history in BTINFO,
   represented by a NULL function pointer.  Errors out with no trace.  */
1300 btrace_call_end (struct btrace_call_iterator *it,
1301 		 const struct btrace_thread_info *btinfo)
1303   const struct btrace_function *bfun;
1307     error (_("No trace."));
1309   it->btinfo = btinfo;
1310   it->function = NULL;
/* Advance the call iterator IT by up to STRIDE function segments along the
   flow links.  Returns the number of steps taken (elided).  */
1316 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
1318   const struct btrace_function *bfun;
1321   bfun = it->function;
1323   while (bfun != NULL)
1325       const struct btrace_function *next;
1328       next = bfun->flow.next;
1331 	  /* Ignore the last function if it only contains a single
1332 	     (i.e. the current) instruction.  */
1333 	  insns = VEC_length (btrace_insn_s, bfun->insn);
1338       if (stride == steps)
1345   it->function = bfun;
/* Step the call iterator IT backwards by up to STRIDE function segments.
   Returns the number of steps taken (elided).  */
1352 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
1354   const struct btrace_thread_info *btinfo;
1355   const struct btrace_function *bfun;
1358   bfun = it->function;
/* When IT is the end iterator (bfun == NULL), restart from the last
   recorded segment in BTINFO.  */
1365       btinfo = it->btinfo;
1370       /* Ignore the last function if it only contains a single
1371 	 (i.e. the current) instruction.  */
1372       insns = VEC_length (btrace_insn_s, bfun->insn);
1374 	bfun = bfun->flow.prev;
1382   while (steps < stride)
1384       const struct btrace_function *prev;
1386       prev = bfun->flow.prev;
1394   it->function = bfun;
/* Three-way compare two call iterators by their function-segment numbers,
   with strcmp-like sign semantics.  */
1401 btrace_call_cmp (const struct btrace_call_iterator *lhs,
1402 		 const struct btrace_call_iterator *rhs)
1404   unsigned int lnum, rnum;
1406   lnum = btrace_call_number (lhs);
1407   rnum = btrace_call_number (rhs);
1409   return (int) (lnum - rnum);
/* Position IT at the function segment numbered NUMBER.  Returns non-zero
   on success (elided), zero when no such segment exists.  */
1415 btrace_find_call_by_number (struct btrace_call_iterator *it,
1416 			    const struct btrace_thread_info *btinfo,
1417 			    unsigned int number)
1419   const struct btrace_function *bfun;
/* Linear scan backwards from the most recent segment.  */
1421   for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1425       bnum = bfun->number;
1428 	  it->btinfo = btinfo;
1429 	  it->function = bfun;
1433       /* Functions are ordered and numbered consecutively.  We could bail out
1434 	 earlier.  On the other hand, it is very unlikely that we search for
1435 	 a nonexistent function.  */
/* Record the instruction-history range [BEGIN, END) in BTINFO, allocating
   the history struct on first use.  */
1444 btrace_set_insn_history (struct btrace_thread_info *btinfo,
1445 			 const struct btrace_insn_iterator *begin,
1446 			 const struct btrace_insn_iterator *end)
1448   if (btinfo->insn_history == NULL)
1449     btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1451   btinfo->insn_history->begin = *begin;
1452   btinfo->insn_history->end = *end;
/* Record the call-history range [BEGIN, END) in BTINFO, allocating the
   history struct on first use.  Both iterators must belong to the same
   thread's trace.  */
1458 btrace_set_call_history (struct btrace_thread_info *btinfo,
1459 			 const struct btrace_call_iterator *begin,
1460 			 const struct btrace_call_iterator *end)
1462   gdb_assert (begin->btinfo == end->btinfo);
1464   if (btinfo->call_history == NULL)
1465     btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1467   btinfo->call_history->begin = *begin;
1468   btinfo->call_history->end = *end;
1474 btrace_is_replaying (struct thread_info *tp)
1476 return tp->btrace.replay != NULL;