1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

/* Same as DEBUG, but tagged as coming from the function-trace builder.  */
#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
49 /* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
53 ftrace_print_function_name (const struct btrace_function *bfun)
55 struct minimal_symbol *msym;
62 return SYMBOL_PRINT_NAME (sym);
65 return MSYMBOL_PRINT_NAME (msym);
70 /* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
74 ftrace_print_filename (const struct btrace_function *bfun)
82 filename = symtab_to_filename_for_display (symbol_symtab (sym));
84 filename = "<unknown>";
89 /* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
93 ftrace_print_insn_addr (const struct btrace_insn *insn)
98 return core_addr_to_string_nz (insn->pc);
101 /* Print an ftrace debug status message. */
104 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
106 const char *fun, *file;
107 unsigned int ibegin, iend;
108 int lbegin, lend, level;
110 fun = ftrace_print_function_name (bfun);
111 file = ftrace_print_filename (bfun);
114 lbegin = bfun->lbegin;
117 ibegin = bfun->insn_offset;
118 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
120 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
121 "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
125 /* Return non-zero if BFUN does not match MFUN and FUN,
126 return zero otherwise. */
129 ftrace_function_switched (const struct btrace_function *bfun,
130 const struct minimal_symbol *mfun,
131 const struct symbol *fun)
133 struct minimal_symbol *msym;
139 /* If the minimal symbol changed, we certainly switched functions. */
140 if (mfun != NULL && msym != NULL
141 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
144 /* If the symbol changed, we certainly switched functions. */
145 if (fun != NULL && sym != NULL)
147 const char *bfname, *fname;
149 /* Check the function name. */
150 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
153 /* Check the location of those functions, as well. */
154 bfname = symtab_to_fullname (symbol_symtab (sym));
155 fname = symtab_to_fullname (symbol_symtab (fun));
156 if (filename_cmp (fname, bfname) != 0)
160 /* If we lost symbol information, we switched functions. */
161 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
164 /* If we gained symbol information, we switched functions. */
165 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
171 /* Return non-zero if we should skip this file when generating the function
172 call history, zero otherwise.
173 We would want to do that if, say, a macro that is defined in another file
174 is expanded in this function. */
177 ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
186 bfile = symtab_to_fullname (symbol_symtab (sym));
188 return (filename_cmp (bfile, fullname) != 0);
191 /* Allocate and initialize a new branch trace function segment.
192 PREV is the chronologically preceding function segment.
193 MFUN and FUN are the symbol information we have for this function. */
195 static struct btrace_function *
196 ftrace_new_function (struct btrace_function *prev,
197 struct minimal_symbol *mfun,
200 struct btrace_function *bfun;
202 bfun = xzalloc (sizeof (*bfun));
206 bfun->flow.prev = prev;
208 /* We start with the identities of min and max, respectively. */
209 bfun->lbegin = INT_MAX;
210 bfun->lend = INT_MIN;
214 /* Start counting at one. */
216 bfun->insn_offset = 1;
220 gdb_assert (prev->flow.next == NULL);
221 prev->flow.next = bfun;
223 bfun->number = prev->number + 1;
224 bfun->insn_offset = (prev->insn_offset
225 + VEC_length (btrace_insn_s, prev->insn));
231 /* Update the UP field of a function segment. */
234 ftrace_update_caller (struct btrace_function *bfun,
235 struct btrace_function *caller,
236 enum btrace_function_flag flags)
238 if (bfun->up != NULL)
239 ftrace_debug (bfun, "updating caller");
244 ftrace_debug (bfun, "set caller");
247 /* Fix up the caller for all segments of a function. */
250 ftrace_fixup_caller (struct btrace_function *bfun,
251 struct btrace_function *caller,
252 enum btrace_function_flag flags)
254 struct btrace_function *prev, *next;
256 ftrace_update_caller (bfun, caller, flags);
258 /* Update all function segments belonging to the same function. */
259 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
260 ftrace_update_caller (prev, caller, flags);
262 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
263 ftrace_update_caller (next, caller, flags);
266 /* Add a new function segment for a call.
267 CALLER is the chronologically preceding function segment.
268 MFUN and FUN are the symbol information we have for this function. */
270 static struct btrace_function *
271 ftrace_new_call (struct btrace_function *caller,
272 struct minimal_symbol *mfun,
275 struct btrace_function *bfun;
277 bfun = ftrace_new_function (caller, mfun, fun);
279 bfun->level = caller->level + 1;
281 ftrace_debug (bfun, "new call");
286 /* Add a new function segment for a tail call.
287 CALLER is the chronologically preceding function segment.
288 MFUN and FUN are the symbol information we have for this function. */
290 static struct btrace_function *
291 ftrace_new_tailcall (struct btrace_function *caller,
292 struct minimal_symbol *mfun,
295 struct btrace_function *bfun;
297 bfun = ftrace_new_function (caller, mfun, fun);
299 bfun->level = caller->level + 1;
300 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
302 ftrace_debug (bfun, "new tail call");
307 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
308 symbol information. */
310 static struct btrace_function *
311 ftrace_find_caller (struct btrace_function *bfun,
312 struct minimal_symbol *mfun,
315 for (; bfun != NULL; bfun = bfun->up)
317 /* Skip functions with incompatible symbol information. */
318 if (ftrace_function_switched (bfun, mfun, fun))
321 /* This is the function segment we're looking for. */
328 /* Find the innermost caller in the back trace of BFUN, skipping all
329 function segments that do not end with a call instruction (e.g.
330 tail calls ending with a jump). */
332 static struct btrace_function *
333 ftrace_find_call (struct gdbarch *gdbarch, struct btrace_function *bfun)
335 for (; bfun != NULL; bfun = bfun->up)
337 struct btrace_insn *last;
340 /* We do not allow empty function segments. */
341 gdb_assert (!VEC_empty (btrace_insn_s, bfun->insn));
343 last = VEC_last (btrace_insn_s, bfun->insn);
346 if (gdbarch_insn_is_call (gdbarch, pc))
353 /* Add a continuation segment for a function into which we return.
354 PREV is the chronologically preceding function segment.
355 MFUN and FUN are the symbol information we have for this function. */
357 static struct btrace_function *
358 ftrace_new_return (struct gdbarch *gdbarch,
359 struct btrace_function *prev,
360 struct minimal_symbol *mfun,
363 struct btrace_function *bfun, *caller;
365 bfun = ftrace_new_function (prev, mfun, fun);
367 /* It is important to start at PREV's caller. Otherwise, we might find
368 PREV itself, if PREV is a recursive function. */
369 caller = ftrace_find_caller (prev->up, mfun, fun);
372 /* The caller of PREV is the preceding btrace function segment in this
373 function instance. */
374 gdb_assert (caller->segment.next == NULL);
376 caller->segment.next = bfun;
377 bfun->segment.prev = caller;
379 /* Maintain the function level. */
380 bfun->level = caller->level;
382 /* Maintain the call stack. */
383 bfun->up = caller->up;
384 bfun->flags = caller->flags;
386 ftrace_debug (bfun, "new return");
390 /* We did not find a caller. This could mean that something went
391 wrong or that the call is simply not included in the trace. */
393 /* Let's search for some actual call. */
394 caller = ftrace_find_call (gdbarch, prev->up);
397 /* There is no call in PREV's back trace. We assume that the
398 branch trace did not include it. */
400 /* Let's find the topmost call function - this skips tail calls. */
401 while (prev->up != NULL)
404 /* We maintain levels for a series of returns for which we have
406 We start at the preceding function's level in case this has
407 already been a return for which we have not seen the call.
408 We start at level 0 otherwise, to handle tail calls correctly. */
409 bfun->level = min (0, prev->level) - 1;
411 /* Fix up the call stack for PREV. */
412 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
414 ftrace_debug (bfun, "new return - no caller");
418 /* There is a call in PREV's back trace to which we should have
419 returned. Let's remain at this level. */
420 bfun->level = prev->level;
422 ftrace_debug (bfun, "new return - unknown caller");
429 /* Add a new function segment for a function switch.
430 PREV is the chronologically preceding function segment.
431 MFUN and FUN are the symbol information we have for this function. */
433 static struct btrace_function *
434 ftrace_new_switch (struct btrace_function *prev,
435 struct minimal_symbol *mfun,
438 struct btrace_function *bfun;
440 /* This is an unexplained function switch. The call stack will likely
441 be wrong at this point. */
442 bfun = ftrace_new_function (prev, mfun, fun);
444 /* We keep the function level. */
445 bfun->level = prev->level;
447 ftrace_debug (bfun, "new switch");
452 /* Update BFUN with respect to the instruction at PC. This may create new
454 Return the chronologically latest function segment, never NULL. */
456 static struct btrace_function *
457 ftrace_update_function (struct gdbarch *gdbarch,
458 struct btrace_function *bfun, CORE_ADDR pc)
460 struct bound_minimal_symbol bmfun;
461 struct minimal_symbol *mfun;
463 struct btrace_insn *last;
465 /* Try to determine the function we're in. We use both types of symbols
466 to avoid surprises when we sometimes get a full symbol and sometimes
467 only a minimal symbol. */
468 fun = find_pc_function (pc);
469 bmfun = lookup_minimal_symbol_by_pc (pc);
472 if (fun == NULL && mfun == NULL)
473 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
475 /* If we didn't have a function before, we create one. */
477 return ftrace_new_function (bfun, mfun, fun);
479 /* Check the last instruction, if we have one.
480 We do this check first, since it allows us to fill in the call stack
481 links in addition to the normal flow links. */
483 if (!VEC_empty (btrace_insn_s, bfun->insn))
484 last = VEC_last (btrace_insn_s, bfun->insn);
492 /* Check for returns. */
493 if (gdbarch_insn_is_ret (gdbarch, lpc))
494 return ftrace_new_return (gdbarch, bfun, mfun, fun);
496 /* Check for calls. */
497 if (gdbarch_insn_is_call (gdbarch, lpc))
501 size = gdb_insn_length (gdbarch, lpc);
503 /* Ignore calls to the next instruction. They are used for PIC. */
504 if (lpc + size != pc)
505 return ftrace_new_call (bfun, mfun, fun);
509 /* Check if we're switching functions for some other reason. */
510 if (ftrace_function_switched (bfun, mfun, fun))
512 DEBUG_FTRACE ("switching from %s in %s at %s",
513 ftrace_print_insn_addr (last),
514 ftrace_print_function_name (bfun),
515 ftrace_print_filename (bfun));
519 CORE_ADDR start, lpc;
521 start = get_pc_function_start (pc);
523 /* If we can't determine the function for PC, we treat a jump at
524 the end of the block as tail call. */
530 /* Jumps indicate optimized tail calls. */
531 if (start == pc && gdbarch_insn_is_jump (gdbarch, lpc))
532 return ftrace_new_tailcall (bfun, mfun, fun);
535 return ftrace_new_switch (bfun, mfun, fun);
541 /* Update BFUN's source range with respect to the instruction at PC. */
544 ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
546 struct symtab_and_line sal;
547 const char *fullname;
549 sal = find_pc_line (pc, 0);
550 if (sal.symtab == NULL || sal.line == 0)
552 DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
556 /* Check if we switched files. This could happen if, say, a macro that
557 is defined in another file is expanded here. */
558 fullname = symtab_to_fullname (sal.symtab);
559 if (ftrace_skip_file (bfun, fullname))
561 DEBUG_FTRACE ("ignoring file at %s, file=%s",
562 core_addr_to_string_nz (pc), fullname);
566 /* Update the line range. */
567 bfun->lbegin = min (bfun->lbegin, sal.line);
568 bfun->lend = max (bfun->lend, sal.line);
570 if (record_debug > 1)
571 ftrace_debug (bfun, "update lines");
574 /* Add the instruction at PC to BFUN's instructions. */
577 ftrace_update_insns (struct btrace_function *bfun, CORE_ADDR pc)
579 struct btrace_insn *insn;
581 insn = VEC_safe_push (btrace_insn_s, bfun->insn, NULL);
584 if (record_debug > 1)
585 ftrace_debug (bfun, "update insn");
588 /* Compute the function branch trace from BTS trace. */
591 btrace_compute_ftrace_bts (struct thread_info *tp,
592 const struct btrace_data_bts *btrace)
594 struct btrace_thread_info *btinfo;
595 struct btrace_function *begin, *end;
596 struct gdbarch *gdbarch;
600 gdbarch = target_gdbarch ();
601 btinfo = &tp->btrace;
602 begin = btinfo->begin;
604 level = begin != NULL ? -btinfo->level : INT_MAX;
605 blk = VEC_length (btrace_block_s, btrace->blocks);
609 btrace_block_s *block;
614 block = VEC_index (btrace_block_s, btrace->blocks, blk);
621 /* We should hit the end of the block. Warn if we went too far. */
624 warning (_("Recorded trace may be corrupted around %s."),
625 core_addr_to_string_nz (pc));
629 end = ftrace_update_function (gdbarch, end, pc);
633 /* Maintain the function level offset.
634 For all but the last block, we do it here. */
636 level = min (level, end->level);
638 ftrace_update_insns (end, pc);
639 ftrace_update_lines (end, pc);
641 /* We're done once we pushed the instruction at the end. */
642 if (block->end == pc)
645 size = gdb_insn_length (gdbarch, pc);
647 /* Make sure we terminate if we fail to compute the size. */
650 warning (_("Recorded trace may be incomplete around %s."),
651 core_addr_to_string_nz (pc));
657 /* Maintain the function level offset.
658 For the last block, we do it here to not consider the last
660 Since the last instruction corresponds to the current instruction
661 and is not really part of the execution history, it shouldn't
664 level = min (level, end->level);
668 btinfo->begin = begin;
671 /* LEVEL is the minimal function level of all btrace function segments.
672 Define the global level offset to -LEVEL so all function levels are
673 normalized to start at zero. */
674 btinfo->level = -level;
677 /* Compute the function branch trace from a block branch trace BTRACE for
678 a thread given by BTINFO. */
681 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
683 DEBUG ("compute ftrace");
685 switch (btrace->format)
687 case BTRACE_FORMAT_NONE:
690 case BTRACE_FORMAT_BTS:
691 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
695 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
698 /* Add an entry for the current PC. */
701 btrace_add_pc (struct thread_info *tp)
703 struct btrace_data btrace;
704 struct btrace_block *block;
705 struct regcache *regcache;
706 struct cleanup *cleanup;
709 regcache = get_thread_regcache (tp->ptid);
710 pc = regcache_read_pc (regcache);
712 btrace_data_init (&btrace);
713 btrace.format = BTRACE_FORMAT_BTS;
714 btrace.variant.bts.blocks = NULL;
716 cleanup = make_cleanup_btrace_data (&btrace);
718 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
722 btrace_compute_ftrace (tp, &btrace);
724 do_cleanups (cleanup);
730 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
732 if (tp->btrace.target != NULL)
735 if (!target_supports_btrace (conf->format))
736 error (_("Target does not support branch tracing."));
738 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
740 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
742 /* Add an entry for the current PC so we start tracing from where we
744 if (tp->btrace.target != NULL)
750 const struct btrace_config *
751 btrace_conf (const struct btrace_thread_info *btinfo)
753 if (btinfo->target == NULL)
756 return target_btrace_conf (btinfo->target);
762 btrace_disable (struct thread_info *tp)
764 struct btrace_thread_info *btp = &tp->btrace;
767 if (btp->target == NULL)
770 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
772 target_disable_btrace (btp->target);
781 btrace_teardown (struct thread_info *tp)
783 struct btrace_thread_info *btp = &tp->btrace;
786 if (btp->target == NULL)
789 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
791 target_teardown_btrace (btp->target);
797 /* Stitch branch trace in BTS format. */
800 btrace_stitch_bts (struct btrace_data_bts *btrace,
801 const struct btrace_thread_info *btinfo)
803 struct btrace_function *last_bfun;
804 struct btrace_insn *last_insn;
805 btrace_block_s *first_new_block;
807 last_bfun = btinfo->end;
808 gdb_assert (last_bfun != NULL);
810 /* Beware that block trace starts with the most recent block, so the
811 chronologically first block in the new trace is the last block in
812 the new trace's block vector. */
813 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
814 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
815 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
817 /* If the current PC at the end of the block is the same as in our current
818 trace, there are two explanations:
819 1. we executed the instruction and some branch brought us back.
820 2. we have not made any progress.
821 In the first case, the delta trace vector should contain at least two
823 In the second case, the delta trace vector should contain exactly one
824 entry for the partial block containing the current PC. Remove it. */
825 if (first_new_block->end == last_insn->pc
826 && VEC_length (btrace_block_s, btrace->blocks) == 1)
828 VEC_pop (btrace_block_s, btrace->blocks);
832 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
833 core_addr_to_string_nz (first_new_block->end));
835 /* Do a simple sanity check to make sure we don't accidentally end up
836 with a bad block. This should not occur in practice. */
837 if (first_new_block->end < last_insn->pc)
839 warning (_("Error while trying to read delta trace. Falling back to "
844 /* We adjust the last block to start at the end of our current trace. */
845 gdb_assert (first_new_block->begin == 0);
846 first_new_block->begin = last_insn->pc;
848 /* We simply pop the last insn so we can insert it again as part of
849 the normal branch trace computation.
850 Since instruction iterators are based on indices in the instructions
851 vector, we don't leave any pointers dangling. */
852 DEBUG ("pruning insn at %s for stitching",
853 ftrace_print_insn_addr (last_insn));
855 VEC_pop (btrace_insn_s, last_bfun->insn);
857 /* The instructions vector may become empty temporarily if this has
858 been the only instruction in this function segment.
859 This violates the invariant but will be remedied shortly by
860 btrace_compute_ftrace when we add the new trace. */
864 /* Adjust the block trace in order to stitch old and new trace together.
865 BTRACE is the new delta trace between the last and the current stop.
866 BTINFO is the old branch trace until the last stop.
867 May modifx BTRACE as well as the existing trace in BTINFO.
868 Return 0 on success, -1 otherwise. */
871 btrace_stitch_trace (struct btrace_data *btrace,
872 const struct btrace_thread_info *btinfo)
874 /* If we don't have trace, there's nothing to do. */
875 if (btrace_data_empty (btrace))
878 switch (btrace->format)
880 case BTRACE_FORMAT_NONE:
883 case BTRACE_FORMAT_BTS:
884 return btrace_stitch_bts (&btrace->variant.bts, btinfo);
887 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
890 /* Clear the branch trace histories in BTINFO. */
893 btrace_clear_history (struct btrace_thread_info *btinfo)
895 xfree (btinfo->insn_history);
896 xfree (btinfo->call_history);
897 xfree (btinfo->replay);
899 btinfo->insn_history = NULL;
900 btinfo->call_history = NULL;
901 btinfo->replay = NULL;
907 btrace_fetch (struct thread_info *tp)
909 struct btrace_thread_info *btinfo;
910 struct btrace_target_info *tinfo;
911 struct btrace_data btrace;
912 struct cleanup *cleanup;
915 DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
917 btinfo = &tp->btrace;
918 tinfo = btinfo->target;
922 /* There's no way we could get new trace while replaying.
923 On the other hand, delta trace would return a partial record with the
924 current PC, which is the replay PC, not the last PC, as expected. */
925 if (btinfo->replay != NULL)
928 btrace_data_init (&btrace);
929 cleanup = make_cleanup_btrace_data (&btrace);
931 /* Let's first try to extend the trace we already have. */
932 if (btinfo->end != NULL)
934 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
937 /* Success. Let's try to stitch the traces together. */
938 errcode = btrace_stitch_trace (&btrace, btinfo);
942 /* We failed to read delta trace. Let's try to read new trace. */
943 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
945 /* If we got any new trace, discard what we have. */
946 if (errcode == 0 && !btrace_data_empty (&btrace))
950 /* If we were not able to read the trace, we start over. */
954 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
958 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
960 /* If we were not able to read the branch trace, signal an error. */
962 error (_("Failed to read branch trace."));
964 /* Compute the trace, provided we have any. */
965 if (!btrace_data_empty (&btrace))
967 btrace_clear_history (btinfo);
968 btrace_compute_ftrace (tp, &btrace);
971 do_cleanups (cleanup);
977 btrace_clear (struct thread_info *tp)
979 struct btrace_thread_info *btinfo;
980 struct btrace_function *it, *trash;
982 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
984 /* Make sure btrace frames that may hold a pointer into the branch
985 trace data are destroyed. */
986 reinit_frame_cache ();
988 btinfo = &tp->btrace;
999 btinfo->begin = NULL;
1002 btrace_clear_history (btinfo);
1008 btrace_free_objfile (struct objfile *objfile)
1010 struct thread_info *tp;
1012 DEBUG ("free objfile");
1014 ALL_NON_EXITED_THREADS (tp)
1018 #if defined (HAVE_LIBEXPAT)
1020 /* Check the btrace document version. */
1023 check_xml_btrace_version (struct gdb_xml_parser *parser,
1024 const struct gdb_xml_element *element,
1025 void *user_data, VEC (gdb_xml_value_s) *attributes)
1027 const char *version = xml_find_attribute (attributes, "version")->value;
1029 if (strcmp (version, "1.0") != 0)
1030 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1033 /* Parse a btrace "block" xml record. */
1036 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1037 const struct gdb_xml_element *element,
1038 void *user_data, VEC (gdb_xml_value_s) *attributes)
1040 struct btrace_data *btrace;
1041 struct btrace_block *block;
1042 ULONGEST *begin, *end;
1046 switch (btrace->format)
1048 case BTRACE_FORMAT_BTS:
1051 case BTRACE_FORMAT_NONE:
1052 btrace->format = BTRACE_FORMAT_BTS;
1053 btrace->variant.bts.blocks = NULL;
1057 gdb_xml_error (parser, _("Btrace format error."));
1060 begin = xml_find_attribute (attributes, "begin")->value;
1061 end = xml_find_attribute (attributes, "end")->value;
1063 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1064 block->begin = *begin;
1068 static const struct gdb_xml_attribute block_attributes[] = {
1069 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1070 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
1071 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1074 static const struct gdb_xml_attribute btrace_attributes[] = {
1075 { "version", GDB_XML_AF_NONE, NULL, NULL },
1076 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1079 static const struct gdb_xml_element btrace_children[] = {
1080 { "block", block_attributes, NULL,
1081 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
1082 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1085 static const struct gdb_xml_element btrace_elements[] = {
1086 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
1087 check_xml_btrace_version, NULL },
1088 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1091 #endif /* defined (HAVE_LIBEXPAT) */
/* Parse a branch trace xml document XML into BTRACE.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1121 #if defined (HAVE_LIBEXPAT)
1123 /* Parse a btrace-conf "bts" xml record. */
1126 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1127 const struct gdb_xml_element *element,
1128 void *user_data, VEC (gdb_xml_value_s) *attributes)
1130 struct btrace_config *conf;
1131 struct gdb_xml_value *size;
1134 conf->format = BTRACE_FORMAT_BTS;
1137 size = xml_find_attribute (attributes, "size");
1139 conf->bts.size = (unsigned int) * (ULONGEST *) size->value;
1142 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
1143 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
1144 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1147 static const struct gdb_xml_element btrace_conf_children[] = {
1148 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
1149 parse_xml_btrace_conf_bts, NULL },
1150 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1153 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
1154 { "version", GDB_XML_AF_NONE, NULL, NULL },
1155 { NULL, GDB_XML_AF_NONE, NULL, NULL }
1158 static const struct gdb_xml_element btrace_conf_elements[] = {
1159 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
1160 GDB_XML_EF_NONE, NULL, NULL },
1161 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
1164 #endif /* defined (HAVE_LIBEXPAT) */
/* Parse a branch trace configuration xml document XML into CONF.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1189 const struct btrace_insn *
1190 btrace_insn_get (const struct btrace_insn_iterator *it)
1192 const struct btrace_function *bfun;
1193 unsigned int index, end;
1196 bfun = it->function;
1198 /* The index is within the bounds of this function's instruction vector. */
1199 end = VEC_length (btrace_insn_s, bfun->insn);
1200 gdb_assert (0 < end);
1201 gdb_assert (index < end);
1203 return VEC_index (btrace_insn_s, bfun->insn, index);
1209 btrace_insn_number (const struct btrace_insn_iterator *it)
1211 const struct btrace_function *bfun;
1213 bfun = it->function;
1214 return bfun->insn_offset + it->index;
1220 btrace_insn_begin (struct btrace_insn_iterator *it,
1221 const struct btrace_thread_info *btinfo)
1223 const struct btrace_function *bfun;
1225 bfun = btinfo->begin;
1227 error (_("No trace."));
1229 it->function = bfun;
1236 btrace_insn_end (struct btrace_insn_iterator *it,
1237 const struct btrace_thread_info *btinfo)
1239 const struct btrace_function *bfun;
1240 unsigned int length;
1244 error (_("No trace."));
1246 /* The last instruction in the last function is the current instruction.
1247 We point to it - it is one past the end of the execution trace. */
1248 length = VEC_length (btrace_insn_s, bfun->insn);
1250 it->function = bfun;
1251 it->index = length - 1;
1257 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
1259 const struct btrace_function *bfun;
1260 unsigned int index, steps;
1262 bfun = it->function;
1268 unsigned int end, space, adv;
1270 end = VEC_length (btrace_insn_s, bfun->insn);
1272 gdb_assert (0 < end);
1273 gdb_assert (index < end);
1275 /* Compute the number of instructions remaining in this segment. */
1276 space = end - index;
1278 /* Advance the iterator as far as possible within this segment. */
1279 adv = min (space, stride);
1284 /* Move to the next function if we're at the end of this one. */
1287 const struct btrace_function *next;
1289 next = bfun->flow.next;
1292 /* We stepped past the last function.
1294 Let's adjust the index to point to the last instruction in
1295 the previous function. */
1301 /* We now point to the first instruction in the new function. */
1306 /* We did make progress. */
1307 gdb_assert (adv > 0);
1310 /* Update the iterator. */
1311 it->function = bfun;
1320 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
1322 const struct btrace_function *bfun;
1323 unsigned int index, steps;
1325 bfun = it->function;
1333 /* Move to the previous function if we're at the start of this one. */
1336 const struct btrace_function *prev;
1338 prev = bfun->flow.prev;
1342 /* We point to one after the last instruction in the new function. */
1344 index = VEC_length (btrace_insn_s, bfun->insn);
1346 /* There is at least one instruction in this function segment. */
1347 gdb_assert (index > 0);
1350 /* Advance the iterator as far as possible within this segment. */
1351 adv = min (index, stride);
1356 /* We did make progress. */
1357 gdb_assert (adv > 0);
1360 /* Update the iterator. */
1361 it->function = bfun;
/* Compare the positions of two instruction iterators.  Return negative,
   zero, or positive if LHS is before, equal to, or after RHS.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  return (int) (lnum - rnum);
}
1384 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1385 const struct btrace_thread_info *btinfo,
1386 unsigned int number)
1388 const struct btrace_function *bfun;
1391 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1392 if (bfun->insn_offset <= number)
1398 end = bfun->insn_offset + VEC_length (btrace_insn_s, bfun->insn);
1402 it->function = bfun;
1403 it->index = number - bfun->insn_offset;
1410 const struct btrace_function *
1411 btrace_call_get (const struct btrace_call_iterator *it)
1413 return it->function;
1419 btrace_call_number (const struct btrace_call_iterator *it)
1421 const struct btrace_thread_info *btinfo;
1422 const struct btrace_function *bfun;
1425 btinfo = it->btinfo;
1426 bfun = it->function;
1428 return bfun->number;
1430 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1431 number of the last function. */
1433 insns = VEC_length (btrace_insn_s, bfun->insn);
1435 /* If the function contains only a single instruction (i.e. the current
1436 instruction), it will be skipped and its number is already the number
1439 return bfun->number;
1441 /* Otherwise, return one more than the number of the last function. */
1442 return bfun->number + 1;
1448 btrace_call_begin (struct btrace_call_iterator *it,
1449 const struct btrace_thread_info *btinfo)
1451 const struct btrace_function *bfun;
1453 bfun = btinfo->begin;
1455 error (_("No trace."));
1457 it->btinfo = btinfo;
1458 it->function = bfun;
1464 btrace_call_end (struct btrace_call_iterator *it,
1465 const struct btrace_thread_info *btinfo)
1467 const struct btrace_function *bfun;
1471 error (_("No trace."));
1473 it->btinfo = btinfo;
1474 it->function = NULL;
1480 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
1482 const struct btrace_function *bfun;
1485 bfun = it->function;
1487 while (bfun != NULL)
1489 const struct btrace_function *next;
1492 next = bfun->flow.next;
1495 /* Ignore the last function if it only contains a single
1496 (i.e. the current) instruction. */
1497 insns = VEC_length (btrace_insn_s, bfun->insn);
1502 if (stride == steps)
1509 it->function = bfun;
1516 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
1518 const struct btrace_thread_info *btinfo;
1519 const struct btrace_function *bfun;
1522 bfun = it->function;
1529 btinfo = it->btinfo;
1534 /* Ignore the last function if it only contains a single
1535 (i.e. the current) instruction. */
1536 insns = VEC_length (btrace_insn_s, bfun->insn);
1538 bfun = bfun->flow.prev;
1546 while (steps < stride)
1548 const struct btrace_function *prev;
1550 prev = bfun->flow.prev;
1558 it->function = bfun;
/* See btrace.h.  Compare the positions of two call iterators.  Returns a
   negative value if LHS precedes RHS, zero if they are equal, and a
   positive value if LHS follows RHS.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  /* Compare explicitly instead of returning (int) (lnum - rnum): the
     unsigned difference does not fit into int when the iterators are more
     than INT_MAX calls apart, and converting such a value to int is
     implementation-defined and may yield the wrong sign.  */
  if (lnum < rnum)
    return -1;

  if (lnum > rnum)
    return 1;

  return 0;
}
1579 btrace_find_call_by_number (struct btrace_call_iterator *it,
1580 const struct btrace_thread_info *btinfo,
1581 unsigned int number)
1583 const struct btrace_function *bfun;
1585 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1589 bnum = bfun->number;
1592 it->btinfo = btinfo;
1593 it->function = bfun;
1597 /* Functions are ordered and numbered consecutively. We could bail out
1598 earlier. On the other hand, it is very unlikely that we search for
1599 a nonexistent function. */
1608 btrace_set_insn_history (struct btrace_thread_info *btinfo,
1609 const struct btrace_insn_iterator *begin,
1610 const struct btrace_insn_iterator *end)
1612 if (btinfo->insn_history == NULL)
1613 btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));
1615 btinfo->insn_history->begin = *begin;
1616 btinfo->insn_history->end = *end;
1622 btrace_set_call_history (struct btrace_thread_info *btinfo,
1623 const struct btrace_call_iterator *begin,
1624 const struct btrace_call_iterator *end)
1626 gdb_assert (begin->btinfo == end->btinfo);
1628 if (btinfo->call_history == NULL)
1629 btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));
1631 btinfo->call_history->begin = *begin;
1632 btinfo->call_history->end = *end;
1638 btrace_is_replaying (struct thread_info *tp)
1640 return tp->btrace.replay != NULL;
1646 btrace_is_empty (struct thread_info *tp)
1648 struct btrace_insn_iterator begin, end;
1649 struct btrace_thread_info *btinfo;
1651 btinfo = &tp->btrace;
1653 if (btinfo->begin == NULL)
1656 btrace_insn_begin (&begin, btinfo);
1657 btrace_insn_end (&end, btinfo);
1659 return btrace_insn_cmp (&begin, &end) == 0;
/* Cleanup callback: forward the cleanup request to btrace_data_fini.
   ARG is the struct btrace_data to finalize.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini (arg);
}
1673 make_cleanup_btrace_data (struct btrace_data *data)
1675 return make_cleanup (do_btrace_data_cleanup, data);