/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "record.h"
#include "filenames.h"
#include "xml-support.h"

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                       \
  do                                                              \
    {                                                             \
      if (record_debug != 0)                                      \
        fprintf_unfiltered (gdb_stdlog,                           \
                            "[btrace] " msg "\n", ##args);        \
    }                                                             \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
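
/* A minimal usage sketch of the DEBUG macro (illustrative, not called
   anywhere): thanks to the do ... while (0) wrapper, it expands safely
   inside an unbraced if/else:

     if (tp != NULL)
       DEBUG ("enable thread %d (%s)", tp->num,
              target_pid_to_str (tp->ptid));
     else
       DEBUG ("no thread");
*/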

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int lbegin, lend, level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  lbegin = bfun->lbegin;
  lend = bfun->lend;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, lines = [%d; %d], "
                "insn = [%u; %u)", prefix, fun, file, level, lbegin, lend,
                ibegin, iend);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Return non-zero if we should skip this file when generating the function
   call history, zero otherwise.
   We would want to do that if, say, a macro that is defined in another file
   is expanded in this function.  */

static int
ftrace_skip_file (const struct btrace_function *bfun, const char *fullname)
{
  struct symbol *sym;
  const char *bfile;

  sym = bfun->sym;
  if (sym == NULL)
    return 1;

  bfile = symtab_to_fullname (symbol_symtab (sym));

  return (filename_cmp (bfile, fullname) != 0);
}
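
/* Illustration for the check above (hypothetical file names): if a
   function in record.c expands a macro that is defined in common.h,
   find_pc_line may attribute some of the expanded instructions to
   common.h.  Without this check, those lines would stretch the
   function's source range to cover an unrelated file.  */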

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  /* We start with the identities of min and max, respectively.  */
  bfun->lbegin = INT_MAX;
  bfun->lend = INT_MIN;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}
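
/* Numbering illustration (derived from the code above): the first
   segment gets number 1 and insn_offset 1.  If it contains five
   instructions, its successor gets number 2 and insn_offset 6, so
   instruction numbers are consecutive across segment boundaries.  */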

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level = caller->level + 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost call function - this skips tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly.  */
          bfun->level = min (0, prev->level) - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned.  Let's remain at this level.  */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}
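
/* Worked example (illustrative): suppose the trace starts inside F, and
   we then see returns to E and to D without ever having seen the
   corresponding calls.  F's segment has level 0, the segment for E gets
   level min (0, 0) - 1 = -1, and the segment for D gets
   min (0, -1) - 1 = -2.  The global level offset computed in
   btrace_compute_ftrace_bts later normalizes the minimal level back to
   zero.  */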

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          return ftrace_new_return (bfun, mfun, fun);

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call.  */
            if (start == 0 || start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
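
/* Note on the PIC special case above (illustrative, 32-bit x86):
   position-independent code commonly loads the current PC with a call
   to the next instruction followed by a pop, e.g.

     call 1f
   1: pop %ebx

   Such a call never returns, so treating it as a real call would push
   a bogus entry onto the reconstructed call stack.  */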

/* Update BFUN's source range with respect to the instruction at PC.  */

static void
ftrace_update_lines (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct symtab_and_line sal;
  const char *fullname;

  sal = find_pc_line (pc, 0);
  if (sal.symtab == NULL || sal.line == 0)
    {
      DEBUG_FTRACE ("no lines at %s", core_addr_to_string_nz (pc));
      return;
    }

  /* Check if we switched files.  This could happen if, say, a macro that
     is defined in another file is expanded here.  */
  fullname = symtab_to_fullname (sal.symtab);
  if (ftrace_skip_file (bfun, fullname))
    {
      DEBUG_FTRACE ("ignoring file at %s, file=%s",
                    core_addr_to_string_nz (pc), fullname);
      return;
    }

  /* Update the line range.  */
  bfun->lbegin = min (bfun->lbegin, sal.line);
  bfun->lend = max (bfun->lend, sal.line);

  if (record_debug > 1)
    ftrace_debug (bfun, "update lines");
}

/* Add INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  volatile struct gdb_exception error;
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY_CATCH (error, RETURN_MASK_ERROR)
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }

  return iclass;
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          volatile struct gdb_exception error;
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace - unless we're at the
                 beginning.  */
              if (begin != NULL)
                {
                  warning (_("Recorded trace may be corrupted around %s."),
                           core_addr_to_string_nz (pc));

                  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
                  ngaps += 1;
                }
              break;
            }

          end = ftrace_update_function (end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = min (level, end->level);

          size = 0;
          TRY_CATCH (error, RETURN_MASK_ERROR)
            size = gdb_insn_length (gdbarch, pc);

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);

          ftrace_update_insns (end, &insn);
          ftrace_update_lines (end, pc);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));

              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
              ngaps += 1;

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
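
/* A sketch of the input format, for orientation (addresses made up): a
   BTS block covers the PCs [begin; end] that were executed sequentially,
   and blocks are stored in reverse chronological order.  A trace

     blocks[1] = { begin = 0x4004f0, end = 0x4004fa }   <- oldest
     blocks[0] = { begin = 0x400530, end = 0x400535 }   <- newest

   therefore means: execution ran from 0x4004f0 to 0x4004fa, branched to
   0x400530, and stopped at 0x400535.  */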

/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by TP.  */

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version = xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
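
/* An example document accepted by the parser tables above (addresses
   are made up):

     <btrace version="1.0">
       <block begin="0x400530" end="0x400535"/>
       <block begin="0x4004f0" end="0x4004fa"/>
     </btrace>

   The version attribute is required and must be "1.0"; block elements
   are optional and repeatable.  */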

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
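
/* An example configuration document accepted by the tables above:

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   Both the bts element and its size attribute are optional; the size
   value shown is made up.  */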

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;

  /* Return zero if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return 0;

  return bfun->insn_offset + it->index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new
             function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We count
             it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
        lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
        rnum -= 1;
    }

  return (int) (lnum - rnum);
}
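
/* A minimal usage sketch for the instruction iterators (illustrative;
   process_pc is a hypothetical consumer):

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, &tp->btrace);
     btrace_insn_end (&end, &tp->btrace);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn;

         insn = btrace_insn_get (&it);
         if (insn != NULL)
           process_pc (insn->pc);

         if (btrace_insn_next (&it, 1) == 0)
           break;
       }

   btrace_insn_get returns NULL for gaps, and btrace_insn_next returns
   the number of steps actually taken.  */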

/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end, length;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      if (bfun->insn_offset <= number)
        break;
    }

  if (bfun == NULL)
    return 0;

  length = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (length > 0);

  end = bfun->insn_offset + length;
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}

/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}

/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     of the last function.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}

/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}

/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}

/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction.  */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;

  return steps;
}

/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}

/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively.  We could bail out
         earlier.  On the other hand, it is very unlikely that we search for
         a nonexistent function.  */
    }

  return 0;
}
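
/* A minimal usage sketch for the call iterators (illustrative):

     struct btrace_call_iterator it, end;

     btrace_call_begin (&it, &tp->btrace);
     btrace_call_end (&end, &tp->btrace);

     for (; btrace_call_cmp (&it, &end) < 0; btrace_call_next (&it, 1))
       {
         const struct btrace_function *bfun;

         bfun = btrace_call_get (&it);
         DEBUG ("%s", ftrace_print_function_name (bfun));
       }
*/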

/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}

/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
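
/* A typical lifecycle of this API (illustrative sketch; CONF and
   process_trace are hypothetical, and error handling is omitted):

     btrace_enable (tp, conf);        start recording
     btrace_fetch (tp);               read, stitch, and compute the trace

     if (!btrace_is_empty (tp))
       process_trace (&tp->btrace);   walk the trace with the iterators

     btrace_disable (tp);             stop recording and discard

   In GDB, the actual caller of these functions is the record-btrace
   target.  */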

/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini (arg);
}

/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}