1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
36 #include "cli/cli-utils.h"
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
52 /* A vector of function segments. */
53 typedef struct btrace_function * bfun_s;
54 DEF_VEC_P (bfun_s);
56 static void btrace_add_pc (struct thread_info *tp);
58 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
61 #define DEBUG(msg, args...)						\
62   do									\
63     {									\
64       if (record_debug != 0)						\
65         fprintf_unfiltered (gdb_stdlog,				\
66                             "[btrace] " msg "\n", ##args);		\
67     }								\
68   while (0)
70 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
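
/* For example (illustrative only): with record_debug enabled,

     DEBUG_FTRACE ("no symbol at %s", "0x400500");

   prints "[btrace] [ftrace] no symbol at 0x400500" followed by a newline
   to gdb_stdlog.  */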
72 /* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
76 ftrace_print_function_name (const struct btrace_function *bfun)
78 struct minimal_symbol *msym;
85 return SYMBOL_PRINT_NAME (sym);
88 return MSYMBOL_PRINT_NAME (msym);
93 /* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
97 ftrace_print_filename (const struct btrace_function *bfun)
100 const char *filename;
105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
107 filename = "<unknown>";
112 /* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
116 ftrace_print_insn_addr (const struct btrace_insn *insn)
121 return core_addr_to_string_nz (insn->pc);
124 /* Print an ftrace debug status message. */
127 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
129 const char *fun, *file;
130 unsigned int ibegin, iend;
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
137 ibegin = bfun->insn_offset;
138 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
144 /* Return the number of instructions in a given function call segment. */
147 ftrace_call_num_insn (const struct btrace_function *bfun)
152 /* A gap is always counted as one instruction. */
153 if (bfun->errcode != 0)
156 return VEC_length (btrace_insn_s, bfun->insn);
159 /* Return the function segment with the given NUMBER or NULL if no such segment
160 exists. BTINFO is the branch trace information for the current thread. */
162 static struct btrace_function *
163 ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
166 if (number == 0 || number > btinfo->functions.size ())
169 return btinfo->functions[number - 1];
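
/* A minimal sketch (not part of the original code) of how the 1-based
   segment numbers stored in BFUN->UP are used to walk a caller chain; it
   merely prints each caller segment for debugging.  */

static void
ftrace_sketch_walk_callers (const struct btrace_thread_info *btinfo,
                            const struct btrace_function *bfun)
{
  /* BFUN->UP holds the 1-based number of the caller's segment; zero means
     that no caller is known.  */
  while (bfun != NULL && bfun->up != 0)
    {
      bfun = ftrace_find_call_by_number (btinfo, bfun->up);

      if (bfun != NULL)
        ftrace_debug (bfun, "caller");
    }
}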
172 /* Return non-zero if BFUN does not match MFUN and FUN,
173 return zero otherwise. */
176 ftrace_function_switched (const struct btrace_function *bfun,
177 const struct minimal_symbol *mfun,
178 const struct symbol *fun)
180 struct minimal_symbol *msym;
186 /* If the minimal symbol changed, we certainly switched functions. */
187 if (mfun != NULL && msym != NULL
188 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
191 /* If the symbol changed, we certainly switched functions. */
192 if (fun != NULL && sym != NULL)
194 const char *bfname, *fname;
196 /* Check the function name. */
197 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
200 /* Check the location of those functions, as well. */
201 bfname = symtab_to_fullname (symbol_symtab (sym));
202 fname = symtab_to_fullname (symbol_symtab (fun));
203 if (filename_cmp (fname, bfname) != 0)
207 /* If we lost symbol information, we switched functions. */
208 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
211 /* If we gained symbol information, we switched functions. */
212 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
218 /* Allocate and initialize a new branch trace function segment at the end of
220 BTINFO is the branch trace information for the current thread.
221 MFUN and FUN are the symbol information we have for this function. */
223 static struct btrace_function *
224 ftrace_new_function (struct btrace_thread_info *btinfo,
225 struct minimal_symbol *mfun,
228 struct btrace_function *bfun;
230 bfun = XCNEW (struct btrace_function);
235 if (btinfo->functions.empty ())
237 /* Start counting at one. */
239 bfun->insn_offset = 1;
243 struct btrace_function *prev = btinfo->functions.back ();
245 bfun->number = prev->number + 1;
246 bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
247 bfun->level = prev->level;
250 btinfo->functions.push_back (bfun);
254 /* Update the UP field of a function segment. */
257 ftrace_update_caller (struct btrace_function *bfun,
258 struct btrace_function *caller,
259 enum btrace_function_flag flags)
262 ftrace_debug (bfun, "updating caller");
264 bfun->up = caller->number;
267 ftrace_debug (bfun, "set caller");
268 ftrace_debug (caller, "..to");
271 /* Fix up the caller for all segments of a function. */
274 ftrace_fixup_caller (struct btrace_function *bfun,
275 struct btrace_function *caller,
276 enum btrace_function_flag flags)
278 struct btrace_function *prev, *next;
280 ftrace_update_caller (bfun, caller, flags);
282 /* Update all function segments belonging to the same function. */
283 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
284 ftrace_update_caller (prev, caller, flags);
286 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
287 ftrace_update_caller (next, caller, flags);
290 /* Add a new function segment for a call at the end of the trace.
291 BTINFO is the branch trace information for the current thread.
292 MFUN and FUN are the symbol information we have for this function. */
294 static struct btrace_function *
295 ftrace_new_call (struct btrace_thread_info *btinfo,
296 struct minimal_symbol *mfun,
299 const unsigned int length = btinfo->functions.size ();
300 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
305 ftrace_debug (bfun, "new call");
310 /* Add a new function segment for a tail call at the end of the trace.
311 BTINFO is the branch trace information for the current thread.
312 MFUN and FUN are the symbol information we have for this function. */
314 static struct btrace_function *
315 ftrace_new_tailcall (struct btrace_thread_info *btinfo,
316 struct minimal_symbol *mfun,
319 const unsigned int length = btinfo->functions.size ();
320 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
324 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
326 ftrace_debug (bfun, "new tail call");
331 /* Return the caller of BFUN or NULL if there is none. This function skips
332 tail calls in the call chain. BTINFO is the branch trace information for
333 the current thread. */
334 static struct btrace_function *
335 ftrace_get_caller (struct btrace_thread_info *btinfo,
336 struct btrace_function *bfun)
338 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
339 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
340 return ftrace_find_call_by_number (btinfo, bfun->up);
345 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
346 symbol information. BTINFO is the branch trace information for the current
349 static struct btrace_function *
350 ftrace_find_caller (struct btrace_thread_info *btinfo,
351 struct btrace_function *bfun,
352 struct minimal_symbol *mfun,
355 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
357 /* Skip functions with incompatible symbol information. */
358 if (ftrace_function_switched (bfun, mfun, fun))
361 /* This is the function segment we're looking for. */
368 /* Find the innermost caller in the back trace of BFUN, skipping all
369 function segments that do not end with a call instruction (e.g.
370 tail calls ending with a jump). BTINFO is the branch trace information for
371 the current thread. */
373 static struct btrace_function *
374 ftrace_find_call (struct btrace_thread_info *btinfo,
375 struct btrace_function *bfun)
377 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
379 struct btrace_insn *last;
382 if (bfun->errcode != 0)
385 last = VEC_last (btrace_insn_s, bfun->insn);
387 if (last->iclass == BTRACE_INSN_CALL)
394 /* Add a continuation segment for a function into which we return at the end of
396 BTINFO is the branch trace information for the current thread.
397 MFUN and FUN are the symbol information we have for this function. */
399 static struct btrace_function *
400 ftrace_new_return (struct btrace_thread_info *btinfo,
401 struct minimal_symbol *mfun,
404 struct btrace_function *prev = btinfo->functions.back ();
405 struct btrace_function *bfun, *caller;
407 bfun = ftrace_new_function (btinfo, mfun, fun);
409 /* It is important to start at PREV's caller. Otherwise, we might find
410 PREV itself, if PREV is a recursive function. */
411 caller = ftrace_find_call_by_number (btinfo, prev->up);
412 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
415 /* The caller of PREV is the preceding btrace function segment in this
416 function instance. */
417 gdb_assert (caller->segment.next == NULL);
419 caller->segment.next = bfun;
420 bfun->segment.prev = caller;
422 /* Maintain the function level. */
423 bfun->level = caller->level;
425 /* Maintain the call stack. */
426 bfun->up = caller->up;
427 bfun->flags = caller->flags;
429 ftrace_debug (bfun, "new return");
433 /* We did not find a caller. This could mean that something went
434 wrong or that the call is simply not included in the trace. */
436 /* Let's search for some actual call. */
437 caller = ftrace_find_call_by_number (btinfo, prev->up);
438 caller = ftrace_find_call (btinfo, caller);
441 /* There is no call in PREV's back trace. We assume that the
442 branch trace did not include it. */
444 /* Let's find the topmost function and add a new caller for it.
445 This should handle a series of initial tail calls. */
446 while (prev->up != 0)
447 prev = ftrace_find_call_by_number (btinfo, prev->up);
449 bfun->level = prev->level - 1;
451 /* Fix up the call stack for PREV. */
452 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
454 ftrace_debug (bfun, "new return - no caller");
458 /* There is a call in PREV's back trace to which we should have
459 returned but didn't. Let's start a new, separate back trace
460 from PREV's level. */
461 bfun->level = prev->level - 1;
463 /* We fix up the back trace for PREV but leave other function segments
464 on the same level as they are.
465 This should handle things like schedule () correctly where we're
466 switching contexts. */
467 prev->up = bfun->number;
468 prev->flags = BFUN_UP_LINKS_TO_RET;
470 ftrace_debug (bfun, "new return - unknown caller");
477 /* Add a new function segment for a function switch at the end of the trace.
478 BTINFO is the branch trace information for the current thread.
479 MFUN and FUN are the symbol information we have for this function. */
481 static struct btrace_function *
482 ftrace_new_switch (struct btrace_thread_info *btinfo,
483 struct minimal_symbol *mfun,
486 struct btrace_function *prev = btinfo->functions.back ();
487 struct btrace_function *bfun;
489 /* This is an unexplained function switch. We can't really be sure about the
490 call stack, yet the best I can think of right now is to preserve it. */
491 bfun = ftrace_new_function (btinfo, mfun, fun);
493 bfun->flags = prev->flags;
495 ftrace_debug (bfun, "new switch");
500 /* Add a new function segment for a gap in the trace due to a decode error at
501 the end of the trace.
502 BTINFO is the branch trace information for the current thread.
503 ERRCODE is the format-specific error code. */
505 static struct btrace_function *
506 ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
508 struct btrace_function *bfun;
510 if (btinfo->functions.empty ())
511 bfun = ftrace_new_function (btinfo, NULL, NULL);
514 /* We hijack the previous function segment if it was empty. */
515 bfun = btinfo->functions.back ();
516 if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
517 bfun = ftrace_new_function (btinfo, NULL, NULL);
520 bfun->errcode = errcode;
522 ftrace_debug (bfun, "new gap");
527 /* Update the current function segment at the end of the trace in BTINFO with
528 respect to the instruction at PC. This may create new function segments.
529 Return the chronologically latest function segment, never NULL. */
531 static struct btrace_function *
532 ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
534 struct bound_minimal_symbol bmfun;
535 struct minimal_symbol *mfun;
537 struct btrace_insn *last;
538 struct btrace_function *bfun;
540 /* Try to determine the function we're in. We use both types of symbols
541 to avoid surprises when we sometimes get a full symbol and sometimes
542 only a minimal symbol. */
543 fun = find_pc_function (pc);
544 bmfun = lookup_minimal_symbol_by_pc (pc);
547 if (fun == NULL && mfun == NULL)
548 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
550 /* If we didn't have a function, we create one. */
551 if (btinfo->functions.empty ())
552 return ftrace_new_function (btinfo, mfun, fun);
554 /* If we had a gap before, we create a function. */
555 bfun = btinfo->functions.back ();
556 if (bfun->errcode != 0)
557 return ftrace_new_function (btinfo, mfun, fun);
559 /* Check the last instruction, if we have one.
560 We do this check first, since it allows us to fill in the call stack
561 links in addition to the normal flow links. */
563 if (!VEC_empty (btrace_insn_s, bfun->insn))
564 last = VEC_last (btrace_insn_s, bfun->insn);
568 switch (last->iclass)
570 case BTRACE_INSN_RETURN:
574 /* On some systems, _dl_runtime_resolve returns to the resolved
575 function instead of jumping to it. From our perspective,
576 however, this is a tailcall.
577 If we treated it as return, we wouldn't be able to find the
578 resolved function in our stack back trace. Hence, we would
579 lose the current stack back trace and start anew with an empty
580 back trace. When the resolved function returns, we would then
581 create a stack back trace with the same function names but
582 different frame id's. This will confuse stepping. */
583 fname = ftrace_print_function_name (bfun);
584 if (strcmp (fname, "_dl_runtime_resolve") == 0)
585 return ftrace_new_tailcall (btinfo, mfun, fun);
587 return ftrace_new_return (btinfo, mfun, fun);
590 case BTRACE_INSN_CALL:
591 /* Ignore calls to the next instruction. They are used for PIC. */
592 if (last->pc + last->size == pc)
595 return ftrace_new_call (btinfo, mfun, fun);
597 case BTRACE_INSN_JUMP:
601 start = get_pc_function_start (pc);
603 /* A jump to the start of a function is (typically) a tail call. */
605 return ftrace_new_tailcall (btinfo, mfun, fun);
607 /* If we can't determine the function for PC, we treat a jump at
608 the end of the block as a tail call if we're switching functions
609 and as an intra-function branch if we don't. */
610 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
611 return ftrace_new_tailcall (btinfo, mfun, fun);
618 /* Check if we're switching functions for some other reason. */
619 if (ftrace_function_switched (bfun, mfun, fun))
621 DEBUG_FTRACE ("switching from %s in %s at %s",
622 ftrace_print_insn_addr (last),
623 ftrace_print_function_name (bfun),
624 ftrace_print_filename (bfun));
626 return ftrace_new_switch (btinfo, mfun, fun);
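
/* To summarize the decision above (an editorial note, not part of the
   original code): the class of the last traced instruction selects how
   the new segment is created.

     BTRACE_INSN_RETURN   ftrace_new_return, or ftrace_new_tailcall for
                          returns from _dl_runtime_resolve
     BTRACE_INSN_CALL     ftrace_new_call, unless the call targets the
                          next instruction (PIC)
     BTRACE_INSN_JUMP     ftrace_new_tailcall for jumps to a function
                          start or when switching functions
     anything else        ftrace_new_switch, if the symbol information
                          changed.  */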
632 /* Add the instruction at PC to BFUN's instructions. */
635 ftrace_update_insns (struct btrace_function *bfun,
636 const struct btrace_insn *insn)
638 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
640 if (record_debug > 1)
641 ftrace_debug (bfun, "update insn");
644 /* Classify the instruction at PC. */
646 static enum btrace_insn_class
647 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
649 enum btrace_insn_class iclass;
651 iclass = BTRACE_INSN_OTHER;
654 if (gdbarch_insn_is_call (gdbarch, pc))
655 iclass = BTRACE_INSN_CALL;
656 else if (gdbarch_insn_is_ret (gdbarch, pc))
657 iclass = BTRACE_INSN_RETURN;
658 else if (gdbarch_insn_is_jump (gdbarch, pc))
659 iclass = BTRACE_INSN_JUMP;
661 CATCH (error, RETURN_MASK_ERROR)
669 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
670 number of matching function segments or zero if the back traces do not
671 match. BTINFO is the branch trace information for the current thread. */
674 ftrace_match_backtrace (struct btrace_thread_info *btinfo,
675 struct btrace_function *lhs,
676 struct btrace_function *rhs)
680 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
682 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
685 lhs = ftrace_get_caller (btinfo, lhs);
686 rhs = ftrace_get_caller (btinfo, rhs);
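
/* For example (illustrative): if LHS's back trace is bar <- foo <- main
   and RHS's is also bar <- foo <- main, the walk above yields three
   matches; if RHS's back trace is baz <- foo <- main instead, it yields
   zero, since the innermost segments already differ.  */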
692 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
693 BTINFO is the branch trace information for the current thread. */
696 ftrace_fixup_level (struct btrace_thread_info *btinfo,
697 struct btrace_function *bfun, int adjustment)
702 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
703 ftrace_debug (bfun, "..bfun");
707 bfun->level += adjustment;
708 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
712 /* Recompute the global level offset. Traverse the function trace and compute
713 the global level offset as the negative of the minimal function level. */
716 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
723 if (btinfo->functions.empty ())
726 unsigned int length = btinfo->functions.size () - 1;
727 for (unsigned int i = 0; i < length; ++i)
728 level = std::min (level, btinfo->functions[i]->level);
730 /* The last function segment contains the current instruction, which is not
731 really part of the trace. If it contains just this one instruction, we
732 ignore the segment. */
733 struct btrace_function *last = btinfo->functions.back ();
734 if (VEC_length (btrace_insn_s, last->insn) != 1)
735 level = std::min (level, last->level);
737 DEBUG_FTRACE ("setting global level offset: %d", -level);
738 btinfo->level = -level;
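
/* For example (illustrative): if the function segments have levels
   { 0, -1, -2, -1 }, the minimal level is -2 and the global level offset
   becomes 2, so the normalized levels start at zero.  */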
741 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
742 ftrace_connect_backtrace. BTINFO is the branch trace information for the
746 ftrace_connect_bfun (struct btrace_thread_info *btinfo,
747 struct btrace_function *prev,
748 struct btrace_function *next)
750 DEBUG_FTRACE ("connecting...");
751 ftrace_debug (prev, "..prev");
752 ftrace_debug (next, "..next");
754 /* The function segments are not yet connected. */
755 gdb_assert (prev->segment.next == NULL);
756 gdb_assert (next->segment.prev == NULL);
758 prev->segment.next = next;
759 next->segment.prev = prev;
761 /* We may have moved NEXT to a different function level. */
762 ftrace_fixup_level (btinfo, next, prev->level - next->level);
764 /* If we run out of back trace for one, let's use the other's. */
767 const btrace_function_flags flags = next->flags;
769 next = ftrace_find_call_by_number (btinfo, next->up);
772 DEBUG_FTRACE ("using next's callers");
773 ftrace_fixup_caller (prev, next, flags);
776 else if (next->up == 0)
778 const btrace_function_flags flags = prev->flags;
780 prev = ftrace_find_call_by_number (btinfo, prev->up);
783 DEBUG_FTRACE ("using prev's callers");
784 ftrace_fixup_caller (next, prev, flags);
789 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
790 link to add the tail callers to NEXT's back trace.
792 This removes NEXT->UP from NEXT's back trace. It will be added back
793 when connecting NEXT and PREV's callers - provided they exist.
795 If PREV's back trace consists of a series of tail calls without an
796 actual call, there will be no further connection and NEXT's caller will
797 be removed for good. To catch this case, we handle it here and connect
798 the top of PREV's back trace to NEXT's caller. */
799 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
801 struct btrace_function *caller;
802 btrace_function_flags next_flags, prev_flags;
804 /* We checked NEXT->UP above so CALLER can't be NULL. */
805 caller = ftrace_find_call_by_number (btinfo, next->up);
806 next_flags = next->flags;
807 prev_flags = prev->flags;
809 DEBUG_FTRACE ("adding prev's tail calls to next");
811 prev = ftrace_find_call_by_number (btinfo, prev->up);
812 ftrace_fixup_caller (next, prev, prev_flags);
814 for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
817 /* At the end of PREV's back trace, continue with CALLER. */
820 DEBUG_FTRACE ("fixing up link for tailcall chain");
821 ftrace_debug (prev, "..top");
822 ftrace_debug (caller, "..up");
824 ftrace_fixup_caller (prev, caller, next_flags);
826 /* If we skipped any tail calls, this may move CALLER to a
827 different function level.
829 Note that changing CALLER's level is only OK because we
830 know that this is the last iteration of the bottom-to-top
831 walk in ftrace_connect_backtrace.
833 Otherwise we will fix up CALLER's level when we connect it
834 to PREV's caller in the next iteration. */
835 ftrace_fixup_level (btinfo, caller,
836 prev->level - caller->level - 1);
840 /* There's nothing to do if we find a real call. */
841 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
843 DEBUG_FTRACE ("will fix up link in next iteration");
851 /* Connect function segments on the same level in the back trace at LHS and RHS.
852 The back traces at LHS and RHS are expected to match according to
853 ftrace_match_backtrace. BTINFO is the branch trace information for the
857 ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
858 struct btrace_function *lhs,
859 struct btrace_function *rhs)
861 while (lhs != NULL && rhs != NULL)
863 struct btrace_function *prev, *next;
865 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
867 /* Connecting LHS and RHS may change the up link. */
871 lhs = ftrace_get_caller (btinfo, lhs);
872 rhs = ftrace_get_caller (btinfo, rhs);
874 ftrace_connect_bfun (btinfo, prev, next);
878 /* Bridge the gap between two function segments left and right of a gap if their
879 respective back traces match in at least MIN_MATCHES functions. BTINFO is
880 the branch trace information for the current thread.
882 Returns non-zero if the gap could be bridged, zero otherwise. */
885 ftrace_bridge_gap (struct btrace_thread_info *btinfo,
886 struct btrace_function *lhs, struct btrace_function *rhs,
889 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
892 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
893 rhs->insn_offset - 1, min_matches);
899 /* We search the back traces of LHS and RHS for valid connections and connect
900 the two function segments that give the longest combined back trace. */
902 for (cand_l = lhs; cand_l != NULL;
903 cand_l = ftrace_get_caller (btinfo, cand_l))
904 for (cand_r = rhs; cand_r != NULL;
905 cand_r = ftrace_get_caller (btinfo, cand_r))
909 matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
910 if (best_matches < matches)
912 best_matches = matches;
918 /* We need at least MIN_MATCHES matches. */
919 gdb_assert (min_matches > 0);
920 if (best_matches < min_matches)
923 DEBUG_FTRACE ("..matches: %d", best_matches);
925 /* We will fix up the level of BEST_R and succeeding function segments such
926 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
928 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
929 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
931 To catch this, we already fix up the level here where we can start at RHS
932 instead of at BEST_R. We will ignore the level fixup when connecting
933 BEST_L to BEST_R as they will already be on the same level. */
934 ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
936 ftrace_connect_backtrace (btinfo, best_l, best_r);
941 /* Try to bridge gaps due to overflow or decode errors by connecting the
942 function segments that are separated by the gap. */
945 btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
947 struct btrace_thread_info *btinfo;
948 VEC (bfun_s) *remaining;
949 struct cleanup *old_chain;
952 DEBUG ("bridge gaps");
954 btinfo = &tp->btrace;
956 old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
958 /* We require a minimum number of matches for bridging a gap. The number of
959 required matches will be lowered with each iteration.
961 The more matches the higher our confidence that the bridging is correct.
962 For big gaps or small traces, however, it may not be feasible to require a
963 high number of matches. */
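  /* For example (illustrative): the first pass only bridges gaps whose
     surrounding back traces share at least five caller segments; gaps
     that remain are retried with four required matches, and so on down
     to one.  */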
964 for (min_matches = 5; min_matches > 0; --min_matches)
966 /* Let's try to bridge as many gaps as we can. In some cases, we need to
967 skip a gap and revisit it after later gaps have been closed. */
968 while (!VEC_empty (bfun_s, *gaps))
970 struct btrace_function *gap;
973 for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
975 struct btrace_function *lhs, *rhs;
978 /* We may have a sequence of gaps if we run from one error into
979 the next as we try to re-sync onto the trace stream. Ignore
980 all but the leftmost gap in such a sequence.
982 Also ignore gaps at the beginning of the trace. */
983 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
984 if (lhs == NULL || lhs->errcode != 0)
987 /* Skip gaps to the right. */
988 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
989 while (rhs != NULL && rhs->errcode != 0)
990 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
992 /* Ignore gaps at the end of the trace. */
996 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
998 /* Keep track of gaps we were not able to bridge and try again.
999 If we just pushed them to the end of GAPS we would risk an
1000 infinite loop in case we simply cannot bridge a gap. */
1002 VEC_safe_push (bfun_s, remaining, gap);
1005 /* Let's see if we made any progress. */
1006 if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
1009 VEC_free (bfun_s, *gaps);
1015 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
1016 if (VEC_empty (bfun_s, *gaps))
1019 VEC_free (bfun_s, remaining);
1022 do_cleanups (old_chain);
1024 /* We may omit this in some cases. Not sure it is worth the extra
1025 complication, though. */
1026 ftrace_compute_global_level_offset (btinfo);
1029 /* Compute the function branch trace from BTS trace. */
1032 btrace_compute_ftrace_bts (struct thread_info *tp,
1033 const struct btrace_data_bts *btrace,
1034 VEC (bfun_s) **gaps)
1036 struct btrace_thread_info *btinfo;
1037 struct gdbarch *gdbarch;
1041 gdbarch = target_gdbarch ();
1042 btinfo = &tp->btrace;
1043 blk = VEC_length (btrace_block_s, btrace->blocks);
1045 if (btinfo->functions.empty ())
1048 level = -btinfo->level;
1052 btrace_block_s *block;
1057 block = VEC_index (btrace_block_s, btrace->blocks, blk);
1062 struct btrace_function *bfun;
1063 struct btrace_insn insn;
1066 /* We should hit the end of the block. Warn if we went too far. */
1067 if (block->end < pc)
1069 /* Indicate the gap in the trace. */
1070 bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);
1072 VEC_safe_push (bfun_s, *gaps, bfun);
1074 warning (_("Recorded trace may be corrupted at instruction "
1075 "%u (pc = %s)."), bfun->insn_offset - 1,
1076 core_addr_to_string_nz (pc));
1081 bfun = ftrace_update_function (btinfo, pc);
1083 /* Maintain the function level offset.
1084 For all but the last block, we do it here. */
1086 level = std::min (level, bfun->level);
1091 size = gdb_insn_length (gdbarch, pc);
1093 CATCH (error, RETURN_MASK_ERROR)
1100 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1103 ftrace_update_insns (bfun, &insn);
1105 /* We're done once we pushed the instruction at the end. */
1106 if (block->end == pc)
1109 /* We can't continue if we fail to compute the size. */
1112 /* Indicate the gap in the trace. We just added INSN so we're
1113 not at the beginning. */
1114 bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);
1116 VEC_safe_push (bfun_s, *gaps, bfun);
1118 warning (_("Recorded trace may be incomplete at instruction %u "
1119 "(pc = %s)."), bfun->insn_offset - 1,
1120 core_addr_to_string_nz (pc));
1127 /* Maintain the function level offset.
1128 For the last block, we do it here to not consider the last
1130 Since the last instruction corresponds to the current instruction
1131 and is not really part of the execution history, it shouldn't
1132 affect the level. */
1134 level = std::min (level, bfun->level);
1138 /* LEVEL is the minimal function level of all btrace function segments.
1139 Define the global level offset to -LEVEL so all function levels are
1140 normalized to start at zero. */
1141 btinfo->level = -level;
1144 #if defined (HAVE_LIBIPT)
1146 static enum btrace_insn_class
1147 pt_reclassify_insn (enum pt_insn_class iclass)
1152 return BTRACE_INSN_CALL;
1155 return BTRACE_INSN_RETURN;
1158 return BTRACE_INSN_JUMP;
1161 return BTRACE_INSN_OTHER;
1165 /* Return the btrace instruction flags for INSN. */
1167 static btrace_insn_flags
1168 pt_btrace_insn_flags (const struct pt_insn &insn)
1170 btrace_insn_flags flags = 0;
1172 if (insn.speculative)
1173 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1178 /* Return the btrace instruction for INSN. */
1181 pt_btrace_insn (const struct pt_insn &insn)
1183 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1184 pt_reclassify_insn (insn.iclass),
1185 pt_btrace_insn_flags (insn)};
1189 /* Add function branch trace to BTINFO using DECODER. */
1192 ftrace_add_pt (struct btrace_thread_info *btinfo,
1193 struct pt_insn_decoder *decoder,
1195 VEC (bfun_s) **gaps)
1197 struct btrace_function *bfun;
1203 struct pt_insn insn;
1205 errcode = pt_insn_sync_forward (decoder);
1208 if (errcode != -pte_eos)
1209 warning (_("Failed to synchronize onto the Intel Processor "
1210 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1216 errcode = pt_insn_next (decoder, &insn, sizeof (insn));
1220 /* Look for gaps in the trace - unless we're at the beginning. */
1221 if (!btinfo->functions.empty ())
1223 /* Tracing is disabled and re-enabled each time we enter the
1224 kernel. Most times, we continue from the same instruction we
1225 stopped before. This is indicated via the RESUMED instruction
1226 flag. The ENABLED instruction flag means that we continued
1227 from some other instruction. Indicate this as a trace gap. */
1230 bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED);
1232 VEC_safe_push (bfun_s, *gaps, bfun);
1234 pt_insn_get_offset (decoder, &offset);
1236 warning (_("Non-contiguous trace at instruction %u (offset "
1237 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
1238 bfun->insn_offset - 1, offset, insn.ip);
1242 /* Indicate trace overflows. */
1245 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);
1247 VEC_safe_push (bfun_s, *gaps, bfun);
1249 pt_insn_get_offset (decoder, &offset);
1251 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1252 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1,
1256 bfun = ftrace_update_function (btinfo, insn.ip);
1258 /* Maintain the function level offset. */
1259 *plevel = std::min (*plevel, bfun->level);
1261 btrace_insn btinsn = pt_btrace_insn (insn);
1262 ftrace_update_insns (bfun, &btinsn);
1265 if (errcode == -pte_eos)
1268 /* Indicate the gap in the trace. */
1269 bfun = ftrace_new_gap (btinfo, errcode);
1271 VEC_safe_push (bfun_s, *gaps, bfun);
1273 pt_insn_get_offset (decoder, &offset);
1275 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1276 ", pc = 0x%" PRIx64 "): %s."), errcode, bfun->insn_offset - 1,
1277 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
1281 /* A callback function to allow the trace decoder to read the inferior's
1285 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1286 const struct pt_asid *asid, uint64_t pc,
1289 int result, errcode;
1291 result = (int) size;
1294 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1296 result = -pte_nomap;
1298 CATCH (error, RETURN_MASK_ERROR)
1300 result = -pte_nomap;
1307 /* Translate the vendor from one enum to another. */
1309 static enum pt_cpu_vendor
1310 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1322 /* Finalize the function branch trace after decode. */
1324 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1325 struct thread_info *tp, int level)
1327 pt_insn_free_decoder (decoder);
1329 /* LEVEL is the minimal function level of all btrace function segments.
1330 Define the global level offset to -LEVEL so all function levels are
1331 normalized to start at zero. */
1332 tp->btrace.level = -level;
1334 /* Add a single last instruction entry for the current PC.
1335 This allows us to compute the backtrace at the current PC using both
1336 standard unwind and btrace unwind.
1337 This extra entry is ignored by all record commands. */
1341 /* Compute the function branch trace from Intel Processor Trace
1345 btrace_compute_ftrace_pt (struct thread_info *tp,
1346 const struct btrace_data_pt *btrace,
1347 VEC (bfun_s) **gaps)
1349 struct btrace_thread_info *btinfo;
1350 struct pt_insn_decoder *decoder;
1351 struct pt_config config;
1354 if (btrace->size == 0)
1357 btinfo = &tp->btrace;
1358 if (btinfo->functions.empty ())
1361 level = -btinfo->level;
1363 pt_config_init (&config);
1364 config.begin = btrace->data;
1365 config.end = btrace->data + btrace->size;
1367 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1368 config.cpu.family = btrace->config.cpu.family;
1369 config.cpu.model = btrace->config.cpu.model;
1370 config.cpu.stepping = btrace->config.cpu.stepping;
1372 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1374 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1375 pt_errstr (pt_errcode (errcode)));
1377 decoder = pt_insn_alloc_decoder (&config);
1378 if (decoder == NULL)
1379 error (_("Failed to allocate the Intel Processor Trace decoder."));
1383 struct pt_image *image;
1385 image = pt_insn_get_image (decoder);
1387 error (_("Failed to configure the Intel Processor Trace decoder."));
1389 errcode = pt_image_set_callback (image, btrace_pt_readmem_callback, NULL);
1391 error (_("Failed to configure the Intel Processor Trace decoder: "
1392 "%s."), pt_errstr (pt_errcode (errcode)));
1394 ftrace_add_pt (btinfo, decoder, &level, gaps);
1396 CATCH (error, RETURN_MASK_ALL)
1398 /* Indicate a gap in the trace if we quit trace processing. */
1399 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
1401 struct btrace_function *bfun;
1403 bfun = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);
1405 VEC_safe_push (bfun_s, *gaps, bfun);
1408 btrace_finalize_ftrace_pt (decoder, tp, level);
1410 throw_exception (error);
1414 btrace_finalize_ftrace_pt (decoder, tp, level);
1417 #else /* defined (HAVE_LIBIPT) */
1420 btrace_compute_ftrace_pt (struct thread_info *tp,
1421 const struct btrace_data_pt *btrace,
1422 VEC (bfun_s) **gaps)
1424 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1427 #endif /* defined (HAVE_LIBIPT) */
1429 /* Compute the function branch trace from a block branch trace BTRACE for
1430 a thread given by BTINFO. */
1433 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1434 VEC (bfun_s) **gaps)
1436 DEBUG ("compute ftrace");
1438 switch (btrace->format)
1440 case BTRACE_FORMAT_NONE:
1443 case BTRACE_FORMAT_BTS:
1444 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1447 case BTRACE_FORMAT_PT:
1448 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1452 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1456 btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
1458 if (!VEC_empty (bfun_s, *gaps))
1460 tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
1461 btrace_bridge_gaps (tp, gaps);
1466 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1469 struct cleanup *old_chain;
1472 old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
1476 btrace_compute_ftrace_1 (tp, btrace, &gaps);
1478 CATCH (error, RETURN_MASK_ALL)
1480 btrace_finalize_ftrace (tp, &gaps);
1482 throw_exception (error);
1486 btrace_finalize_ftrace (tp, &gaps);
1488 do_cleanups (old_chain);
1491 /* Add an entry for the current PC. */
1494 btrace_add_pc (struct thread_info *tp)
1496 struct btrace_data btrace;
1497 struct btrace_block *block;
1498 struct regcache *regcache;
1499 struct cleanup *cleanup;
1502 regcache = get_thread_regcache (tp->ptid);
1503 pc = regcache_read_pc (regcache);
1505 btrace_data_init (&btrace);
1506 btrace.format = BTRACE_FORMAT_BTS;
1507 btrace.variant.bts.blocks = NULL;
1509 cleanup = make_cleanup_btrace_data (&btrace);
1511 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1515 btrace_compute_ftrace (tp, &btrace);
1517 do_cleanups (cleanup);
1523 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1525 if (tp->btrace.target != NULL)
1528 #if !defined (HAVE_LIBIPT)
1529 if (conf->format == BTRACE_FORMAT_PT)
1530 error (_("GDB does not support Intel Processor Trace."));
1531 #endif /* !defined (HAVE_LIBIPT) */
1533 if (!target_supports_btrace (conf->format))
1534 error (_("Target does not support branch tracing."));
1536 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1537 target_pid_to_str (tp->ptid));
1539 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1541 /* We're done if we failed to enable tracing. */
1542 if (tp->btrace.target == NULL)
1545 /* We need to undo the enable in case of errors. */
1548 /* Add an entry for the current PC so we start tracing from where we
1551 If we can't access TP's registers, TP is most likely running. In this
1552 case, we can't really say where tracing was enabled so it should be
1553 safe to simply skip this step.
1555 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1556 start at the PC at which tracing was enabled. */
1557 if (conf->format != BTRACE_FORMAT_PT
1558 && can_access_registers_ptid (tp->ptid))
1561 CATCH (exception, RETURN_MASK_ALL)
1563 btrace_disable (tp);
1565 throw_exception (exception);
1572 const struct btrace_config *
1573 btrace_conf (const struct btrace_thread_info *btinfo)
1575 if (btinfo->target == NULL)
1578 return target_btrace_conf (btinfo->target);
1584 btrace_disable (struct thread_info *tp)
1586 struct btrace_thread_info *btp = &tp->btrace;
1589 if (btp->target == NULL)
1592 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1593 target_pid_to_str (tp->ptid));
1595 target_disable_btrace (btp->target);
1604 btrace_teardown (struct thread_info *tp)
1606 struct btrace_thread_info *btp = &tp->btrace;
1609 if (btp->target == NULL)
1612 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1613 target_pid_to_str (tp->ptid));
1615 target_teardown_btrace (btp->target);
1621 /* Stitch branch trace in BTS format. */
1624 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1626 struct btrace_thread_info *btinfo;
1627 struct btrace_function *last_bfun;
1628 struct btrace_insn *last_insn;
1629 btrace_block_s *first_new_block;
1631 btinfo = &tp->btrace;
1632 gdb_assert (!btinfo->functions.empty ());
1633 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1635 last_bfun = btinfo->functions.back ();
1637 /* If the existing trace ends with a gap, we just glue the traces
1638 together. We need to drop the last (i.e. chronologically first) block
1639 of the new trace, though, since we can't fill in the start address.  */
1640 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1642 VEC_pop (btrace_block_s, btrace->blocks);
1646 /* Beware that block trace starts with the most recent block, so the
1647 chronologically first block in the new trace is the last block in
1648 the new trace's block vector. */
1649 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1650 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1652 /* If the current PC at the end of the block is the same as in our current
1653 trace, there are two explanations:
1654 1. we executed the instruction and some branch brought us back.
1655 2. we have not made any progress.
1656 In the first case, the delta trace vector should contain at least two
1658 In the second case, the delta trace vector should contain exactly one
1659 entry for the partial block containing the current PC. Remove it. */
1660 if (first_new_block->end == last_insn->pc
1661 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1663 VEC_pop (btrace_block_s, btrace->blocks);
1667 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1668 core_addr_to_string_nz (first_new_block->end));
1670 /* Do a simple sanity check to make sure we don't accidentally end up
1671 with a bad block. This should not occur in practice. */
1672 if (first_new_block->end < last_insn->pc)
1674 warning (_("Error while trying to read delta trace. Falling back to "
1679 /* We adjust the last block to start at the end of our current trace. */
1680 gdb_assert (first_new_block->begin == 0);
1681 first_new_block->begin = last_insn->pc;
1683 /* We simply pop the last insn so we can insert it again as part of
1684 the normal branch trace computation.
1685 Since instruction iterators are based on indices in the instructions
1686 vector, we don't leave any pointers dangling. */
1687 DEBUG ("pruning insn at %s for stitching",
1688 ftrace_print_insn_addr (last_insn));
1690 VEC_pop (btrace_insn_s, last_bfun->insn);
1692 /* The instructions vector may become empty temporarily if this has
1693 been the only instruction in this function segment.
1694 This violates the invariant but will be remedied shortly by
1695 btrace_compute_ftrace when we add the new trace. */
1697 /* The only case where this would hurt is if the entire trace consisted
1698 of just that one instruction. If we remove it, we might turn the now
1699 empty btrace function segment into a gap. But we don't want gaps at
1700 the beginning. To avoid this, we remove the entire old trace. */
1701 if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
1707 /* Adjust the block trace in order to stitch old and new trace together.
1708 BTRACE is the new delta trace between the last and the current stop.
1709 TP is the traced thread.
1710 May modify BTRACE as well as the existing trace in TP.
1711 Return 0 on success, -1 otherwise. */
1714 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1716 /* If we don't have trace, there's nothing to do. */
1717 if (btrace_data_empty (btrace))
1720 switch (btrace->format)
1722 case BTRACE_FORMAT_NONE:
1725 case BTRACE_FORMAT_BTS:
1726 return btrace_stitch_bts (&btrace->variant.bts, tp);
1728 case BTRACE_FORMAT_PT:
1729 /* Delta reads are not supported. */
1733 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1736 /* Clear the branch trace histories in BTINFO. */
1739 btrace_clear_history (struct btrace_thread_info *btinfo)
1741 xfree (btinfo->insn_history);
1742 xfree (btinfo->call_history);
1743 xfree (btinfo->replay);
1745 btinfo->insn_history = NULL;
1746 btinfo->call_history = NULL;
1747 btinfo->replay = NULL;
1750 /* Clear the branch trace maintenance histories in BTINFO. */
1753 btrace_maint_clear (struct btrace_thread_info *btinfo)
1755 switch (btinfo->data.format)
1760 case BTRACE_FORMAT_BTS:
1761 btinfo->maint.variant.bts.packet_history.begin = 0;
1762 btinfo->maint.variant.bts.packet_history.end = 0;
1765 #if defined (HAVE_LIBIPT)
1766 case BTRACE_FORMAT_PT:
1767 xfree (btinfo->maint.variant.pt.packets);
1769 btinfo->maint.variant.pt.packets = NULL;
1770 btinfo->maint.variant.pt.packet_history.begin = 0;
1771 btinfo->maint.variant.pt.packet_history.end = 0;
1773 #endif /* defined (HAVE_LIBIPT) */
1780 btrace_decode_error (enum btrace_format format, int errcode)
1784 case BTRACE_FORMAT_BTS:
1787 case BDE_BTS_OVERFLOW:
1788 return _("instruction overflow");
1790 case BDE_BTS_INSN_SIZE:
1791 return _("unknown instruction");
1798 #if defined (HAVE_LIBIPT)
1799 case BTRACE_FORMAT_PT:
1802 case BDE_PT_USER_QUIT:
1803 return _("trace decode cancelled");
1805 case BDE_PT_DISABLED:
1806 return _("disabled");
1808 case BDE_PT_OVERFLOW:
1809 return _("overflow");
1813 return pt_errstr (pt_errcode (errcode));
1817 #endif /* defined (HAVE_LIBIPT) */
1823 return _("unknown");
1829 btrace_fetch (struct thread_info *tp)
1831 struct btrace_thread_info *btinfo;
1832 struct btrace_target_info *tinfo;
1833 struct btrace_data btrace;
1834 struct cleanup *cleanup;
1837 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1838 target_pid_to_str (tp->ptid));
1840 btinfo = &tp->btrace;
1841 tinfo = btinfo->target;
1845 /* There's no way we could get new trace while replaying.
1846 On the other hand, delta trace would return a partial record with the
1847 current PC, which is the replay PC, not the last PC, as expected. */
1848 if (btinfo->replay != NULL)
1851 /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
1852 can store a gdb.Record object in Python referring to a different thread
1853 than the current one, temporarily set INFERIOR_PTID. */
1854 cleanup = save_inferior_ptid ();
1855 inferior_ptid = tp->ptid;
1857 /* We should not be called on running or exited threads. */
1858 gdb_assert (can_access_registers_ptid (tp->ptid));
1860 btrace_data_init (&btrace);
1861 make_cleanup_btrace_data (&btrace);
1863 /* Let's first try to extend the trace we already have. */
1864 if (!btinfo->functions.empty ())
1866 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1869 /* Success. Let's try to stitch the traces together. */
1870 errcode = btrace_stitch_trace (&btrace, tp);
1874 /* We failed to read delta trace. Let's try to read new trace. */
1875 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1877 /* If we got any new trace, discard what we have. */
1878 if (errcode == 0 && !btrace_data_empty (&btrace))
1882 /* If we were not able to read the trace, we start over. */
1886 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1890 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1892 /* If we were not able to read the branch trace, signal an error. */
1894 error (_("Failed to read branch trace."));
1896 /* Compute the trace, provided we have any. */
1897 if (!btrace_data_empty (&btrace))
1899 /* Store the raw trace data. The stored data will be cleared in
1900 btrace_clear, so we always append the new trace. */
1901 btrace_data_append (&btinfo->data, &btrace);
1902 btrace_maint_clear (btinfo);
1904 btrace_clear_history (btinfo);
1905 btrace_compute_ftrace (tp, &btrace);
1908 do_cleanups (cleanup);
1914 btrace_clear (struct thread_info *tp)
1916 struct btrace_thread_info *btinfo;
1918 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1919 target_pid_to_str (tp->ptid));
1921 /* Make sure btrace frames that may hold a pointer into the branch
1922 trace data are destroyed. */
1923 reinit_frame_cache ();
1925 btinfo = &tp->btrace;
1926 for (auto &bfun : btinfo->functions)
1928 VEC_free (btrace_insn_s, bfun->insn);
1932 btinfo->functions.clear ();
1935 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1936 btrace_maint_clear (btinfo);
1937 btrace_data_clear (&btinfo->data);
1938 btrace_clear_history (btinfo);
1944 btrace_free_objfile (struct objfile *objfile)
1946 struct thread_info *tp;
1948 DEBUG ("free objfile");
1950 ALL_NON_EXITED_THREADS (tp)
1954 #if defined (HAVE_LIBEXPAT)
1956 /* Check the btrace document version. */
1959 check_xml_btrace_version (struct gdb_xml_parser *parser,
1960 const struct gdb_xml_element *element,
1961 void *user_data, VEC (gdb_xml_value_s) *attributes)
1964 = (const char *) xml_find_attribute (attributes, "version")->value;
1966 if (strcmp (version, "1.0") != 0)
1967 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1970 /* Parse a btrace "block" xml record. */
1973 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1974 const struct gdb_xml_element *element,
1975 void *user_data, VEC (gdb_xml_value_s) *attributes)
1977 struct btrace_data *btrace;
1978 struct btrace_block *block;
1979 ULONGEST *begin, *end;
1981 btrace = (struct btrace_data *) user_data;
1983 switch (btrace->format)
1985 case BTRACE_FORMAT_BTS:
1988 case BTRACE_FORMAT_NONE:
1989 btrace->format = BTRACE_FORMAT_BTS;
1990 btrace->variant.bts.blocks = NULL;
1994 gdb_xml_error (parser, _("Btrace format error."));
1997 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1998 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
2000 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
2001 block->begin = *begin;
2005 /* Parse a "raw" xml record. */
2008 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
2009 gdb_byte **pdata, size_t *psize)
2011 struct cleanup *cleanup;
2012 gdb_byte *data, *bin;
2015 len = strlen (body_text);
2017 gdb_xml_error (parser, _("Bad raw data size."));
2021 bin = data = (gdb_byte *) xmalloc (size);
2022 cleanup = make_cleanup (xfree, data);
2024 /* We use hex encoding - see common/rsp-low.h. */
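  /* For example (illustrative), the body text "0417" decodes to the two
     bytes 0x04 and 0x17.  */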
2032 if (hi == 0 || lo == 0)
2033 gdb_xml_error (parser, _("Bad hex encoding."));
2035 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2039 discard_cleanups (cleanup);
2045 /* Parse a btrace pt-config "cpu" xml record. */
2048 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2049 const struct gdb_xml_element *element,
2051 VEC (gdb_xml_value_s) *attributes)
2053 struct btrace_data *btrace;
2055 ULONGEST *family, *model, *stepping;
2057 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2058 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2059 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2060 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2062 btrace = (struct btrace_data *) user_data;
2064 if (strcmp (vendor, "GenuineIntel") == 0)
2065 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2067 btrace->variant.pt.config.cpu.family = *family;
2068 btrace->variant.pt.config.cpu.model = *model;
2069 btrace->variant.pt.config.cpu.stepping = *stepping;
2072 /* Parse a btrace pt "raw" xml record. */
2075 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2076 const struct gdb_xml_element *element,
2077 void *user_data, const char *body_text)
2079 struct btrace_data *btrace;
2081 btrace = (struct btrace_data *) user_data;
2082 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2083 &btrace->variant.pt.size);
2086 /* Parse a btrace "pt" xml record. */
2089 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2090 const struct gdb_xml_element *element,
2091 void *user_data, VEC (gdb_xml_value_s) *attributes)
2093 struct btrace_data *btrace;
2095 btrace = (struct btrace_data *) user_data;
2096 btrace->format = BTRACE_FORMAT_PT;
2097 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2098 btrace->variant.pt.data = NULL;
2099 btrace->variant.pt.size = 0;
2102 static const struct gdb_xml_attribute block_attributes[] = {
2103 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2104 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2105 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2108 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2109 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2110 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2111 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2112 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2113 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2116 static const struct gdb_xml_element btrace_pt_config_children[] = {
2117 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2118 parse_xml_btrace_pt_config_cpu, NULL },
2119 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2122 static const struct gdb_xml_element btrace_pt_children[] = {
2123 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2125 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2126 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2129 static const struct gdb_xml_attribute btrace_attributes[] = {
2130 { "version", GDB_XML_AF_NONE, NULL, NULL },
2131 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2134 static const struct gdb_xml_element btrace_children[] = {
2135 { "block", block_attributes, NULL,
2136 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2137 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2139 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2142 static const struct gdb_xml_element btrace_elements[] = {
2143 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2144 check_xml_btrace_version, NULL },
2145 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
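
/* A sketch (with hypothetical values) of a btrace document accepted by
   the element tables above:

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
     </btrace>

   or, for the Intel Processor Trace format:

     <btrace version="1.0">
       <pt>
         <pt-config>
           <cpu vendor="GenuineIntel" family="6" model="61" stepping="4"/>
         </pt-config>
         <raw>c3</raw>
       </pt>
     </btrace>  */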
2148 #endif /* defined (HAVE_LIBEXPAT) */
2153 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2155 struct cleanup *cleanup;
2158 #if defined (HAVE_LIBEXPAT)
2160 btrace->format = BTRACE_FORMAT_NONE;
2162 cleanup = make_cleanup_btrace_data (btrace);
2163 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2166 error (_("Error parsing branch trace."));
2168 /* Keep parse results. */
2169 discard_cleanups (cleanup);
2171 #else /* !defined (HAVE_LIBEXPAT) */
2173 error (_("Cannot process branch trace. XML parsing is not supported."));
2175 #endif /* !defined (HAVE_LIBEXPAT) */
2178 #if defined (HAVE_LIBEXPAT)
2180 /* Parse a btrace-conf "bts" xml record. */
2183 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2184 const struct gdb_xml_element *element,
2185 void *user_data, VEC (gdb_xml_value_s) *attributes)
2187 struct btrace_config *conf;
2188 struct gdb_xml_value *size;
2190 conf = (struct btrace_config *) user_data;
2191 conf->format = BTRACE_FORMAT_BTS;
2194 size = xml_find_attribute (attributes, "size");
2196 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2199 /* Parse a btrace-conf "pt" xml record. */
2202 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2203 const struct gdb_xml_element *element,
2204 void *user_data, VEC (gdb_xml_value_s) *attributes)
2206 struct btrace_config *conf;
2207 struct gdb_xml_value *size;
2209 conf = (struct btrace_config *) user_data;
2210 conf->format = BTRACE_FORMAT_PT;
2213 size = xml_find_attribute (attributes, "size");
2215 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2218 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2219 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2220 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2223 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2224 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2225 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2228 static const struct gdb_xml_element btrace_conf_children[] = {
2229 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2230 parse_xml_btrace_conf_bts, NULL },
2231 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2232 parse_xml_btrace_conf_pt, NULL },
2233 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2236 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2237 { "version", GDB_XML_AF_NONE, NULL, NULL },
2238 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2241 static const struct gdb_xml_element btrace_conf_elements[] = {
2242 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2243 GDB_XML_EF_NONE, NULL, NULL },
2244 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
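
/* A sketch (with hypothetical values) of a configuration document
   accepted by the element tables above:

     <btrace-conf version="1.0">
       <bts size="65536"/>
       <pt size="16777216"/>
     </btrace-conf>  */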
2247 #endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
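
/* For illustration only (a sketch; btrace-conf.dtd is the authoritative
   grammar): a configuration document as parsed above, with the optional
   "size" attributes giving the trace buffer size in bytes:

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   The example size is made up.  */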

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->insn_index;
  bfun = it->btinfo->functions[it->call_index];

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->btinfo->functions[it->call_index];
  return bfun->errcode;
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->btinfo->functions[it->call_index];
  return bfun->insn_offset + it->insn_index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  if (btinfo->functions.empty ())
    error (_("No trace."));

  bfun = btinfo->functions.back ();
  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We count
             it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}
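
/* A usage sketch for the instruction iterator above; it is hedged behind
   "#if 0" and is not part of the build.  It walks the recorded instruction
   history from the begin iterator and prints one line per instruction,
   assuming BTINFO already holds fetched trace data.  The function name is
   made up for illustration.  */
#if 0
static void
example_walk_insn_history (const struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator it;

  btrace_insn_begin (&it, btinfo);
  do
    {
      const struct btrace_insn *insn = btrace_insn_get (&it);

      /* A NULL instruction marks a gap in the trace.  */
      if (insn == NULL)
        printf_unfiltered ("%u\t[gap: %d]\n", btrace_insn_number (&it),
                           btrace_insn_get_error (&it));
      else
        printf_unfiltered ("%u\t%s\n", btrace_insn_number (&it),
                           ftrace_print_insn_addr (insn));
    }
  while (btrace_insn_next (&it, 1) > 0);
}
#endif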

/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = btinfo->functions[average];

      if (number < bfun->insn_offset)
        {
          upper = average - 1;
          continue;
        }

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
        {
          lower = average + 1;
          continue;
        }

      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;

  return 1;
}

/* Returns true if the recording ends with a function segment that
   contains only a single (i.e. the current) instruction.  */

static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
  const btrace_function *bfun;

  if (btinfo->functions.empty ())
    return false;

  bfun = btinfo->functions.back ();
  if (bfun->errcode != 0)
    return false;

  return ftrace_call_num_insn (bfun) == 1;
}

/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  if (it->index >= it->btinfo->functions.size ())
    return NULL;

  return it->btinfo->functions[it->index];
}

/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const unsigned int length = it->btinfo->functions.size ();

  /* If the last function segment contains only a single instruction (i.e. the
     current instruction), skip it.  */
  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
    return length;

  return it->index + 1;
}

/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = btinfo->functions.size ();
}

/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();

  if (it->index + stride < length - 1)
    /* Default case: Simply advance the iterator.  */
    it->index += stride;
  else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only one
         instruction (i.e. the current instruction) it is not actually part of
         the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
        it->index = length;
      else
        it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the stride.
         If the last function segment contains only one instruction (i.e. the
         current instruction) it is not actually part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
        stride = length - it->index - 1;
      else
        stride = length - it->index;

      it->index = length;
    }

  return stride;
}

/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
        it->index = length - 2;
      else
        it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}

/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);
  return (int) (lhs->index - rhs->index);
}

/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}
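
/* A usage sketch for the call iterator above; hedged behind "#if 0" and not
   part of the build.  It prints one line per function segment in the call
   history, assuming BTINFO holds fetched trace data.  The function name is
   made up for illustration.  */
#if 0
static void
example_walk_call_history (const struct btrace_thread_info *btinfo)
{
  struct btrace_call_iterator it;

  for (btrace_call_begin (&it, btinfo);
       btrace_call_get (&it) != NULL;
       btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun = btrace_call_get (&it);

      printf_unfiltered ("%u\t%s\n", btrace_call_number (&it),
                         ftrace_print_function_name (bfun));
    }
}
#endif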

/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}

/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->functions.empty ())
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}

/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini ((struct btrace_data *) arg);
}

/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}

#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
                         packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
        {
        default:
          printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
          break;

        case pt_mol_exec:
          printf_unfiltered (("mode.exec%s%s"),
                             packet->payload.mode.bits.exec.csl
                             ? (" cs.l") : (""),
                             packet->payload.mode.bits.exec.csd
                             ? (" cs.d") : (""));
          break;

        case pt_mol_tsx:
          printf_unfiltered (("mode.tsx%s%s"),
                             packet->payload.mode.bits.tsx.intx
                             ? (" intx") : (""),
                             packet->payload.mode.bits.tsx.abrt
                             ? (" abrt") : (""));
          break;
        }
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
                         packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}

/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
                        struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &packet.offset);

          errcode = pt_pkt_next (decoder, &packet.packet,
                                 sizeof(packet.packet));
          if (errcode < 0)
            break;

          if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
            {
              packet.errcode = pt_errcode (errcode);
              VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                             &packet);
            }
        }

      if (errcode == -pte_eos)
        break;

      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
               packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
               "stream: %s."), pt_errstr (pt_errcode (errcode)));
}

/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  volatile struct gdb_exception except;
  struct pt_packet_decoder *decoder;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
  config.cpu.family = pt->config.cpu.family;
  config.cpu.model = pt->config.cpu.model;
  config.cpu.stepping = pt->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
        throw_exception (except);
    }
  END_CATCH

  pt_pkt_free_decoder (decoder);
}

#endif /* defined (HAVE_LIBIPT) */

/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
                             unsigned int *begin, unsigned int *end,
                             unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
        btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
                            unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
        VEC (btrace_block_s) *blocks;
        unsigned int blk;

        blocks = btinfo->data.variant.bts.blocks;
        for (blk = begin; blk < end; ++blk)
          {
            const btrace_block_s *block;

            block = VEC_index (btrace_block_s, blocks, blk);

            printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
                               core_addr_to_string_nz (block->begin),
                               core_addr_to_string_nz (block->end));
          }

        btinfo->maint.variant.bts.packet_history.begin = begin;
        btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        VEC (btrace_pt_packet_s) *packets;
        unsigned int pkt;

        packets = btinfo->maint.variant.pt.packets;
        for (pkt = begin; pkt < end; ++pkt)
          {
            const struct btrace_pt_packet *packet;

            packet = VEC_index (btrace_pt_packet_s, packets, pkt);

            printf_unfiltered ("%u\t", pkt);
            printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

            if (packet->errcode == pte_ok)
              pt_print_packet (&packet->packet);
            else
              printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

            printf_unfiltered ("\n");
          }

        btinfo->maint.variant.pt.packet_history.begin = begin;
        btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* Read a number from an argument string.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}

/* Read a context size from an argument string.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}

/* Complain about junk at the end of an argument string.  */

static void
no_chunk (char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}

/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
        size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
        size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (from >= end)
        error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
        {
          arg = skip_spaces (++arg);

          if (*arg == '+')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              if (end - from < size)
                size = end - from;
              to = from + size;
            }
          else if (*arg == '-')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              /* Include the packet given as first argument.  */
              from += 1;
              to = from;

              if (to - begin < size)
                size = to - begin;
              from = to - size;
            }
          else
            {
              to = get_uint (&arg);

              /* Include the packet at the second argument and silently
                 truncate the range.  */
              if (to < end)
                to += 1;
              else
                to = end;

              no_chunk (arg);
            }
        }
      else
        {
          no_chunk (arg);

          if (end - from < size)
            size = end - from;
          to = from + size;
        }

      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}
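
/* Illustrative invocations of the command above (a sketch; exact packet
   output depends on the recorded trace):

     (gdb) maint btrace packet-history         <- next ten packets
     (gdb) maint btrace packet-history -       <- previous ten packets
     (gdb) maint btrace packet-history 42      <- ten packets starting at 42
     (gdb) maint btrace packet-history 42,45   <- packets 42 up to and incl. 45
     (gdb) maint btrace packet-history 42,+5   <- five packets starting at 42
     (gdb) maint btrace packet-history 42,-5   <- five packets ending at 42  */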

/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
}

/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (char *args, int from_tty)
{
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_clear (tp);
}

/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
             gdb_stdout);
}

/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
             all_commands, gdb_stdout);
}

/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
             all_commands, gdb_stdout);
}

/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
                         VEC_length (btrace_block_s,
                                     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        struct pt_version version;

        version = pt_library_version ();
        printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
                           version.minor, version.build,
                           version.ext != NULL ? version.ext : "");

        btrace_maint_update_pt_packets (btinfo);
        printf_unfiltered (_("Number of packets: %u.\n"),
                           VEC_length (btrace_pt_packet_s,
                                       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c,
                               const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}

/* Initialize btrace maintenance commands.  */

void _initialize_btrace (void);
void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
           _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
                  _("Branch tracing maintenance commands."),
                  &maint_btrace_cmdlist, "maintenance btrace ",
                  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
                  &maint_btrace_set_cmdlist, "maintenance set btrace ",
                  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
                  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
                  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
                  &maint_btrace_show_cmdlist, "maintenance show btrace ",
                  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
                  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
                  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
                           &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
                           NULL, show_maint_btrace_pt_skip_pad,
                           &maint_btrace_pt_set_cmdlist,
                           &maint_btrace_pt_show_cmdlist);
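
  /* Illustrative usage of the skip-pad knob registered above (a usage
     sketch; the output line is what show_maint_btrace_pt_skip_pad prints):

       (gdb) maint set btrace pt skip-pad off
       (gdb) maint show btrace pt skip-pad
       Skip PAD packets is off.  */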

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
           _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
           maint_btrace_clear_packet_history_cmd,
           _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
           _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n"),
           &maint_btrace_cmdlist);
}