/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2019 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

/* Local non-gdb includes.  */

#include "cli/cli-utils.h"
#include "common/rsp-low.h"
#include "filenames.h"
#include "gdbthread.h"
#include "xml-support.h"

/* For maintenance commands.  */
#include "record-btrace.h"

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)					\
  do								\
    {								\
      if (record_debug != 0)					\
	fprintf_unfiltered (gdb_stdlog,				\
			    "[btrace] " msg "\n", ##args);	\
    }								\
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

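/* Illustration (added commentary, not from the original sources): the
   do ... while (0) wrapper matters when the macro is used as the body of
   an if statement.  Without it,

     if (foo)
       DEBUG ("bar");
     else
       baz ();

   would expand such that the `else' binds to the macro-internal
   `if (record_debug != 0)' rather than to `if (foo)'.  The wrapper turns
   the expansion into a single statement, so the dangling-else ambiguity
   cannot arise.  */
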
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + bfun->insn.size ();

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function* bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return bfun->insn.size ();
}

/* Return the function segment with the given NUMBER or NULL if no such segment
   exists.  BTINFO is the branch trace information for the current thread.  */

static struct btrace_function *
ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

/* A const version of the function above.  */

static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

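/* Illustration (added commentary): function segment numbers are 1-based
   while the FUNCTIONS vector is 0-based, so segment number N is stored at
   FUNCTIONS[N - 1].  Number 0 is reserved to mean "no segment" and is the
   null value used by the UP, PREV and NEXT links, which is why both lookup
   functions above map it to NULL.  For a three-segment trace:

     ftrace_find_call_by_number (btinfo, 0);  // NULL
     ftrace_find_call_by_number (btinfo, 1);  // &btinfo->functions[0]
     ftrace_find_call_by_number (btinfo, 4);  // NULL, only 3 segments  */
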
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.
   This invalidates all struct btrace_function pointers currently held.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  int level;
  unsigned int number, insn_offset;

  if (btinfo->functions.empty ())
    {
      /* Start counting NUMBER and INSN_OFFSET at one.  */
      level = 0;
      number = 1;
      insn_offset = 1;
    }
  else
    {
      const struct btrace_function *prev = &btinfo->functions.back ();
      level = prev->level;
      number = prev->number + 1;
      insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
    }

  btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
  return &btinfo->functions.back ();
}

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      enum btrace_function_flag flags)
{
  if (bfun->up != 0)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller->number;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
		     struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  unsigned int prev, next;

  prev = bfun->prev;
  next = bfun->next;
  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (; prev != 0; prev = bfun->prev)
    {
      bfun = ftrace_find_call_by_number (btinfo, prev);
      ftrace_update_caller (bfun, caller, flags);
    }

  for (; next != 0; next = bfun->next)
    {
      bfun = ftrace_find_call_by_number (btinfo, next);
      ftrace_update_caller (bfun, caller, flags);
    }
}

/* Add a new function segment for a call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
		   struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}

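/* Illustration (added commentary): with a call chain where foo () calls
   bar () and bar () tail-calls baz (), the up link of baz's segment
   carries BFUN_UP_LINKS_TO_TAILCALL and points to bar.  Calling
   ftrace_get_caller on baz's segment therefore skips bar and yields foo's
   segment, i.e. the function to which execution will eventually return.  */
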
/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
		  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      btrace_insn &last = bfun->insn.back ();

      if (last.iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->next == 0);

      caller->next = bfun->number;
      bfun->prev = caller->number;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != 0)
	    prev = ftrace_find_call_by_number (btinfo, prev->up);

	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function segments
	     on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun->number;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}

/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.
   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
		std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = &btinfo->functions.back ();
      if (bfun->errcode != 0 || !bfun->insn.empty ())
	bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

  bfun->errcode = errcode;
  gaps.push_back (bfun->number);

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  btrace_insn *last = NULL;
  if (!bfun->insn.empty ())
    last = &bfun->insn.back ();

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    return ftrace_new_return (btinfo, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (btinfo, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    /* Some versions of _Unwind_RaiseException use an indirect
	       jump to 'return' to the exception handler of the caller
	       handling the exception instead of a return.  Let's restrict
	       this heuristic to that and related functions.  */
	    const char *fname = ftrace_print_function_name (bfun);
	    if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
	      {
		struct btrace_function *caller
		  = ftrace_find_call_by_number (btinfo, bfun->up);
		caller = ftrace_find_caller (btinfo, caller, mfun, fun);
		if (caller != NULL)
		  return ftrace_new_return (btinfo, mfun, fun);
	      }

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call if we're switching functions
	       and as an intra-function branch if we don't.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}

/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
  bfun->insn.push_back (insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  BTINFO is the branch trace information for the current thread.  */

static int
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
			struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }

  return matches;
}

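/* Illustration (added commentary): if the back trace of LHS is
   main > foo > bar and the back trace of RHS is also main > foo > bar,
   compared pairwise from the innermost segment via the symbol
   information, the function returns 3.  If one back trace is shorter, the
   walk stops there and the count so far is returned; a mismatch at any
   compared level yields 0.  */
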
/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
   BTINFO is the branch trace information for the current thread.  */

static void
ftrace_fixup_level (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  while (bfun != NULL)
    {
      bfun->level += adjustment;
      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
    }
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size() - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = &btinfo->functions.back();
  if (last->insn.size () != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}

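/* Illustration (added commentary): if the minimal level over all segments
   is -2, e.g. because the trace starts two call levels above the point at
   which recording began, the global level offset becomes 2, so the levels
   presented to the user are normalized to start at zero.  */
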
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
		     struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (btinfo, prev, next, flags);
	}
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (btinfo, next, prev, flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags next_flags, prev_flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = ftrace_find_call_by_number (btinfo, next->up);
	  next_flags = next->flags;
	  prev_flags = prev->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  prev = ftrace_find_call_by_number (btinfo, prev->up);
	  ftrace_fixup_caller (btinfo, next, prev, prev_flags);

	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
								  prev->up))
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == 0)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (btinfo, caller,
				      prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
			  struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
   the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
		   struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
	 cand_r = ftrace_get_caller (btinfo, cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!gaps.empty ())
	{
	  for (const unsigned int number : gaps)
	    {
	      struct btrace_function *gap, *lhs, *rhs;
	      int bridged;

	      gap = ftrace_find_call_by_number (btinfo, number);

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  */
	      rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
	      while (rhs != NULL && rhs->errcode != 0)
		rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (bridged == 0)
		remaining.push_back (number);
	    }

	  /* Let's see if we made any progress.  */
	  if (remaining.size () == gaps.size ())
	    break;

	  gaps.clear ();
	  gaps.swap (remaining);
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (gaps.empty ())
	break;

      remaining.clear ();
    }

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (btinfo);
}

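/* Illustration (added commentary): the loop above first insists on five
   matching back-trace segments per bridged gap and lowers the requirement
   by one each round.  High-confidence connections are therefore made
   first, and the segments they join may provide the back-trace context
   that lets weaker connections succeed in a later round.  */
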
/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace,
			   std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  struct btrace_function *bfun;
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  bfun = ftrace_update_function (btinfo, pc);

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, bfun->level);

	  size = 0;
	  TRY
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  CATCH (error, RETURN_MASK_ERROR)
	    {
	    }
	  END_CATCH

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (bfun, insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, bfun->level);
	}
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
	  pt_reclassify_insn (insn.iclass),
	  pt_btrace_insn_flags (insn)};
}

/* Handle instruction decode events (libipt-v2).  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
		       struct pt_insn_decoder *decoder,
		       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while (status & pts_event_pending)
    {
      struct btrace_function *bfun;
      struct pt_event event;
      uint64_t offset;

      status = pt_insn_event (decoder, &event, sizeof (event));
      if (status < 0)
	break;

      switch (event.type)
	{
	default:
	  break;

	case ptev_enabled:
	  if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
	    {
	      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
			 PRIx64 ")."), bfun->insn_offset - 1, offset);
	    }

	  break;

	case ptev_overflow:
	  bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

	  pt_insn_get_offset (decoder, &offset);

	  warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
		   bfun->insn_offset - 1, offset);

	  break;
	}
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}

/* Handle events indicated by flags in INSN (libipt-v1).  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
			    struct pt_insn_decoder *decoder,
			    const struct pt_insn &insn,
			    std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
	       insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* Indicate trace overflows.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
		 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}

/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
	       struct pt_insn_decoder *decoder,
	       int *plevel,
	       std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int status;

  for (;;)
    {
      struct pt_insn insn;

      status = pt_insn_sync_forward (decoder);
      if (status < 0)
	{
	  if (status != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (status)));
	  break;
	}

      for (;;)
	{
	  /* Handle events from the previous iteration or synchronization.  */
	  status = handle_pt_insn_events (btinfo, decoder, gaps, status);
	  if (status < 0)
	    break;

	  status = pt_insn_next (decoder, &insn, sizeof(insn));
	  if (status < 0)
	    break;

	  /* Handle events indicated by flags in INSN.  */
	  handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);

	  bfun = ftrace_update_function (btinfo, insn.ip);

	  /* Maintain the function level offset.  */
	  *plevel = std::min (*plevel, bfun->level);

	  ftrace_update_insns (bfun, pt_btrace_insn (insn));
	}

      if (status == -pte_eos)
	break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, status, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
	       offset, insn.ip, pt_errstr (pt_errcode (status)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
				       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  /* We treat an unknown vendor as 'no errata'.  */
  if (btrace->config.cpu.vendor != CV_UNKNOWN)
    {
      config.cpu.vendor
	= pt_translate_cpu_vendor (btrace->config.cpu.vendor);
      config.cpu.family = btrace->config.cpu.family;
      config.cpu.model = btrace->config.cpu.model;
      config.cpu.stepping = btrace->config.cpu.stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
	ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  If CPU is not NULL, overwrite the cpu in the
   branch trace configuration.  This is currently only used for the PT
   format.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp,
			 struct btrace_data *btrace,
			 const struct btrace_cpu *cpu,
			 std::vector<unsigned int> &gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      /* Overwrite the cpu we use for enabling errata workarounds.  */
      if (cpu != nullptr)
	btrace->variant.pt.config.cpu = *cpu;

      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

static void
btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  if (!gaps.empty ())
    {
      tp->btrace.ngaps += gaps.size ();
      btrace_bridge_gaps (tp, gaps);
    }
}

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
		       const struct btrace_cpu *cpu)
{
  std::vector<unsigned int> gaps;

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      btrace_finalize_ftrace (tp, gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, gaps);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp);
  pc = regcache_read_pc (regcache);

  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace, NULL);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("Intel Processor Trace support was disabled at compile time."));
#endif /* !defined (HAVE_LIBIPT) */

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str ());

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  TRY
    {
      /* Add an entry for the current PC so we start tracing from where we
	 enabled it.

	 If we can't access TP's registers, TP is most likely running.  In this
	 case, we can't really say where tracing was enabled so it should be
	 safe to simply skip this step.

	 This is not relevant for BTRACE_FORMAT_PT since the trace will already
	 start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
	  && can_access_registers_thread (tp))
	btrace_add_pc (tp);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      btrace_disable (tp);

      throw_exception (exception);
    }
  END_CATCH
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str ());

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str ());

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  gdb_assert (!btinfo->functions.empty ());
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  last_bfun = &btinfo->functions.back ();

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (last_bfun->insn.empty ())
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  const btrace_insn &last_insn = last_bfun->insn.back ();

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn.pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn.pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn.pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (&last_insn));

  last_bfun->insn.pop_back ();

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun->number == 1 && last_bfun->insn.empty ())
    btrace_clear (tp);

  return 0;
}

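/* Illustration (added commentary): blocks arrive newest-first, so the
   chronologically first block of a delta read is the last vector entry
   and comes with BEGIN == 0, since the target does not know where our
   previous trace ended.  If the old trace ended at some PC, stitching
   rewrites that block to start at this PC and re-decodes the instruction
   at PC as part of the new trace.  */
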
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace->empty ())
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

const char *
btrace_decode_error (enum btrace_format format, int errcode)
{
  switch (format)
    {
    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	case BDE_BTS_OVERFLOW:
	  return _("instruction overflow");

	case BDE_BTS_INSN_SIZE:
	  return _("unknown instruction");

	default:
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  return _("trace decode cancelled");

	case BDE_PT_DISABLED:
	  return _("disabled");

	case BDE_PT_OVERFLOW:
	  return _("overflow");

	default:
	  if (errcode < 0)
	    return pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */

    default:
      break;
    }

  return _("unknown");
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str ());

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP->PTID always equals INFERIOR_PTID here.  Now that we
     can store a gdb.Record object in Python referring to a different thread
     than the current one, temporarily set INFERIOR_PTID.  */
  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = tp->ptid;

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_thread (tp));

  /* Let's first try to extend the trace we already have.  */
  if (!btinfo->functions.empty ())
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace.empty ())
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace.empty ())
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace, cpu);
    }
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str ());

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  btinfo->functions.clear ();
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  DEBUG ("free objfile");

  for (thread_info *tp : all_non_exited_threads ())
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data,
			  std::vector<gdb_xml_value> &attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value.get ();

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

2031 /* Parse a btrace "block" xml record. */
2034 parse_xml_btrace_block (struct gdb_xml_parser *parser,
2035 const struct gdb_xml_element *element,
2037 std::vector<gdb_xml_value> &attributes)
2039 struct btrace_data *btrace;
2040 struct btrace_block *block;
2041 ULONGEST *begin, *end;
2043 btrace = (struct btrace_data *) user_data;
2045 switch (btrace->format)
2047 case BTRACE_FORMAT_BTS:
2050 case BTRACE_FORMAT_NONE:
2051 btrace->format = BTRACE_FORMAT_BTS;
2052 btrace->variant.bts.blocks = NULL;
2056 gdb_xml_error (parser, _("Btrace format error."));
2059 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value.get ();
2060 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value.get ();
2062 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
2063 block->begin = *begin;
2067 /* Parse a "raw" xml record. */
2070 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
2071 gdb_byte **pdata, size_t *psize)
2076 len = strlen (body_text);
2078 gdb_xml_error (parser, _("Bad raw data size."));
2082 gdb::unique_xmalloc_ptr<gdb_byte> data ((gdb_byte *) xmalloc (size));
2085 /* We use hex encoding - see common/rsp-low.h. */
2093 if (hi == 0 || lo == 0)
2094 gdb_xml_error (parser, _("Bad hex encoding."));
2096 *bin++ = fromhex (hi) * 16 + fromhex (lo);
2100 *pdata = data.release ();
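/* Illustration (added commentary): the raw body text is plain hex with
   two characters per byte, so "1f2c" decodes to the bytes { 0x1f, 0x2c }
   and an odd-length body is rejected above as a bad raw data size.  */
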
/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
				const struct gdb_xml_element *element,
				void *user_data,
				std::vector<gdb_xml_value> &attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor =
    (const char *) xml_find_attribute (attributes, "vendor")->value.get ();
  family
    = (ULONGEST *) xml_find_attribute (attributes, "family")->value.get ();
  model
    = (ULONGEST *) xml_find_attribute (attributes, "model")->value.get ();
  stepping
    = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value.get ();

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

2135 /* Parse a btrace pt "raw" xml record. */
2138 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2139 const struct gdb_xml_element *element,
2140 void *user_data, const char *body_text)
2142 struct btrace_data *btrace;
2144 btrace = (struct btrace_data *) user_data;
2145 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2146 &btrace->variant.pt.size);
2149 /* Parse a btrace "pt" xml record. */
2152 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2153 const struct gdb_xml_element *element,
2155 std::vector<gdb_xml_value> &attributes)
2157 struct btrace_data *btrace;
2159 btrace = (struct btrace_data *) user_data;
2160 btrace->format = BTRACE_FORMAT_PT;
2161 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2162 btrace->variant.pt.data = NULL;
2163 btrace->variant.pt.size = 0;
static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

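/* Illustration (not from the original sources): a document accepted by the
   element tables above could look like

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
       <block begin="0x400400" end="0x400410"/>
     </btrace>

   or, for the Intel Processor Trace format,

     <btrace version="1.0">
       <pt>
         <pt-config>
           <cpu vendor="GenuineIntel" family="6" model="58" stepping="9"/>
         </pt-config>
         <raw>02820248</raw>
       </pt>
     </btrace>

   The attribute and raw-data values shown here are made up for the
   example.  */
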
/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
#if defined (HAVE_LIBEXPAT)

  int errcode;
  btrace_data result;
  result.format = BTRACE_FORMAT_NONE;

  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, &result);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  *btrace = std::move (result);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML support was disabled at "
	   "compile time."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data,
			   std::vector<gdb_xml_value> &attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value.get ();
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data,
			  std::vector<gdb_xml_value> &attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value.get ();
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
#if defined (HAVE_LIBEXPAT)

  int errcode;
  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process the branch trace configuration.  XML support "
	   "was disabled at compile time."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
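/* For illustration, a btrace-conf XML document matching the tables above
   could look as follows.  This is a hedged example only; the authoritative
   grammar is btrace-conf.dtd, and the buffer sizes below are made up:

     <btrace-conf version="1.0">
       <bts size="65536"/>
       <pt size="16777216"/>
     </btrace-conf>  */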
/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->insn_index;
  bfun = &it->btinfo->functions[it->call_index];

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = bfun->insn.size ();
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return &bfun->insn[index];
}
/* See btrace.h.  */

int
btrace_insn_get_error (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].errcode;
}
/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
}
/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->call_index = 0;
  it->insn_index = 0;
}
/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  if (btinfo->functions.empty ())
    error (_("No trace."));

  bfun = &btinfo->functions.back ();
  length = bfun->insn.size ();

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = length;
}
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = bfun->insn.size ();

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);

      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= adv;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = bfun->insn.size ();

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    index = 1;
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);

  if (lhs->call_index != rhs->call_index)
    return lhs->call_index - rhs->call_index;

  return lhs->insn_index - rhs->insn_index;
}
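/* A minimal usage sketch for the instruction iterator functions above
   (illustrative only, not part of GDB): count the non-gap instructions in
   the recorded history.  It assumes a non-empty trace, since
   btrace_insn_begin and btrace_insn_end throw otherwise.  */

#if 0 /* Example only.  */
static unsigned int
example_count_insns (const struct btrace_thread_info *btinfo)
{
  struct btrace_insn_iterator it, end;
  unsigned int count = 0;

  btrace_insn_begin (&it, btinfo);
  btrace_insn_end (&end, btinfo);

  while (btrace_insn_cmp (&it, &end) < 0)
    {
      /* btrace_insn_get returns NULL if the iterator points to a gap.  */
      if (btrace_insn_get (&it) != NULL)
	count += 1;

      /* A zero return means the iterator could not be advanced.  */
      if (btrace_insn_next (&it, 1) == 0)
	break;
    }

  return count;
}
#endif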
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  lower = 0;
  bfun = &btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  upper = btinfo->functions.size () - 1;
  bfun = &btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = &btinfo->functions[average];

      if (number < bfun->insn_offset)
	{
	  upper = average - 1;
	  continue;
	}

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
	{
	  lower = average + 1;
	  continue;
	}

      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;

  return 1;
}
/* Returns true if the recording ends with a function segment that
   contains only a single (i.e. the current) instruction.  */

static bool
btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
{
  const btrace_function *bfun;

  if (btinfo->functions.empty ())
    return false;

  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return false;

  return ftrace_call_num_insn (bfun) == 1;
}
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  if (it->index >= it->btinfo->functions.size ())
    return NULL;

  return &it->btinfo->functions[it->index];
}
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const unsigned int length = it->btinfo->functions.size ();

  /* If the last function segment contains only a single instruction (i.e. the
     current instruction), skip it.  */
  if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
    return length;

  return it->index + 1;
}
/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
		   const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = 0;
}
/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  if (btinfo->functions.empty ())
    error (_("No trace."));

  it->btinfo = btinfo;
  it->index = btinfo->functions.size ();
}
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();

  if (it->index + stride < length - 1)
    /* Default case: Simply advance the iterator.  */
    it->index += stride;
  else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only one
	 instruction (i.e. the current instruction) it is not actually part of
	 the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length;
      else
	it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the stride.
	 If the last function segment contains only one instruction (i.e. the
	 current instruction) it is not actually part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	stride = length - it->index - 1;
      else
	stride = length - it->index;

      it->index = length;
    }

  return stride;
}
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length - 2;
      else
	it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  gdb_assert (lhs->btinfo == rhs->btinfo);
  return (int) (lhs->index - rhs->index);
}
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const unsigned int length = btinfo->functions.size ();

  if ((number == 0) || (number > length))
    return 0;

  it->btinfo = btinfo;
  it->index = number - 1;
  return 1;
}
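/* A minimal usage sketch for the call iterator functions above (illustrative
   only, not part of GDB): look up call segment NUMBER and print how many
   instructions it spans.  */

#if 0 /* Example only.  */
static void
example_print_call (const struct btrace_thread_info *btinfo,
		    unsigned int number)
{
  struct btrace_call_iterator it;
  const struct btrace_function *bfun;

  /* Call numbers are one-based; zero and out-of-range numbers fail.  */
  if (btrace_find_call_by_number (&it, btinfo, number) == 0)
    return;

  bfun = btrace_call_get (&it);
  if (bfun != NULL)
    printf_unfiltered ("call %u spans %u instructions\n",
		       btrace_call_number (&it),
		       ftrace_call_num_insn (bfun));
}
#endif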
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->functions.empty ())
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
			 packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
	{
	default:
	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  printf_unfiltered (("mode.exec%s%s"),
			     packet->payload.mode.bits.exec.csl
			     ? (" cs.l") : (""),
			     packet->payload.mode.bits.exec.csd
			     ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  printf_unfiltered (("mode.tsx%s%s"),
			     packet->payload.mode.bits.tsx.intx
			     ? (" intx") : (""),
			     packet->payload.mode.bits.tsx.abrt
			     ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
			 packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
			     &packet);
	    }
	}

      if (errcode == -pte_eos)
	break;

      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
		     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  struct pt_packet_decoder *decoder;
  const struct btrace_cpu *cpu;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  cpu = record_btrace_get_cpu ();
  if (cpu == nullptr)
    cpu = &pt->config.cpu;

  /* We treat an unknown vendor as 'no errata'.  */
  if (cpu->vendor != CV_UNKNOWN)
    {
      config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
      config.cpu.family = cpu->family;
      config.cpu.model = cpu->model;
      config.cpu.stepping = cpu->stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
	throw_exception (except);
    }
  END_CATCH

  pt_pkt_free_decoder (decoder);
}
#endif /* defined (HAVE_LIBIPT) */
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
	VEC (btrace_block_s) *blocks;
	unsigned int blk;

	blocks = btinfo->data.variant.bts.blocks;
	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block_s *block;

	    block = VEC_index (btrace_block_s, blocks, blk);

	    printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
			       core_addr_to_string_nz (block->begin),
			       core_addr_to_string_nz (block->end));
	  }

	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	VEC (btrace_pt_packet_s) *packets;
	unsigned int pkt;

	packets = btinfo->maint.variant.pt.packets;
	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet *packet;

	    packet = VEC_index (btrace_pt_packet_s, packets, pkt);

	    printf_unfiltered ("%u\t", pkt);
	    printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

	    if (packet->errcode == pte_ok)
	      pt_print_packet (&packet->packet);
	    else
	      printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

	    printf_unfiltered ("\n");
	  }

	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Read a number from an argument string.  */

static unsigned int
get_uint (const char **arg)
{
  const char *begin, *pos;
  char *end;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
/* Read a context size from an argument string.  */

static int
get_context_size (const char **arg)
{
  const char *pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  char *end;
  long result = strtol (pos, &end, 10);
  *arg = end;
  return result;
}
/* Complain about junk at the end of an argument string.  */

static void
no_chunk (const char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (const char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  unsigned int size, begin, end, from, to;

  thread_info *tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (from >= end)
	error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
	{
	  arg = skip_spaces (++arg);

	  if (*arg == '+')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      if (end - from < size)
		size = end - from;
	      to = from + size;
	    }
	  else if (*arg == '-')
	    {
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      /* Include the packet given as first argument.  */
	      from += 1;
	      to = from;

	      if (to - begin < size)
		size = to - begin;
	      from = to - size;
	    }
	  else
	    {
	      to = get_uint (&arg);

	      /* Include the packet at the second argument and silently
		 truncate the range.  */
	      if (to < end)
		to += 1;
	      else
		to = end;

	      no_chunk (arg);
	    }
	}
      else
	{
	  no_chunk (arg);

	  if (end - from < size)
	    size = end - from;
	  to = from + size;
	}
    }

  btrace_maint_print_packets (btinfo, from, to);
}
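/* For illustration, typical invocations of the command above (the packet
   numbers are made-up examples):

     (gdb) maint btrace packet-history        <- ten packets after the last print
     (gdb) maint btrace packet-history -      <- ten packets before the last print
     (gdb) maint btrace packet-history 42     <- ten packets starting at 42
     (gdb) maint btrace packet-history 42,50  <- packets 42 through 50, inclusive
     (gdb) maint btrace packet-history 42,+5  <- five packets starting at 42  */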
/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_thread_info *btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btinfo->data.clear ();
}
/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();
  btrace_clear (tp);
}
/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
	     gdb_stdout);
}

/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
	     gdb_stdout);
}

/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
	     all_commands, gdb_stdout);
}

/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
	     all_commands, gdb_stdout);
}

/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (const char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
	     all_commands, gdb_stdout);
}
/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (const char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
			 VEC_length (btrace_block_s,
				     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	struct pt_version version;

	version = pt_library_version ();
	printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
			   version.minor, version.build,
			   version.ext != NULL ? version.ext : "");

	btrace_maint_update_pt_packets (btinfo);
	printf_unfiltered (_("Number of packets: %u.\n"),
			   VEC_length (btrace_pt_packet_s,
				       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c,
			       const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands.  */

void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
		  _("Branch tracing maintenance commands."),
		  &maint_btrace_cmdlist, "maintenance btrace ",
		  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
		  &maint_btrace_set_cmdlist, "maintenance set btrace ",
		  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
		  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
		  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
		  &maint_btrace_show_cmdlist, "maintenance show btrace ",
		  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
		  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
		  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);
  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
	   &maint_btrace_cmdlist);
  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n"),
	   &maint_btrace_cmdlist);
  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n"),
	   &maint_btrace_cmdlist);
}