1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
36 #include "cli/cli-utils.h"
42 /* Command lists for btrace maintenance commands. */
43 static struct cmd_list_element *maint_btrace_cmdlist;
44 static struct cmd_list_element *maint_btrace_set_cmdlist;
45 static struct cmd_list_element *maint_btrace_show_cmdlist;
46 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
49 /* Control whether to skip PAD packets when computing the packet history. */
50 static int maint_btrace_pt_skip_pad = 1;
52 /* A vector of function segments. */
53 typedef struct btrace_function * bfun_s;
54 DEF_VEC_P (bfun_s);
56 static void btrace_add_pc (struct thread_info *tp);
58 /* Print a record debug message.  Use do ... while (0) to avoid ambiguities
59    when used in if statements.  */
61 #define DEBUG(msg, args...) \
62   do \
63     { \
64       if (record_debug != 0) \
65         fprintf_unfiltered (gdb_stdlog, \
66                             "[btrace] " msg "\n", ##args); \
67     } \
68   while (0)
70 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
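/* To illustrate why the do ... while (0) wrapper above matters: without it,
   an expansion such as

     if (cond)
       DEBUG ("tracing");
     else
       do_something_else ();

   would bind the "else" to the macro-internal "if (record_debug != 0)"
   rather than to "if (cond)".  The wrapper turns the macro into a single
   statement that requires a terminating semicolon.  (COND and
   do_something_else are made-up names.)  */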
72 /* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
76 ftrace_print_function_name (const struct btrace_function *bfun)
78 struct minimal_symbol *msym;
85 return SYMBOL_PRINT_NAME (sym);
88 return MSYMBOL_PRINT_NAME (msym);
93 /* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
97 ftrace_print_filename (const struct btrace_function *bfun)
100 const char *filename;
105 filename = symtab_to_filename_for_display (symbol_symtab (sym));
106   else
107     filename = "<unknown>";
112 /* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
116 ftrace_print_insn_addr (const struct btrace_insn *insn)
121 return core_addr_to_string_nz (insn->pc);
124 /* Print an ftrace debug status message. */
127 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
129 const char *fun, *file;
130 unsigned int ibegin, iend;
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
137 ibegin = bfun->insn_offset;
138 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
144 /* Return the number of instructions in a given function call segment. */
147 ftrace_call_num_insn (const struct btrace_function* bfun)
152 /* A gap is always counted as one instruction. */
153   if (bfun->errcode != 0)
154     return 1;
156 return VEC_length (btrace_insn_s, bfun->insn);
159 /* Return non-zero if BFUN does not match MFUN and FUN;
160    return zero otherwise.  */
163 ftrace_function_switched (const struct btrace_function *bfun,
164 const struct minimal_symbol *mfun,
165 const struct symbol *fun)
167 struct minimal_symbol *msym;
173 /* If the minimal symbol changed, we certainly switched functions. */
174 if (mfun != NULL && msym != NULL
175 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
178 /* If the symbol changed, we certainly switched functions. */
179 if (fun != NULL && sym != NULL)
181 const char *bfname, *fname;
183 /* Check the function name. */
184 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
187 /* Check the location of those functions, as well. */
188 bfname = symtab_to_fullname (symbol_symtab (sym));
189 fname = symtab_to_fullname (symbol_symtab (fun));
190 if (filename_cmp (fname, bfname) != 0)
194 /* If we lost symbol information, we switched functions. */
195 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
198 /* If we gained symbol information, we switched functions. */
199 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
205 /* Allocate and initialize a new branch trace function segment.
206 PREV is the chronologically preceding function segment.
207 MFUN and FUN are the symbol information we have for this function. */
209 static struct btrace_function *
210 ftrace_new_function (struct btrace_function *prev,
211 struct minimal_symbol *mfun,
214 struct btrace_function *bfun;
216 bfun = XCNEW (struct btrace_function);
220 bfun->flow.prev = prev;
224 /* Start counting at one. */
226 bfun->insn_offset = 1;
230 gdb_assert (prev->flow.next == NULL);
231 prev->flow.next = bfun;
233 bfun->number = prev->number + 1;
234 bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
235 bfun->level = prev->level;
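/* For illustration: since instruction numbering starts at one, a first
   segment containing five instructions covers [1; 6), and a segment created
   right after it gets INSN_OFFSET 6.  A gap segment contributes exactly one
   instruction to this count (see ftrace_call_num_insn).  */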
241 /* Update the UP field of a function segment. */
244 ftrace_update_caller (struct btrace_function *bfun,
245 struct btrace_function *caller,
246 enum btrace_function_flag flags)
248 if (bfun->up != NULL)
249 ftrace_debug (bfun, "updating caller");
254 ftrace_debug (bfun, "set caller");
255 ftrace_debug (caller, "..to");
258 /* Fix up the caller for all segments of a function. */
261 ftrace_fixup_caller (struct btrace_function *bfun,
262 struct btrace_function *caller,
263 enum btrace_function_flag flags)
265 struct btrace_function *prev, *next;
267 ftrace_update_caller (bfun, caller, flags);
269 /* Update all function segments belonging to the same function. */
270 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
271 ftrace_update_caller (prev, caller, flags);
273 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
274 ftrace_update_caller (next, caller, flags);
277 /* Add a new function segment for a call.
278 CALLER is the chronologically preceding function segment.
279 MFUN and FUN are the symbol information we have for this function. */
281 static struct btrace_function *
282 ftrace_new_call (struct btrace_function *caller,
283 struct minimal_symbol *mfun,
286 struct btrace_function *bfun;
288 bfun = ftrace_new_function (caller, mfun, fun);
292 ftrace_debug (bfun, "new call");
297 /* Add a new function segment for a tail call.
298 CALLER is the chronologically preceding function segment.
299 MFUN and FUN are the symbol information we have for this function. */
301 static struct btrace_function *
302 ftrace_new_tailcall (struct btrace_function *caller,
303 struct minimal_symbol *mfun,
306 struct btrace_function *bfun;
308 bfun = ftrace_new_function (caller, mfun, fun);
311 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
313 ftrace_debug (bfun, "new tail call");
318 /* Return the caller of BFUN or NULL if there is none. This function skips
319 tail calls in the call chain. */
320 static struct btrace_function *
321 ftrace_get_caller (struct btrace_function *bfun)
323 for (; bfun != NULL; bfun = bfun->up)
324     if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
325       return bfun->up;
327   return NULL;
330 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
331 symbol information. */
333 static struct btrace_function *
334 ftrace_find_caller (struct btrace_function *bfun,
335 struct minimal_symbol *mfun,
338 for (; bfun != NULL; bfun = bfun->up)
340 /* Skip functions with incompatible symbol information. */
341       if (ftrace_function_switched (bfun, mfun, fun))
342         continue;
344       /* This is the function segment we're looking for.  */
345       break;
348   return bfun;
351 /* Find the innermost caller in the back trace of BFUN, skipping all
352 function segments that do not end with a call instruction (e.g.
353 tail calls ending with a jump). */
355 static struct btrace_function *
356 ftrace_find_call (struct btrace_function *bfun)
358 for (; bfun != NULL; bfun = bfun->up)
360 struct btrace_insn *last;
363       if (bfun->errcode != 0)
364         continue;
366 last = VEC_last (btrace_insn_s, bfun->insn);
368       if (last->iclass == BTRACE_INSN_CALL)
369         break;
372   return bfun;
375 /* Add a continuation segment for a function into which we return.
376 PREV is the chronologically preceding function segment.
377 MFUN and FUN are the symbol information we have for this function. */
379 static struct btrace_function *
380 ftrace_new_return (struct btrace_function *prev,
381 struct minimal_symbol *mfun,
384 struct btrace_function *bfun, *caller;
386 bfun = ftrace_new_function (prev, mfun, fun);
388 /* It is important to start at PREV's caller. Otherwise, we might find
389 PREV itself, if PREV is a recursive function. */
390 caller = ftrace_find_caller (prev->up, mfun, fun);
393 /* The caller of PREV is the preceding btrace function segment in this
394 function instance. */
395 gdb_assert (caller->segment.next == NULL);
397 caller->segment.next = bfun;
398 bfun->segment.prev = caller;
400 /* Maintain the function level. */
401 bfun->level = caller->level;
403 /* Maintain the call stack. */
404 bfun->up = caller->up;
405 bfun->flags = caller->flags;
407 ftrace_debug (bfun, "new return");
411 /* We did not find a caller. This could mean that something went
412 wrong or that the call is simply not included in the trace. */
414 /* Let's search for some actual call. */
415 caller = ftrace_find_call (prev->up);
418 /* There is no call in PREV's back trace. We assume that the
419 branch trace did not include it. */
421 /* Let's find the topmost function and add a new caller for it.
422 This should handle a series of initial tail calls. */
423 while (prev->up != NULL)
426 bfun->level = prev->level - 1;
428 /* Fix up the call stack for PREV. */
429 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
431 ftrace_debug (bfun, "new return - no caller");
435 /* There is a call in PREV's back trace to which we should have
436 returned but didn't. Let's start a new, separate back trace
437 from PREV's level. */
438 bfun->level = prev->level - 1;
440 /* We fix up the back trace for PREV but leave other function segments
441 on the same level as they are.
442 This should handle things like schedule () correctly where we're
443 switching contexts. */
445 prev->flags = BFUN_UP_LINKS_TO_RET;
447 ftrace_debug (bfun, "new return - unknown caller");
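/* Summarizing the three cases above: if we find a matching caller, the new
   segment continues that function instance; if there is no call at all in
   PREV's back trace, we create a new top-level caller above PREV; and if
   there is a call we failed to return to, we start a separate back trace
   one level below PREV, as happens when the kernel switches contexts in
   schedule ().  */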
454 /* Add a new function segment for a function switch.
455 PREV is the chronologically preceding function segment.
456 MFUN and FUN are the symbol information we have for this function. */
458 static struct btrace_function *
459 ftrace_new_switch (struct btrace_function *prev,
460 struct minimal_symbol *mfun,
463 struct btrace_function *bfun;
465 /* This is an unexplained function switch. We can't really be sure about the
466 call stack, yet the best I can think of right now is to preserve it. */
467 bfun = ftrace_new_function (prev, mfun, fun);
469 bfun->flags = prev->flags;
471 ftrace_debug (bfun, "new switch");
476 /* Add a new function segment for a gap in the trace due to a decode error.
477 PREV is the chronologically preceding function segment.
478 ERRCODE is the format-specific error code. */
480 static struct btrace_function *
481 ftrace_new_gap (struct btrace_function *prev, int errcode)
483 struct btrace_function *bfun;
485 /* We hijack prev if it was empty. */
486 if (prev != NULL && prev->errcode == 0
487 && VEC_empty (btrace_insn_s, prev->insn))
490 bfun = ftrace_new_function (prev, NULL, NULL);
492 bfun->errcode = errcode;
494 ftrace_debug (bfun, "new gap");
499 /* Update BFUN with respect to the instruction at PC.  This may create new
500    function segments.
501    Return the chronologically latest function segment, never NULL. */
503 static struct btrace_function *
504 ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
506 struct bound_minimal_symbol bmfun;
507 struct minimal_symbol *mfun;
509 struct btrace_insn *last;
511 /* Try to determine the function we're in. We use both types of symbols
512 to avoid surprises when we sometimes get a full symbol and sometimes
513 only a minimal symbol. */
514 fun = find_pc_function (pc);
515 bmfun = lookup_minimal_symbol_by_pc (pc);
518 if (fun == NULL && mfun == NULL)
519 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
521 /* If we didn't have a function or if we had a gap before, we create one. */
522 if (bfun == NULL || bfun->errcode != 0)
523 return ftrace_new_function (bfun, mfun, fun);
525 /* Check the last instruction, if we have one.
526 We do this check first, since it allows us to fill in the call stack
527 links in addition to the normal flow links. */
529 if (!VEC_empty (btrace_insn_s, bfun->insn))
530 last = VEC_last (btrace_insn_s, bfun->insn);
534 switch (last->iclass)
536 case BTRACE_INSN_RETURN:
540 /* On some systems, _dl_runtime_resolve returns to the resolved
541 function instead of jumping to it. From our perspective,
542 however, this is a tailcall.
543 If we treated it as return, we wouldn't be able to find the
544 resolved function in our stack back trace. Hence, we would
545 lose the current stack back trace and start anew with an empty
546 back trace. When the resolved function returns, we would then
547 create a stack back trace with the same function names but
548 different frame id's. This will confuse stepping. */
549 fname = ftrace_print_function_name (bfun);
550 if (strcmp (fname, "_dl_runtime_resolve") == 0)
551 return ftrace_new_tailcall (bfun, mfun, fun);
553 return ftrace_new_return (bfun, mfun, fun);
556 case BTRACE_INSN_CALL:
557 /* Ignore calls to the next instruction. They are used for PIC. */
558           if (last->pc + last->size == pc)
559             break;
561 return ftrace_new_call (bfun, mfun, fun);
563 case BTRACE_INSN_JUMP:
567 start = get_pc_function_start (pc);
569 /* A jump to the start of a function is (typically) a tail call. */
571 return ftrace_new_tailcall (bfun, mfun, fun);
573 /* If we can't determine the function for PC, we treat a jump at
574 the end of the block as tail call if we're switching functions
575    and as an intra-function branch if we're not.  */
576 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
577 return ftrace_new_tailcall (bfun, mfun, fun);
584 /* Check if we're switching functions for some other reason. */
585 if (ftrace_function_switched (bfun, mfun, fun))
587 DEBUG_FTRACE ("switching from %s in %s at %s",
588 ftrace_print_insn_addr (last),
589 ftrace_print_function_name (bfun),
590 ftrace_print_filename (bfun));
592 return ftrace_new_switch (bfun, mfun, fun);
598 /* Add the instruction at PC to BFUN's instructions. */
601 ftrace_update_insns (struct btrace_function *bfun,
602 const struct btrace_insn *insn)
604 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
606 if (record_debug > 1)
607 ftrace_debug (bfun, "update insn");
610 /* Classify the instruction at PC. */
612 static enum btrace_insn_class
613 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
615 enum btrace_insn_class iclass;
617 iclass = BTRACE_INSN_OTHER;
620 if (gdbarch_insn_is_call (gdbarch, pc))
621 iclass = BTRACE_INSN_CALL;
622 else if (gdbarch_insn_is_ret (gdbarch, pc))
623 iclass = BTRACE_INSN_RETURN;
624 else if (gdbarch_insn_is_jump (gdbarch, pc))
625 iclass = BTRACE_INSN_JUMP;
627 CATCH (error, RETURN_MASK_ERROR)
635 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
636    number of matching function segments or zero if the back traces do not
637    match.  */
640 ftrace_match_backtrace (struct btrace_function *lhs,
641 struct btrace_function *rhs)
645 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
647 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
650 lhs = ftrace_get_caller (lhs);
651 rhs = ftrace_get_caller (rhs);
657 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments. */
660 ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
665 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
666 ftrace_debug (bfun, "..bfun");
668 for (; bfun != NULL; bfun = bfun->flow.next)
669 bfun->level += adjustment;
672 /* Recompute the global level offset. Traverse the function trace and compute
673 the global level offset as the negative of the minimal function level. */
676 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
678 struct btrace_function *bfun, *end;
684 bfun = btinfo->begin;
688 /* The last function segment contains the current instruction, which is not
689 really part of the trace. If it contains just this one instruction, we
690 stop when we reach it; otherwise, we let the below loop run to the end. */
692 if (VEC_length (btrace_insn_s, end->insn) > 1)
696 for (; bfun != end; bfun = bfun->flow.next)
697 level = std::min (level, bfun->level);
699 DEBUG_FTRACE ("setting global level offset: %d", -level);
700 btinfo->level = -level;
703 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
704 ftrace_connect_backtrace. */
707 ftrace_connect_bfun (struct btrace_function *prev,
708 struct btrace_function *next)
710 DEBUG_FTRACE ("connecting...");
711 ftrace_debug (prev, "..prev");
712 ftrace_debug (next, "..next");
714 /* The function segments are not yet connected. */
715 gdb_assert (prev->segment.next == NULL);
716 gdb_assert (next->segment.prev == NULL);
718 prev->segment.next = next;
719 next->segment.prev = prev;
721 /* We may have moved NEXT to a different function level. */
722 ftrace_fixup_level (next, prev->level - next->level);
724 /* If we run out of back trace for one, let's use the other's. */
725 if (prev->up == NULL)
727 if (next->up != NULL)
729 DEBUG_FTRACE ("using next's callers");
730 ftrace_fixup_caller (prev, next->up, next->flags);
733 else if (next->up == NULL)
735 if (prev->up != NULL)
737 DEBUG_FTRACE ("using prev's callers");
738 ftrace_fixup_caller (next, prev->up, prev->flags);
743 /* PREV may have a tailcall caller, NEXT can't. If it does, fixup the up
744 link to add the tail callers to NEXT's back trace.
746 This removes NEXT->UP from NEXT's back trace. It will be added back
747 when connecting NEXT and PREV's callers - provided they exist.
749 If PREV's back trace consists of a series of tail calls without an
750 actual call, there will be no further connection and NEXT's caller will
751 be removed for good. To catch this case, we handle it here and connect
752 the top of PREV's back trace to NEXT's caller. */
753 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
755 struct btrace_function *caller;
756 btrace_function_flags flags;
758 /* We checked NEXT->UP above so CALLER can't be NULL. */
762 DEBUG_FTRACE ("adding prev's tail calls to next");
764 ftrace_fixup_caller (next, prev->up, prev->flags);
766 for (prev = prev->up; prev != NULL; prev = prev->up)
768 /* At the end of PREV's back trace, continue with CALLER. */
769 if (prev->up == NULL)
771 DEBUG_FTRACE ("fixing up link for tailcall chain");
772 ftrace_debug (prev, "..top");
773 ftrace_debug (caller, "..up");
775 ftrace_fixup_caller (prev, caller, flags);
777 /* If we skipped any tail calls, this may move CALLER to a
778 different function level.
780 Note that changing CALLER's level is only OK because we
781 know that this is the last iteration of the bottom-to-top
782 walk in ftrace_connect_backtrace.
784 Otherwise we will fix up CALLER's level when we connect it
785 to PREV's caller in the next iteration. */
786 ftrace_fixup_level (caller, prev->level - caller->level - 1);
790 /* There's nothing to do if we find a real call. */
791 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
793 DEBUG_FTRACE ("will fix up link in next iteration");
801 /* Connect function segments on the same level in the back trace at LHS and RHS.
802 The back traces at LHS and RHS are expected to match according to
803 ftrace_match_backtrace. */
806 ftrace_connect_backtrace (struct btrace_function *lhs,
807 struct btrace_function *rhs)
809 while (lhs != NULL && rhs != NULL)
811 struct btrace_function *prev, *next;
813 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
815 /* Connecting LHS and RHS may change the up link. */
819 lhs = ftrace_get_caller (lhs);
820 rhs = ftrace_get_caller (rhs);
822 ftrace_connect_bfun (prev, next);
826 /* Bridge the gap between two function segments left and right of a gap if their
827 respective back traces match in at least MIN_MATCHES functions.
829 Returns non-zero if the gap could be bridged, zero otherwise. */
832 ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
835 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
838 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
839 rhs->insn_offset - 1, min_matches);
845 /* We search the back traces of LHS and RHS for valid connections and connect
846    the two function segments that give the longest combined back trace. */
848 for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
849 for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
853 matches = ftrace_match_backtrace (cand_l, cand_r);
854 if (best_matches < matches)
856 best_matches = matches;
862 /* We need at least MIN_MATCHES matches. */
863 gdb_assert (min_matches > 0);
864 if (best_matches < min_matches)
867 DEBUG_FTRACE ("..matches: %d", best_matches);
869 /* We will fix up the level of BEST_R and succeeding function segments such
870 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
872 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
873 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
875 To catch this, we already fix up the level here where we can start at RHS
876 instead of at BEST_R. We will ignore the level fixup when connecting
877 BEST_L to BEST_R as they will already be on the same level. */
878 ftrace_fixup_level (rhs, best_l->level - best_r->level);
880 ftrace_connect_backtrace (best_l, best_r);
885 /* Try to bridge gaps due to overflow or decode errors by connecting the
886 function segments that are separated by the gap. */
889 btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
891 VEC (bfun_s) *remaining;
892 struct cleanup *old_chain;
895 DEBUG ("bridge gaps");
898 old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);
900 /* We require a minimum amount of matches for bridging a gap. The number of
901 required matches will be lowered with each iteration.
903 The more matches the higher our confidence that the bridging is correct.
904 For big gaps or small traces, however, it may not be feasible to require a
905 high number of matches. */
906 for (min_matches = 5; min_matches > 0; --min_matches)
908 /* Let's try to bridge as many gaps as we can. In some cases, we need to
909 skip a gap and revisit it again after we closed later gaps. */
910 while (!VEC_empty (bfun_s, *gaps))
912 struct btrace_function *gap;
915 for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
917 struct btrace_function *lhs, *rhs;
920 /* We may have a sequence of gaps if we run from one error into
921 the next as we try to re-sync onto the trace stream. Ignore
922 all but the leftmost gap in such a sequence.
924 Also ignore gaps at the beginning of the trace. */
925 lhs = gap->flow.prev;
926 if (lhs == NULL || lhs->errcode != 0)
929 /* Skip gaps to the right. */
930 for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
931 if (rhs->errcode == 0)
934 /* Ignore gaps at the end of the trace. */
938 bridged = ftrace_bridge_gap (lhs, rhs, min_matches);
940 /* Keep track of gaps we were not able to bridge and try again.
941 If we just pushed them to the end of GAPS we would risk an
942 infinite loop in case we simply cannot bridge a gap. */
944 VEC_safe_push (bfun_s, remaining, gap);
947 /* Let's see if we made any progress. */
948 if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
951 VEC_free (bfun_s, *gaps);
957 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
958 if (VEC_empty (bfun_s, *gaps))
961 VEC_free (bfun_s, remaining);
964 do_cleanups (old_chain);
966 /* We may omit this in some cases. Not sure it is worth the extra
967 complication, though. */
968 ftrace_compute_global_level_offset (&tp->btrace);
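/* An illustrative run: for a trace "... > foo > bar | <gap> | bar > foo ..."
   the back traces on either side of the gap match in only two segments, so
   the gap is skipped while MIN_MATCHES is 5, 4, or 3 and bridged once
   MIN_MATCHES drops to 2.  (The function names are made up.)  */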
971 /* Compute the function branch trace from BTS trace. */
974 btrace_compute_ftrace_bts (struct thread_info *tp,
975 const struct btrace_data_bts *btrace,
978 struct btrace_thread_info *btinfo;
979 struct btrace_function *begin, *end;
980 struct gdbarch *gdbarch;
984 gdbarch = target_gdbarch ();
985 btinfo = &tp->btrace;
986 begin = btinfo->begin;
988 level = begin != NULL ? -btinfo->level : INT_MAX;
989 blk = VEC_length (btrace_block_s, btrace->blocks);
993 btrace_block_s *block;
998 block = VEC_index (btrace_block_s, btrace->blocks, blk);
1003 struct btrace_insn insn;
1006 /* We should hit the end of the block. Warn if we went too far. */
1007 if (block->end < pc)
1009 /* Indicate the gap in the trace. */
1010 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
1014 VEC_safe_push (bfun_s, *gaps, end);
1016 warning (_("Recorded trace may be corrupted at instruction "
1017 "%u (pc = %s)."), end->insn_offset - 1,
1018 core_addr_to_string_nz (pc));
1023 end = ftrace_update_function (end, pc);
1027 /* Maintain the function level offset.
1028 For all but the last block, we do it here. */
1030 level = std::min (level, end->level);
1035 size = gdb_insn_length (gdbarch, pc);
1037 CATCH (error, RETURN_MASK_ERROR)
1044 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1047 ftrace_update_insns (end, &insn);
1049 /* We're done once we pushed the instruction at the end. */
1050 if (block->end == pc)
1053 /* We can't continue if we fail to compute the size. */
1056 /* Indicate the gap in the trace. We just added INSN so we're
1057 not at the beginning. */
1058 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
1060 VEC_safe_push (bfun_s, *gaps, end);
1062 warning (_("Recorded trace may be incomplete at instruction %u "
1063 "(pc = %s)."), end->insn_offset - 1,
1064 core_addr_to_string_nz (pc));
1071 /* Maintain the function level offset.
1072 For the last block, we do it here to not consider the last
1074 Since the last instruction corresponds to the current instruction
1075 and is not really part of the execution history, it shouldn't
1076 affect the level. */
1078 level = std::min (level, end->level);
1082 btinfo->begin = begin;
1085 /* LEVEL is the minimal function level of all btrace function segments.
1086 Define the global level offset to -LEVEL so all function levels are
1087 normalized to start at zero. */
1088 btinfo->level = -level;
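/* Level normalization example (illustrative): if the trace starts inside
   two nested calls and later returns past both of them, the outermost
   segments end up at levels -1 and -2.  With LEVEL == -2 the global level
   offset becomes 2, so the levels presented to the user again start at
   zero.  */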
1091 #if defined (HAVE_LIBIPT)
1093 static enum btrace_insn_class
1094 pt_reclassify_insn (enum pt_insn_class iclass)
1099 return BTRACE_INSN_CALL;
1102 return BTRACE_INSN_RETURN;
1105 return BTRACE_INSN_JUMP;
1108 return BTRACE_INSN_OTHER;
1112 /* Return the btrace instruction flags for INSN. */
1114 static btrace_insn_flags
1115 pt_btrace_insn_flags (const struct pt_insn *insn)
1117 btrace_insn_flags flags = 0;
1119 if (insn->speculative)
1120     flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1122   return flags;
1125 /* Add function branch trace using DECODER. */
1128 ftrace_add_pt (struct pt_insn_decoder *decoder,
1129 struct btrace_function **pbegin,
1130 struct btrace_function **pend, int *plevel,
1131 VEC (bfun_s) **gaps)
1133 struct btrace_function *begin, *end, *upd;
1141 struct btrace_insn btinsn;
1142 struct pt_insn insn;
1144 errcode = pt_insn_sync_forward (decoder);
1147 if (errcode != -pte_eos)
1148 warning (_("Failed to synchronize onto the Intel Processor "
1149 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
1153 memset (&btinsn, 0, sizeof (btinsn));
1156       errcode = pt_insn_next (decoder, &insn, sizeof (insn));
1160 /* Look for gaps in the trace - unless we're at the beginning. */
1163 /* Tracing is disabled and re-enabled each time we enter the
1164 kernel. Most times, we continue from the same instruction we
1165 stopped before. This is indicated via the RESUMED instruction
1166 flag. The ENABLED instruction flag means that we continued
1167 from some other instruction. Indicate this as a trace gap. */
1170 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
1172 VEC_safe_push (bfun_s, *gaps, end);
1174 pt_insn_get_offset (decoder, &offset);
1176 warning (_("Non-contiguous trace at instruction %u (offset "
1177 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
1178 end->insn_offset - 1, offset, insn.ip);
1182 /* Indicate trace overflows. */
1185 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
1187 *pbegin = begin = end;
1189 VEC_safe_push (bfun_s, *gaps, end);
1191 pt_insn_get_offset (decoder, &offset);
1193 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
1194 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
1198 upd = ftrace_update_function (end, insn.ip);
1204 *pbegin = begin = upd;
1207 /* Maintain the function level offset. */
1208 *plevel = std::min (*plevel, end->level);
1210 btinsn.pc = (CORE_ADDR) insn.ip;
1211 btinsn.size = (gdb_byte) insn.size;
1212 btinsn.iclass = pt_reclassify_insn (insn.iclass);
1213 btinsn.flags = pt_btrace_insn_flags (&insn);
1215 ftrace_update_insns (end, &btinsn);
1218 if (errcode == -pte_eos)
1221 /* Indicate the gap in the trace. */
1222 *pend = end = ftrace_new_gap (end, errcode);
1224 *pbegin = begin = end;
1226 VEC_safe_push (bfun_s, *gaps, end);
1228 pt_insn_get_offset (decoder, &offset);
1230 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1231 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
1232 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
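/* The loop structure above follows the libipt decoder API:
   pt_insn_sync_forward skips ahead to the next synchronization point after
   an error, and pt_insn_next iterates instructions until the next error or
   the end of the trace.  Each decode error therefore shows up as exactly
   one gap segment in the function trace.  */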
1236 /* A callback function to allow the trace decoder to read the inferior's
1237    memory.  */
1240 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1241 const struct pt_asid *asid, uint64_t pc,
1244 int result, errcode;
1246 result = (int) size;
1249 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1251 result = -pte_nomap;
1253 CATCH (error, RETURN_MASK_ERROR)
1255 result = -pte_nomap;
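/* The decoder invokes this callback whenever it needs the actual
   instruction bytes, e.g. to follow direct branches between trace packets.
   It is registered with the decoder's image via pt_image_set_callback in
   btrace_compute_ftrace_pt below.  */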
1262 /* Translate the vendor from one enum to another. */
1264 static enum pt_cpu_vendor
1265 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1277 /* Finalize the function branch trace after decode. */
1279 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1280 struct thread_info *tp, int level)
1282 pt_insn_free_decoder (decoder);
1284 /* LEVEL is the minimal function level of all btrace function segments.
1285 Define the global level offset to -LEVEL so all function levels are
1286 normalized to start at zero. */
1287 tp->btrace.level = -level;
1289 /* Add a single last instruction entry for the current PC.
1290 This allows us to compute the backtrace at the current PC using both
1291 standard unwind and btrace unwind.
1292 This extra entry is ignored by all record commands. */
1296 /* Compute the function branch trace from Intel Processor Trace
1300 btrace_compute_ftrace_pt (struct thread_info *tp,
1301 const struct btrace_data_pt *btrace,
1302 VEC (bfun_s) **gaps)
1304 struct btrace_thread_info *btinfo;
1305 struct pt_insn_decoder *decoder;
1306 struct pt_config config;
1309 if (btrace->size == 0)
1312 btinfo = &tp->btrace;
1313 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
1315   pt_config_init (&config);
1316 config.begin = btrace->data;
1317 config.end = btrace->data + btrace->size;
1319 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1320 config.cpu.family = btrace->config.cpu.family;
1321 config.cpu.model = btrace->config.cpu.model;
1322 config.cpu.stepping = btrace->config.cpu.stepping;
1324 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1326 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
1327 pt_errstr (pt_errcode (errcode)));
1329 decoder = pt_insn_alloc_decoder (&config);
1330 if (decoder == NULL)
1331 error (_("Failed to allocate the Intel Processor Trace decoder."));
1335 struct pt_image *image;
1337       image = pt_insn_get_image (decoder);
1339 error (_("Failed to configure the Intel Processor Trace decoder."));
1341       errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
                                            NULL);
1343 error (_("Failed to configure the Intel Processor Trace decoder: "
1344 "%s."), pt_errstr (pt_errcode (errcode)));
1346 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
1348 CATCH (error, RETURN_MASK_ALL)
1350 /* Indicate a gap in the trace if we quit trace processing. */
1351 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
1353 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
1355 VEC_safe_push (bfun_s, *gaps, btinfo->end);
1358 btrace_finalize_ftrace_pt (decoder, tp, level);
1360 throw_exception (error);
1364 btrace_finalize_ftrace_pt (decoder, tp, level);
1367 #else /* defined (HAVE_LIBIPT) */
1370 btrace_compute_ftrace_pt (struct thread_info *tp,
1371 const struct btrace_data_pt *btrace,
1372 VEC (bfun_s) **gaps)
1374 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1377 #endif /* defined (HAVE_LIBIPT) */
1379 /* Compute the function branch trace from a block branch trace BTRACE for
1380 a thread given by BTINFO. */
1383 btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
1384 VEC (bfun_s) **gaps)
1386 DEBUG ("compute ftrace");
1388 switch (btrace->format)
1390 case BTRACE_FORMAT_NONE:
1393 case BTRACE_FORMAT_BTS:
1394 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1397 case BTRACE_FORMAT_PT:
1398 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1402       internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1406 btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
1408 if (!VEC_empty (bfun_s, *gaps))
1410 tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
1411 btrace_bridge_gaps (tp, gaps);
1416 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1419 struct cleanup *old_chain;
1422 old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);
1426 btrace_compute_ftrace_1 (tp, btrace, &gaps);
1428 CATCH (error, RETURN_MASK_ALL)
1430 btrace_finalize_ftrace (tp, &gaps);
1432 throw_exception (error);
1436 btrace_finalize_ftrace (tp, &gaps);
1438 do_cleanups (old_chain);
1441 /* Add an entry for the current PC. */
1444 btrace_add_pc (struct thread_info *tp)
1446 struct btrace_data btrace;
1447 struct btrace_block *block;
1448 struct regcache *regcache;
1449 struct cleanup *cleanup;
1452 regcache = get_thread_regcache (tp->ptid);
1453 pc = regcache_read_pc (regcache);
1455 btrace_data_init (&btrace);
1456 btrace.format = BTRACE_FORMAT_BTS;
1457 btrace.variant.bts.blocks = NULL;
1459 cleanup = make_cleanup_btrace_data (&btrace);
1461   block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
1462   block->begin = pc;
1463   block->end = pc;
1465 btrace_compute_ftrace (tp, &btrace);
1467 do_cleanups (cleanup);
1473 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1475 if (tp->btrace.target != NULL)
1478 #if !defined (HAVE_LIBIPT)
1479 if (conf->format == BTRACE_FORMAT_PT)
1480 error (_("GDB does not support Intel Processor Trace."));
1481 #endif /* !defined (HAVE_LIBIPT) */
1483 if (!target_supports_btrace (conf->format))
1484 error (_("Target does not support branch tracing."));
1486 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1487 target_pid_to_str (tp->ptid));
1489 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
1491 /* We're done if we failed to enable tracing. */
1492 if (tp->btrace.target == NULL)
1495 /* We need to undo the enable in case of errors. */
1498 /* Add an entry for the current PC so we start tracing from where we
1501 If we can't access TP's registers, TP is most likely running. In this
1502 case, we can't really say where tracing was enabled so it should be
1503 safe to simply skip this step.
1505 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1506 start at the PC at which tracing was enabled. */
1507 if (conf->format != BTRACE_FORMAT_PT
1508 && can_access_registers_ptid (tp->ptid))
1511 CATCH (exception, RETURN_MASK_ALL)
1513 btrace_disable (tp);
1515 throw_exception (exception);
1522 const struct btrace_config *
1523 btrace_conf (const struct btrace_thread_info *btinfo)
1525 if (btinfo->target == NULL)
1528 return target_btrace_conf (btinfo->target);
1534 btrace_disable (struct thread_info *tp)
1536 struct btrace_thread_info *btp = &tp->btrace;
1539 if (btp->target == NULL)
1542 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1543 target_pid_to_str (tp->ptid));
1545 target_disable_btrace (btp->target);
1554 btrace_teardown (struct thread_info *tp)
1556 struct btrace_thread_info *btp = &tp->btrace;
1559 if (btp->target == NULL)
1562 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1563 target_pid_to_str (tp->ptid));
1565 target_teardown_btrace (btp->target);
1571 /* Stitch branch trace in BTS format. */
1574 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
1576 struct btrace_thread_info *btinfo;
1577 struct btrace_function *last_bfun;
1578 struct btrace_insn *last_insn;
1579 btrace_block_s *first_new_block;
1581 btinfo = &tp->btrace;
1582 last_bfun = btinfo->end;
1583 gdb_assert (last_bfun != NULL);
1584 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1586 /* If the existing trace ends with a gap, we just glue the traces
1587 together. We need to drop the last (i.e. chronologically first) block
1588    of the new trace, though, since we can't fill in the start address.  */
1589 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1591 VEC_pop (btrace_block_s, btrace->blocks);
1595 /* Beware that block trace starts with the most recent block, so the
1596 chronologically first block in the new trace is the last block in
1597 the new trace's block vector. */
1598 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
1599 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1601 /* If the current PC at the end of the block is the same as in our current
1602 trace, there are two explanations:
1603 1. we executed the instruction and some branch brought us back.
1604 2. we have not made any progress.
1605 In the first case, the delta trace vector should contain at least two
1607 In the second case, the delta trace vector should contain exactly one
1608 entry for the partial block containing the current PC. Remove it. */
1609 if (first_new_block->end == last_insn->pc
1610 && VEC_length (btrace_block_s, btrace->blocks) == 1)
1612 VEC_pop (btrace_block_s, btrace->blocks);
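/* Illustrative example for the check above: suppose we stopped at PC
   0x4005d0 and the delta trace's chronologically first (i.e. last) block is
   its only block and again ends at 0x4005d0.  Then no instructions were
   executed since the last stop and the whole delta is discarded.  (The
   address is made up.)  */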
1616 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1617 core_addr_to_string_nz (first_new_block->end));
1619 /* Do a simple sanity check to make sure we don't accidentally end up
1620 with a bad block. This should not occur in practice. */
1621 if (first_new_block->end < last_insn->pc)
1623 warning (_("Error while trying to read delta trace. Falling back to "
1628 /* We adjust the last block to start at the end of our current trace. */
1629 gdb_assert (first_new_block->begin == 0);
1630 first_new_block->begin = last_insn->pc;
1632 /* We simply pop the last insn so we can insert it again as part of
1633 the normal branch trace computation.
1634 Since instruction iterators are based on indices in the instructions
1635 vector, we don't leave any pointers dangling. */
1636 DEBUG ("pruning insn at %s for stitching",
1637 ftrace_print_insn_addr (last_insn));
1639 VEC_pop (btrace_insn_s, last_bfun->insn);
1641 /* The instructions vector may become empty temporarily if this has
1642 been the only instruction in this function segment.
1643 This violates the invariant but will be remedied shortly by
1644 btrace_compute_ftrace when we add the new trace. */
1646 /* The only case where this would hurt is if the entire trace consisted
1647 of just that one instruction. If we remove it, we might turn the now
1648 empty btrace function segment into a gap. But we don't want gaps at
1649 the beginning. To avoid this, we remove the entire old trace. */
1650 if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
1656 /* Adjust the block trace in order to stitch old and new trace together.
1657 BTRACE is the new delta trace between the last and the current stop.
1658 TP is the traced thread.
1659    May modify BTRACE as well as the existing trace in TP.
1660 Return 0 on success, -1 otherwise. */
1663 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1665 /* If we don't have trace, there's nothing to do. */
1666 if (btrace_data_empty (btrace))
1669 switch (btrace->format)
1671 case BTRACE_FORMAT_NONE:
1674 case BTRACE_FORMAT_BTS:
1675 return btrace_stitch_bts (&btrace->variant.bts, tp);
1677 case BTRACE_FORMAT_PT:
1678 /* Delta reads are not supported. */
1682   internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
1685 /* Clear the branch trace histories in BTINFO. */
1688 btrace_clear_history (struct btrace_thread_info *btinfo)
1690 xfree (btinfo->insn_history);
1691 xfree (btinfo->call_history);
1692 xfree (btinfo->replay);
1694 btinfo->insn_history = NULL;
1695 btinfo->call_history = NULL;
1696 btinfo->replay = NULL;
1699 /* Clear the branch trace maintenance histories in BTINFO. */
1702 btrace_maint_clear (struct btrace_thread_info *btinfo)
1704 switch (btinfo->data.format)
1709 case BTRACE_FORMAT_BTS:
1710 btinfo->maint.variant.bts.packet_history.begin = 0;
1711 btinfo->maint.variant.bts.packet_history.end = 0;
1714 #if defined (HAVE_LIBIPT)
1715 case BTRACE_FORMAT_PT:
1716 xfree (btinfo->maint.variant.pt.packets);
1718 btinfo->maint.variant.pt.packets = NULL;
1719 btinfo->maint.variant.pt.packet_history.begin = 0;
1720 btinfo->maint.variant.pt.packet_history.end = 0;
1722 #endif /* defined (HAVE_LIBIPT) */
1729 btrace_fetch (struct thread_info *tp)
1731 struct btrace_thread_info *btinfo;
1732 struct btrace_target_info *tinfo;
1733 struct btrace_data btrace;
1734 struct cleanup *cleanup;
1737 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1738 target_pid_to_str (tp->ptid));
1740 btinfo = &tp->btrace;
1741 tinfo = btinfo->target;
1745 /* There's no way we could get new trace while replaying.
1746 On the other hand, delta trace would return a partial record with the
1747 current PC, which is the replay PC, not the last PC, as expected. */
1748 if (btinfo->replay != NULL)
1751 /* We should not be called on running or exited threads. */
1752 gdb_assert (can_access_registers_ptid (tp->ptid));
1754 btrace_data_init (&btrace);
1755 cleanup = make_cleanup_btrace_data (&btrace);
1757 /* Let's first try to extend the trace we already have. */
1758 if (btinfo->end != NULL)
1760 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1763 /* Success. Let's try to stitch the traces together. */
1764 errcode = btrace_stitch_trace (&btrace, tp);
1768 /* We failed to read delta trace. Let's try to read new trace. */
1769 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1771 /* If we got any new trace, discard what we have. */
1772 if (errcode == 0 && !btrace_data_empty (&btrace))
1776 /* If we were not able to read the trace, we start over. */
1780 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1784 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1786 /* If we were not able to read the branch trace, signal an error. */
1788 error (_("Failed to read branch trace."));
1790 /* Compute the trace, provided we have any. */
1791 if (!btrace_data_empty (&btrace))
1793 /* Store the raw trace data. The stored data will be cleared in
1794 btrace_clear, so we always append the new trace. */
1795 btrace_data_append (&btinfo->data, &btrace);
1796 btrace_maint_clear (btinfo);
1798 btrace_clear_history (btinfo);
1799 btrace_compute_ftrace (tp, &btrace);
1802 do_cleanups (cleanup);
1808 btrace_clear (struct thread_info *tp)
1810 struct btrace_thread_info *btinfo;
1811 struct btrace_function *it, *trash;
1813 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1814 target_pid_to_str (tp->ptid));
1816 /* Make sure btrace frames that may hold a pointer into the branch
1817 trace data are destroyed. */
1818 reinit_frame_cache ();
1820 btinfo = &tp->btrace;
1831 btinfo->begin = NULL;
1835 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1836 btrace_maint_clear (btinfo);
1837 btrace_data_clear (&btinfo->data);
1838 btrace_clear_history (btinfo);
1844 btrace_free_objfile (struct objfile *objfile)
1846 struct thread_info *tp;
1848 DEBUG ("free objfile");
1850 ALL_NON_EXITED_THREADS (tp)
1854 #if defined (HAVE_LIBEXPAT)
1856 /* Check the btrace document version. */
1859 check_xml_btrace_version (struct gdb_xml_parser *parser,
1860 const struct gdb_xml_element *element,
1861 void *user_data, VEC (gdb_xml_value_s) *attributes)
1864 = (const char *) xml_find_attribute (attributes, "version")->value;
1866 if (strcmp (version, "1.0") != 0)
1867 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1870 /* Parse a btrace "block" xml record. */
1873 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1874 const struct gdb_xml_element *element,
1875 void *user_data, VEC (gdb_xml_value_s) *attributes)
1877 struct btrace_data *btrace;
1878 struct btrace_block *block;
1879 ULONGEST *begin, *end;
1881 btrace = (struct btrace_data *) user_data;
1883 switch (btrace->format)
1885 case BTRACE_FORMAT_BTS:
1888 case BTRACE_FORMAT_NONE:
1889 btrace->format = BTRACE_FORMAT_BTS;
1890 btrace->variant.bts.blocks = NULL;
1894 gdb_xml_error (parser, _("Btrace format error."));
1897 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1898 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1900 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1901 block->begin = *begin;
1905 /* Parse a "raw" xml record. */
1908 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1909 gdb_byte **pdata, size_t *psize)
1911 struct cleanup *cleanup;
1912 gdb_byte *data, *bin;
1915 len = strlen (body_text);
1917 gdb_xml_error (parser, _("Bad raw data size."));
1921 bin = data = (gdb_byte *) xmalloc (size);
1922 cleanup = make_cleanup (xfree, data);
1924 /* We use hex encoding - see common/rsp-low.h. */
1932 if (hi == 0 || lo == 0)
1933 gdb_xml_error (parser, _("Bad hex encoding."));
1935 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1939 discard_cleanups (cleanup);
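/* Hex encoding example (illustrative): a body text of "0a1f" decodes into
   the two bytes 0x0a and 0x1f; a body of odd length is rejected above as a
   bad raw data size.  */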
1945 /* Parse a btrace pt-config "cpu" xml record. */
1948 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1949 const struct gdb_xml_element *element,
1951 VEC (gdb_xml_value_s) *attributes)
1953 struct btrace_data *btrace;
1955 ULONGEST *family, *model, *stepping;
1957 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
1958 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
1959 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
1960 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
1962 btrace = (struct btrace_data *) user_data;
1964 if (strcmp (vendor, "GenuineIntel") == 0)
1965 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
1967 btrace->variant.pt.config.cpu.family = *family;
1968 btrace->variant.pt.config.cpu.model = *model;
1969 btrace->variant.pt.config.cpu.stepping = *stepping;
1972 /* Parse a btrace pt "raw" xml record. */
1975 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1976 const struct gdb_xml_element *element,
1977 void *user_data, const char *body_text)
1979 struct btrace_data *btrace;
1981 btrace = (struct btrace_data *) user_data;
1982 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1983 &btrace->variant.pt.size);
1986 /* Parse a btrace "pt" xml record. */
1989 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1990 const struct gdb_xml_element *element,
1991 void *user_data, VEC (gdb_xml_value_s) *attributes)
1993 struct btrace_data *btrace;
1995 btrace = (struct btrace_data *) user_data;
1996 btrace->format = BTRACE_FORMAT_PT;
1997 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1998 btrace->variant.pt.data = NULL;
1999 btrace->variant.pt.size = 0;
2002 static const struct gdb_xml_attribute block_attributes[] = {
2003 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2004 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2005 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2008 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2009 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2010 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2011 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2012 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2013 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2016 static const struct gdb_xml_element btrace_pt_config_children[] = {
2017 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2018 parse_xml_btrace_pt_config_cpu, NULL },
2019 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2022 static const struct gdb_xml_element btrace_pt_children[] = {
2023 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2025 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2026 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2029 static const struct gdb_xml_attribute btrace_attributes[] = {
2030 { "version", GDB_XML_AF_NONE, NULL, NULL },
2031 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2034 static const struct gdb_xml_element btrace_children[] = {
2035 { "block", block_attributes, NULL,
2036 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2037 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2039 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2042 static const struct gdb_xml_element btrace_elements[] = {
2043 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2044 check_xml_btrace_version, NULL },
2045 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
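/* For illustration, a minimal document accepted by this grammar (attribute
   values made up):

     <btrace version="1.0">
       <block begin="0x400500" end="0x400520"/>
     </btrace>

   or, for the Intel Processor Trace format:

     <btrace version="1.0">
       <pt>
         <pt-config>
           <cpu vendor="GenuineIntel" family="6" model="63" stepping="2"/>
         </pt-config>
         <raw>0282</raw>
       </pt>
     </btrace>  */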
2048 #endif /* defined (HAVE_LIBEXPAT) */
2053 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2055 struct cleanup *cleanup;
2058 #if defined (HAVE_LIBEXPAT)
2060 btrace->format = BTRACE_FORMAT_NONE;
2062 cleanup = make_cleanup_btrace_data (btrace);
2063 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2066 error (_("Error parsing branch trace."));
2068 /* Keep parse results. */
2069 discard_cleanups (cleanup);
2071 #else /* !defined (HAVE_LIBEXPAT) */
2073 error (_("Cannot process branch trace. XML parsing is not supported."));
2075 #endif /* !defined (HAVE_LIBEXPAT) */
2078 #if defined (HAVE_LIBEXPAT)
2080 /* Parse a btrace-conf "bts" xml record. */
2083 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2084 const struct gdb_xml_element *element,
2085 void *user_data, VEC (gdb_xml_value_s) *attributes)
2087 struct btrace_config *conf;
2088 struct gdb_xml_value *size;
2090 conf = (struct btrace_config *) user_data;
2091 conf->format = BTRACE_FORMAT_BTS;
2094 size = xml_find_attribute (attributes, "size");
2096 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2099 /* Parse a btrace-conf "pt" xml record. */
2102 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2103 const struct gdb_xml_element *element,
2104 void *user_data, VEC (gdb_xml_value_s) *attributes)
2106 struct btrace_config *conf;
2107 struct gdb_xml_value *size;
2109 conf = (struct btrace_config *) user_data;
2110 conf->format = BTRACE_FORMAT_PT;
2113 size = xml_find_attribute (attributes, "size");
2115 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2118 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2119 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2120 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2123 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2124 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2125 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2128 static const struct gdb_xml_element btrace_conf_children[] = {
2129 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2130 parse_xml_btrace_conf_bts, NULL },
2131 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2132 parse_xml_btrace_conf_pt, NULL },
2133 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2136 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2137 { "version", GDB_XML_AF_NONE, NULL, NULL },
2138 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2141 static const struct gdb_xml_element btrace_conf_elements[] = {
2142 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2143 GDB_XML_EF_NONE, NULL, NULL },
2144 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
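/* An illustrative configuration document for this grammar (the size value
   is made up):

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>  */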
2147 #endif /* defined (HAVE_LIBEXPAT) */
2152 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2156 #if defined (HAVE_LIBEXPAT)
2158 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2159 btrace_conf_elements, xml, conf);
2161 error (_("Error parsing branch trace configuration."));
2163 #else /* !defined (HAVE_LIBEXPAT) */
2165 error (_("XML parsing is not supported."));
2167 #endif /* !defined (HAVE_LIBEXPAT) */
2172 const struct btrace_insn *
2173 btrace_insn_get (const struct btrace_insn_iterator *it)
2175 const struct btrace_function *bfun;
2176 unsigned int index, end;
2179 bfun = it->function;
2181 /* Check if the iterator points to a gap in the trace. */
2182   if (bfun->errcode != 0)
2183     return NULL;
2185 /* The index is within the bounds of this function's instruction vector. */
2186 end = VEC_length (btrace_insn_s, bfun->insn);
2187 gdb_assert (0 < end);
2188 gdb_assert (index < end);
2190 return VEC_index (btrace_insn_s, bfun->insn, index);
2196 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2198 return it->function->errcode;
2204 btrace_insn_number (const struct btrace_insn_iterator *it)
2206 return it->function->insn_offset + it->index;
2212 btrace_insn_begin (struct btrace_insn_iterator *it,
2213 const struct btrace_thread_info *btinfo)
2215 const struct btrace_function *bfun;
2217 bfun = btinfo->begin;
2219 error (_("No trace."));
2221 it->function = bfun;
2228 btrace_insn_end (struct btrace_insn_iterator *it,
2229 const struct btrace_thread_info *btinfo)
2231 const struct btrace_function *bfun;
2232 unsigned int length;
2236 error (_("No trace."));
2238 length = VEC_length (btrace_insn_s, bfun->insn);
2240 /* The last function may either be a gap or it contains the current
2241    instruction, which is one past the end of the execution trace; ignore
2242    it.  */
2243   if (length > 0)
2244     length -= 1;
2246 it->function = bfun;
2253 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2255 const struct btrace_function *bfun;
2256 unsigned int index, steps;
2258 bfun = it->function;
2264 unsigned int end, space, adv;
2266 end = VEC_length (btrace_insn_s, bfun->insn);
2268 /* An empty function segment represents a gap in the trace. We count
2269 it as one instruction. */
2272 const struct btrace_function *next;
2274 next = bfun->flow.next;
2287 gdb_assert (0 < end);
2288 gdb_assert (index < end);
2290 /* Compute the number of instructions remaining in this segment. */
2291 space = end - index;
2293 /* Advance the iterator as far as possible within this segment. */
2294 adv = std::min (space, stride);
2299 /* Move to the next function if we're at the end of this one. */
2302 const struct btrace_function *next;
2304 next = bfun->flow.next;
2307 /* We stepped past the last function.
2309 Let's adjust the index to point to the last instruction in
2310 the previous function. */
2316 /* We now point to the first instruction in the new function. */
2321 /* We did make progress. */
2322 gdb_assert (adv > 0);
2325 /* Update the iterator. */
2326 it->function = bfun;
2335 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2337 const struct btrace_function *bfun;
2338 unsigned int index, steps;
2340 bfun = it->function;
2348 /* Move to the previous function if we're at the start of this one. */
2351 const struct btrace_function *prev;
2353 prev = bfun->flow.prev;
2357 /* We point to one after the last instruction in the new function. */
2359 index = VEC_length (btrace_insn_s, bfun->insn);
2361 /* An empty function segment represents a gap in the trace. We count
2362 it as one instruction. */
2372 /* Advance the iterator as far as possible within this segment. */
2373 adv = std::min (index, stride);
2379 /* We did make progress. */
2380 gdb_assert (adv > 0);
2383 /* Update the iterator. */
2384 it->function = bfun;
2393 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2394 const struct btrace_insn_iterator *rhs)
2396 unsigned int lnum, rnum;
2398 lnum = btrace_insn_number (lhs);
2399 rnum = btrace_insn_number (rhs);
2401 return (int) (lnum - rnum);
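/* A minimal usage sketch for the instruction iterators (illustrative;
   "consume" stands for an arbitrary consumer of instruction addresses):

     struct btrace_insn_iterator it, end;

     btrace_insn_begin (&it, btinfo);
     btrace_insn_end (&end, btinfo);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           consume (insn->pc);

         btrace_insn_next (&it, 1);
       }

   btrace_insn_get returns NULL when the iterator points into a gap.  */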
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    if (bfun->insn_offset <= number)
      break;

  if (bfun == NULL)
    return 0;

  if (bfun->insn_offset + ftrace_call_num_insn (bfun) <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}
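
/* Sketch (editorial): position an iterator at a user-supplied
   instruction number, falling back to the trace begin if the number is
   not contained in the trace.

     struct btrace_insn_iterator it;

     if (btrace_find_insn_by_number (&it, btinfo, number) == 0)
       btrace_insn_begin (&it, btinfo);  */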
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     of the last function.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}
/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction.  */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}
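
/* Sketch (editorial): enumerate all function call segments; the end
   iterator's function pointer is NULL, cf. btrace_call_end.

     struct btrace_call_iterator it;

     for (btrace_call_begin (&it, btinfo);
          btrace_call_get (&it) != NULL;
          btrace_call_next (&it, 1))
       printf_unfiltered ("%u\n", btrace_call_number (&it));  */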
/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively.  We could bail out
         earlier.  On the other hand, it is very unlikely that we search for
         a nonexistent function.  */
    }

  return 0;
}
/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = XCNEW (struct btrace_insn_history);

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = XCNEW (struct btrace_call_history);

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
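
/* Sketch (editorial): select the complete call history, as a history
   command would for an unbounded range.

     struct btrace_call_iterator begin, end;

     btrace_call_begin (&begin, btinfo);
     btrace_call_end (&end, btinfo);

     btrace_set_call_history (btinfo, &begin, &end);  */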
/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}
/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}
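
/* Sketch (editorial): typical guard before operating on the trace.

     if (btrace_is_empty (tp))
       error (_("No trace."));  */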
/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini ((struct btrace_data *) arg);
}

/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}
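
/* Usage sketch (editorial): pair the cleanup with btrace_data_init so
   the data is finalized even if fetching the trace throws.

     struct btrace_data btrace;
     struct cleanup *cleanup;

     btrace_data_init (&btrace);
     cleanup = make_cleanup_btrace_data (&btrace);

     ... fetch and process the trace ...

     do_cleanups (cleanup);  */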
#if defined (HAVE_LIBIPT)

/* Print a single packet.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
                         packet->payload.ip.ipc,
                         packet->payload.ip.ip);
      break;

    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
                         packet->payload.tnt.bit_size,
                         packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
                         packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      switch (packet->payload.mode.leaf)
        {
        default:
          printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
          break;

        case pt_mol_exec:
          printf_unfiltered (("mode.exec%s%s"),
                             packet->payload.mode.bits.exec.csl
                             ? (" cs.l") : (""),
                             packet->payload.mode.bits.exec.csd
                             ? (" cs.d") : (""));
          break;

        case pt_mol_tsx:
          printf_unfiltered (("mode.tsx%s%s"),
                             packet->payload.mode.bits.tsx.intx
                             ? (" intx") : (""),
                             packet->payload.mode.bits.tsx.abrt
                             ? (" abrt") : (""));
          break;
        }
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
                         packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
/* Decode packets into MAINT using DECODER.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
                        struct pt_packet_decoder *decoder)
{
  int errcode;

  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
        break;

      for (;;)
        {
          pt_pkt_get_offset (decoder, &packet.offset);

          errcode = pt_pkt_next (decoder, &packet.packet,
                                 sizeof(packet.packet));
          if (errcode < 0)
            break;

          if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
            {
              packet.errcode = pt_errcode (errcode);
              VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                             &packet);
            }
        }

      if (errcode == -pte_eos)
        break;

      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
                     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
               packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
               "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
/* Update the packet history in BTINFO.  */

static void
btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
{
  volatile struct gdb_exception except;
  struct pt_packet_decoder *decoder;
  struct btrace_data_pt *pt;
  struct pt_config config;
  int errcode;

  pt = &btinfo->data.variant.pt;

  /* Nothing to do if there is no trace.  */
  if (pt->size == 0)
    return;

  memset (&config, 0, sizeof(config));

  config.size = sizeof (config);
  config.begin = pt->data;
  config.end = pt->data + pt->size;

  config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
  config.cpu.family = pt->config.cpu.family;
  config.cpu.model = pt->config.cpu.model;
  config.cpu.stepping = pt->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_pkt_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      btrace_maint_decode_pt (&btinfo->maint, decoder);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      pt_pkt_free_decoder (decoder);

      if (except.reason < 0)
        throw_exception (except);
    }
  END_CATCH

  pt_pkt_free_decoder (decoder);
}

#endif /* defined (HAVE_LIBIPT) */
/* Update the packet maintenance information for BTINFO and store the
   low and high bounds into BEGIN and END, respectively.
   Store the current iterator state into FROM and TO.  */

static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
                             unsigned int *begin, unsigned int *end,
                             unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
        btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
   update the current iterator position.  */

static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
                            unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
        VEC (btrace_block_s) *blocks;
        unsigned int blk;

        blocks = btinfo->data.variant.bts.blocks;
        for (blk = begin; blk < end; ++blk)
          {
            const btrace_block_s *block;

            block = VEC_index (btrace_block_s, blocks, blk);

            printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
                               core_addr_to_string_nz (block->begin),
                               core_addr_to_string_nz (block->end));
          }

        btinfo->maint.variant.bts.packet_history.begin = begin;
        btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        VEC (btrace_pt_packet_s) *packets;
        unsigned int pkt;

        packets = btinfo->maint.variant.pt.packets;
        for (pkt = begin; pkt < end; ++pkt)
          {
            const struct btrace_pt_packet *packet;

            packet = VEC_index (btrace_pt_packet_s, packets, pkt);

            printf_unfiltered ("%u\t", pkt);
            printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

            if (packet->errcode == pte_ok)
              pt_print_packet (&packet->packet);
            else
              printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

            printf_unfiltered ("\n");
          }

        btinfo->maint.variant.pt.packet_history.begin = begin;
        btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* Read a number from an argument string.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
/* Read a context size from an argument string.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}
/* Complain about junk at the end of an argument string.  */

static void
no_chunk (char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
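
/* Sketch (editorial): the three helpers above combine to parse a range
   argument such as "10,+5": get_uint consumes the "10",
   get_context_size the "5" after the '+', and no_chunk rejects any
   trailing junk.

     from = get_uint (&arg);
     arg = skip_spaces (arg);
     if (*arg == ',')
       {
         arg = skip_spaces (++arg);
         if (*arg == '+')
           {
             arg += 1;
             size = get_context_size (&arg);
             no_chunk (arg);
           }
       }  */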
/* The "maintenance btrace packet-history" command.  */

static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  size = 10;
  btinfo = &tp->btrace;

  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      from = to;

      if (end - from < size)
        size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      to = from;

      if (to - begin < size)
        size = to - begin;
      from = to - size;
    }
  else
    {
      from = get_uint (&arg);
      if (end <= from)
        error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
        {
          arg = skip_spaces (++arg);

          if (*arg == '+')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              if (end - from < size)
                size = end - from;
              to = from + size;
            }
          else if (*arg == '-')
            {
              arg += 1;
              size = get_context_size (&arg);

              no_chunk (arg);

              /* Include the packet given as first argument.  */
              from += 1;
              to = from;

              if (to - begin < size)
                size = to - begin;
              from = to - size;
            }
          else
            {
              to = get_uint (&arg);

              /* Include the packet at the second argument and silently
                 truncate the range.  */
              if (to < end)
                to += 1;
              else
                to = end;

              no_chunk (arg);
            }
        }
      else
        {
          no_chunk (arg);

          if (end - from < size)
            size = end - from;
          to = from + size;
        }

      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}
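
/* Example invocations (editorial):

     (gdb) maint btrace packet-history         <- next ten packets
     (gdb) maint btrace packet-history -       <- previous ten packets
     (gdb) maint btrace packet-history 42      <- ten packets from 42
     (gdb) maint btrace packet-history 42,50   <- packets 42 to 50
     (gdb) maint btrace packet-history 42,+8   <- eight packets from 42  */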
/* The "maintenance btrace clear-packet-history" command.  */

static void
maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
}
/* The "maintenance btrace clear" command.  */

static void
maint_btrace_clear_cmd (char *args, int from_tty)
{
  struct thread_info *tp;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_clear (tp);
}
/* The "maintenance btrace" command.  */

static void
maint_btrace_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
             gdb_stdout);
}
/* The "maintenance set btrace" command.  */

static void
maint_btrace_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
             gdb_stdout);
}
/* The "maintenance show btrace" command.  */

static void
maint_btrace_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
             all_commands, gdb_stdout);
}
/* The "maintenance set btrace pt" command.  */

static void
maint_btrace_pt_set_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
             all_commands, gdb_stdout);
}
/* The "maintenance show btrace pt" command.  */

static void
maint_btrace_pt_show_cmd (char *args, int from_tty)
{
  help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
             all_commands, gdb_stdout);
}
/* The "maintenance info btrace" command.  */

static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
                         VEC_length (btrace_block_s,
                                     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
        struct pt_version version;

        version = pt_library_version ();
        printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
                           version.minor, version.build,
                           version.ext != NULL ? version.ext : "");

        btrace_maint_update_pt_packets (btinfo);
        printf_unfiltered (_("Number of packets: %u.\n"),
                           VEC_length (btrace_pt_packet_s,
                                       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
                               struct cmd_list_element *c,
                               const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
/* Initialize btrace maintenance commands.  */

void _initialize_btrace (void);

void
_initialize_btrace (void)
{
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
           _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
                  _("Branch tracing maintenance commands."),
                  &maint_btrace_cmdlist, "maintenance btrace ",
                  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
                  &maint_btrace_set_cmdlist, "maintenance set btrace ",
                  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
                  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
                  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
                  &maint_btrace_show_cmdlist, "maintenance show btrace ",
                  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
                  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
                  0, &maint_btrace_show_cmdlist);

  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
                           &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."), _("\
When enabled, PAD packets are ignored in the btrace packet history."),
                           NULL, show_maint_btrace_pt_skip_pad,
                           &maint_btrace_pt_set_cmdlist,
                           &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
           _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
           maint_btrace_clear_packet_history_cmd,
           _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n"),
           &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
           _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n"),
           &maint_btrace_cmdlist);
}
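
/* The commands registered above are used along these lines (editorial
   sketch):

     (gdb) maint info btrace
     (gdb) maint btrace packet-history 0,+10
     (gdb) maint btrace clear-packet-history
     (gdb) maint btrace clear
     (gdb) maint set btrace pt skip-pad off
     (gdb) maint show btrace pt skip-pad  */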