}
/* Allocate and initialize a new branch trace function segment.
+ BTINFO is the branch trace information for the current thread.
PREV is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_function (struct btrace_function *prev,
+ftrace_new_function (struct btrace_thread_info *btinfo,
+ struct btrace_function *prev,
struct minimal_symbol *mfun,
struct symbol *fun)
{
bfun->level = prev->level;
}
+ btinfo->functions.push_back (bfun);
return bfun;
}
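
Note: the point of the new BTINFO parameter is visible right here. Every
segment is now appended to the per-thread vector the moment it is created,
instead of in a separate pass after decoding (that pass is removed further
below in btrace_fetch). A standalone sketch of the pattern, with simplified
stand-in types rather than GDB's btrace structures:

  #include <vector>

  struct segment
  {
    segment *prev = nullptr;   /* Chronologically preceding segment.  */
    int level = 0;             /* Call-stack indentation level.  */
  };

  struct trace_info
  {
    std::vector<segment *> functions;   /* All segments, in creation order.  */
  };

  /* Mirrors the patched ftrace_new_function: allocate, link, register.  */
  static segment *
  new_segment (trace_info *info, segment *prev)
  {
    segment *seg = new segment;
    seg->prev = prev;
    if (prev != nullptr)
      seg->level = prev->level;
    info->functions.push_back (seg);   /* Registered at creation time.  */
    return seg;
  }
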
}
/* Add a new function segment for a call.
+ BTINFO is the branch trace information for the current thread.
CALLER is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_call (struct btrace_function *caller,
+ftrace_new_call (struct btrace_thread_info *btinfo,
+ struct btrace_function *caller,
struct minimal_symbol *mfun,
struct symbol *fun)
{
struct btrace_function *bfun;
- bfun = ftrace_new_function (caller, mfun, fun);
+ bfun = ftrace_new_function (btinfo, caller, mfun, fun);
bfun->up = caller;
bfun->level += 1;
}
/* Add a new function segment for a tail call.
+ BTINFO is the branch trace information for the current thread.
CALLER is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_tailcall (struct btrace_function *caller,
+ftrace_new_tailcall (struct btrace_thread_info *btinfo,
+ struct btrace_function *caller,
struct minimal_symbol *mfun,
struct symbol *fun)
{
struct btrace_function *bfun;
- bfun = ftrace_new_function (caller, mfun, fun);
+ bfun = ftrace_new_function (btinfo, caller, mfun, fun);
bfun->up = caller;
bfun->level += 1;
bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
}
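
Note: both ftrace_new_call and ftrace_new_tailcall produce a segment that
links up to CALLER and sits one level deeper; the only difference is the flag
recording that the up link came from a tail call. A self-contained
restatement of that shared bookkeeping (the types and flag bit are stand-ins,
not GDB's):

  #include <cstdint>

  struct segment
  {
    segment *up = nullptr;        /* Caller segment.  */
    int level = 0;
    std::uint32_t flags = 0;
  };

  /* Stand-in for BFUN_UP_LINKS_TO_TAILCALL.  */
  constexpr std::uint32_t UP_LINKS_TO_TAILCALL = 1u << 0;

  static void
  link_to_caller (segment *callee, segment *caller, bool is_tailcall)
  {
    callee->up = caller;
    callee->level = caller->level + 1;
    if (is_tailcall)
      /* Mark the up link as created for a tail call so back traces can
	 treat the caller's frame accordingly.  */
      callee->flags |= UP_LINKS_TO_TAILCALL;
  }
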
/* Add a continuation segment for a function into which we return.
+ BTINFO is the branch trace information for the current thread.
PREV is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_return (struct btrace_function *prev,
+ftrace_new_return (struct btrace_thread_info *btinfo,
+ struct btrace_function *prev,
struct minimal_symbol *mfun,
struct symbol *fun)
{
struct btrace_function *bfun, *caller;
- bfun = ftrace_new_function (prev, mfun, fun);
+ bfun = ftrace_new_function (btinfo, prev, mfun, fun);
/* It is important to start at PREV's caller. Otherwise, we might find
PREV itself, if PREV is a recursive function. */
}
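
Note: the recursion caveat deserves a concrete case. If the trace is
main -> f -> f and the inner f returns, the continuation segment for f must
find its caller by searching from PREV's caller: starting at PREV would match
PREV itself and pair the return with the wrong recursion depth. A sketch of
such an up-link search (find_caller is illustrative; it is not GDB's
ftrace_find_caller signature):

  struct segment
  {
    segment *up = nullptr;        /* Caller segment.  */
    const void *sym = nullptr;    /* Identity of the containing function.  */
  };

  /* Walk the up links starting at START, looking for a segment of FN.
     Callers pass PREV->up rather than PREV, so a recursive PREV cannot
     match itself.  */
  static segment *
  find_caller (segment *start, const void *fn)
  {
    for (segment *seg = start; seg != nullptr; seg = seg->up)
      if (seg->sym == fn)
	return seg;
    return nullptr;
  }
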
/* Add a new function segment for a function switch.
+ BTINFO is the branch trace information for the current thread.
PREV is the chronologically preceding function segment.
MFUN and FUN are the symbol information we have for this function. */
static struct btrace_function *
-ftrace_new_switch (struct btrace_function *prev,
+ftrace_new_switch (struct btrace_thread_info *btinfo,
+ struct btrace_function *prev,
struct minimal_symbol *mfun,
struct symbol *fun)
{
/* This is an unexplained function switch. We can't really be sure about the
call stack, yet the best I can think of right now is to preserve it. */
- bfun = ftrace_new_function (prev, mfun, fun);
+ bfun = ftrace_new_function (btinfo, prev, mfun, fun);
bfun->up = prev->up;
bfun->flags = prev->flags;
}
/* Add a new function segment for a gap in the trace due to a decode error.
+ BTINFO is the branch trace information for the current thread.
PREV is the chronologically preceding function segment.
ERRCODE is the format-specific error code. */
static struct btrace_function *
-ftrace_new_gap (struct btrace_function *prev, int errcode)
+ftrace_new_gap (struct btrace_thread_info *btinfo,
+ struct btrace_function *prev, int errcode)
{
struct btrace_function *bfun;
  /* We hijack the previous function segment if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
bfun = prev;
else
- bfun = ftrace_new_function (prev, NULL, NULL);
+ bfun = ftrace_new_function (btinfo, prev, NULL, NULL);
bfun->errcode = errcode;
return bfun;
}
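
Note the reuse case above: if PREV is a non-gap segment that never received
an instruction, ftrace_new_gap turns PREV itself into the gap instead of
allocating a fresh segment, so no empty segment is left behind. The decision
in isolation (simplified types; the emptiness test stands in for VEC_empty):

  struct segment
  {
    int errcode = 0;     /* Non-zero marks a decode gap.  */
    bool empty = true;   /* No instructions recorded yet.  */
  };

  static segment *
  new_gap (segment *prev, int errcode)
  {
    segment *gap;

    /* Hijack an empty, non-gap predecessor instead of allocating.  */
    if (prev != nullptr && prev->errcode == 0 && prev->empty)
      gap = prev;
    else
      gap = new segment;

    gap->errcode = errcode;
    return gap;
  }
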
-/* Update BFUN with respect to the instruction at PC. This may create new
- function segments.
+/* Update BFUN with respect to the instruction at PC. BTINFO is the branch
+ trace information for the current thread. This may create new function
+ segments.
Return the chronologically latest function segment, never NULL. */
static struct btrace_function *
-ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
+ftrace_update_function (struct btrace_thread_info *btinfo,
+ struct btrace_function *bfun, CORE_ADDR pc)
{
struct bound_minimal_symbol bmfun;
struct minimal_symbol *mfun;
/* If we didn't have a function or if we had a gap before, we create one. */
if (bfun == NULL || bfun->errcode != 0)
- return ftrace_new_function (bfun, mfun, fun);
+ return ftrace_new_function (btinfo, bfun, mfun, fun);
  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  Treating it as a return
	       would lose the current back trace; when the resolved function
	       later returned, we would rebuild one with the same function
	       names but different frame id's.  This will confuse stepping.  */
fname = ftrace_print_function_name (bfun);
if (strcmp (fname, "_dl_runtime_resolve") == 0)
- return ftrace_new_tailcall (bfun, mfun, fun);
+ return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
- return ftrace_new_return (bfun, mfun, fun);
+ return ftrace_new_return (btinfo, bfun, mfun, fun);
}
	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;
- return ftrace_new_call (bfun, mfun, fun);
+ return ftrace_new_call (btinfo, bfun, mfun, fun);
case BTRACE_INSN_JUMP:
{
/* A jump to the start of a function is (typically) a tail call. */
if (start == pc)
- return ftrace_new_tailcall (bfun, mfun, fun);
+ return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
/* If we can't determine the function for PC, we treat a jump at
the end of the block as tail call if we're switching functions
and as an intra-function branch if we don't. */
if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
- return ftrace_new_tailcall (bfun, mfun, fun);
+ return ftrace_new_tailcall (btinfo, bfun, mfun, fun);
break;
}
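
Note: the jump case encodes a three-way heuristic. A jump to a known function
start is a tail call; a jump whose target function is unknown (start == 0)
counts as a tail call only when the symbol information says we switched
functions; everything else is an intra-function branch that stays in the
current segment. Restated compactly (names are stand-ins, not GDB's):

  #include <cstdint>

  enum class jump_kind { tail_call, intra_function };

  /* START is the entry PC of the function containing PC, or 0 if unknown.
     SWITCHED reflects a symbol-level function change.  */
  static jump_kind
  classify_jump (std::uint64_t start, std::uint64_t pc, bool switched)
  {
    if (start == pc)
      return jump_kind::tail_call;      /* Jump to a function start.  */
    if (start == 0 && switched)
      return jump_kind::tail_call;      /* Unknown target, new function.  */
    return jump_kind::intra_function;   /* Stay in the current segment.  */
  }
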
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));
- return ftrace_new_switch (bfun, mfun, fun);
+ return ftrace_new_switch (btinfo, bfun, mfun, fun);
}
return bfun;
if (block->end < pc)
{
/* Indicate the gap in the trace. */
- end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
+ end = ftrace_new_gap (btinfo, end, BDE_BTS_OVERFLOW);
if (begin == NULL)
begin = end;
break;
}
- end = ftrace_update_function (end, pc);
+ end = ftrace_update_function (btinfo, end, pc);
if (begin == NULL)
begin = end;
{
/* Indicate the gap in the trace. We just added INSN so we're
not at the beginning. */
- end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
+ end = ftrace_new_gap (btinfo, end, BDE_BTS_INSN_SIZE);
VEC_safe_push (bfun_s, *gaps, end);
}
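
Note: both the overflow and the instruction-size paths above follow the same
begin/end bookkeeping. END always tracks the chronologically latest segment,
and BEGIN is initialized lazily by the first segment created, gap or not. A
minimal model of that pattern (append_segment stands in for the various
ftrace_new_* calls):

  struct segment { segment *next = nullptr; };

  struct trace_range
  {
    segment *begin = nullptr;   /* First segment, set once.  */
    segment *end = nullptr;     /* Latest segment.  */
  };

  static void
  append_segment (trace_range *r, segment *seg)
  {
    if (r->end != nullptr)
      r->end->next = seg;
    r->end = seg;
    if (r->begin == nullptr)    /* The first segment also starts the trace.  */
      r->begin = seg;
  }
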
-/* Add function branch trace using DECODER. */
+/* Add function branch trace to BTINFO using DECODER. */
static void
-ftrace_add_pt (struct pt_insn_decoder *decoder,
+ftrace_add_pt (struct btrace_thread_info *btinfo,
+ struct pt_insn_decoder *decoder,
struct btrace_function **pbegin,
struct btrace_function **pend, int *plevel,
VEC (bfun_s) **gaps)
	      /* Tracing is disabled and re-enabled each time we enter the
		 kernel.  Most times, we continue from the same instruction we
		 stopped before.  This is indicated via the RESUMED instruction
		 flag.  The ENABLED instruction flag means that we continued
		 from some other instruction.  Indicate this as a trace gap.  */
if (insn.enabled)
{
- *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
+ *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_DISABLED);
VEC_safe_push (bfun_s, *gaps, end);
/* Indicate trace overflows. */
if (insn.resynced)
{
- *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
+ *pend = end = ftrace_new_gap (btinfo, end, BDE_PT_OVERFLOW);
if (begin == NULL)
*pbegin = begin = end;
offset, insn.ip);
}
- upd = ftrace_update_function (end, insn.ip);
+ upd = ftrace_update_function (btinfo, end, insn.ip);
if (upd != end)
{
*pend = end = upd;
break;
/* Indicate the gap in the trace. */
- *pend = end = ftrace_new_gap (end, errcode);
+ *pend = end = ftrace_new_gap (btinfo, end, errcode);
if (begin == NULL)
*pbegin = begin = end;
error (_("Failed to configure the Intel Processor Trace decoder: "
"%s."), pt_errstr (pt_errcode (errcode)));
- ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level, gaps);
+ ftrace_add_pt (btinfo, decoder, &btinfo->begin, &btinfo->end, &level,
+ gaps);
}
CATCH (error, RETURN_MASK_ALL)
{
/* Indicate a gap in the trace if we quit trace processing. */
if (error.reason == RETURN_QUIT && btinfo->end != NULL)
{
- btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
+ btinfo->end = ftrace_new_gap (btinfo, btinfo->end, BDE_PT_USER_QUIT);
VEC_safe_push (bfun_s, *gaps, btinfo->end);
}
/* Compute the trace, provided we have any. */
if (!btrace_data_empty (&btrace))
{
- struct btrace_function *bfun;
-
/* Store the raw trace data. The stored data will be cleared in
btrace_clear, so we always append the new trace. */
btrace_data_append (&btinfo->data, &btrace);
btrace_maint_clear (btinfo);
- btinfo->functions.clear ();
btrace_clear_history (btinfo);
btrace_compute_ftrace (tp, &btrace);
-
- for (bfun = btinfo->begin; bfun != NULL; bfun = bfun->flow.next)
- btinfo->functions.push_back (bfun);
}
do_cleanups (cleanup);
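
Note: with registration moved into ftrace_new_function, the rebuild loop
removed above is redundant. When btrace_compute_ftrace returns,
btinfo->functions already lists every segment in chronological order. One
hedged way to express that invariant, using the names from this patch (the
check itself is not part of the patch):

  /* After decoding, walking the flow chain and indexing the vector
     must agree.  */
  size_t i = 0;
  for (const struct btrace_function *bfun = btinfo->begin; bfun != NULL;
       bfun = bfun->flow.next)
    gdb_assert (btinfo->functions[i++] == bfun);
  gdb_assert (i == btinfo->functions.size ());
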
btrace_clear (struct thread_info *tp)
{
struct btrace_thread_info *btinfo;
- struct btrace_function *it, *trash;
DEBUG ("clear thread %s (%s)", print_thread_id (tp),
target_pid_to_str (tp->ptid));
reinit_frame_cache ();
btinfo = &tp->btrace;
- btinfo->functions.clear ();
-
- it = btinfo->begin;
- while (it != NULL)
+ for (auto &bfun : btinfo->functions)
{
- trash = it;
- it = it->flow.next;
-
- VEC_free (btrace_insn_s, trash->insn);
- xfree (trash);
+ VEC_free (btrace_insn_s, bfun->insn);
+ xfree (bfun);
}
+ btinfo->functions.clear ();
btinfo->begin = NULL;
btinfo->end = NULL;
btinfo->ngaps = 0;
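
Note: the rewritten cleanup frees each owned segment through the vector and
only then drops the pointers, replacing the old walk over the flow.next
chain. The same shape in standalone form (raw owning pointers in a vector,
mirroring the patched code; delete stands in for VEC_free plus xfree):

  #include <vector>

  struct segment { /* Per-segment data, owned by the vector below.  */ };

  static void
  clear_all (std::vector<segment *> &functions)
  {
    /* Free every segment first ...  */
    for (segment *seg : functions)
      delete seg;

    /* ... then drop the now-dangling pointers.  */
    functions.clear ();
  }
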