size_t log_size;
size_t used;
size_t dtor_count; /* Counts how many times our destructor has already
- been called. */
+ been called. */
} unw_trace_cache_t;
/* Default "nothing known" frame descriptor: frame type is
   UNW_X86_64_FRAME_OTHER and the remaining -1 values mark the
   CFA-register choice and the RBP/RSP CFA offsets as unknown.
   NOTE(review): positional initializer — the field order is assumed to
   match the unw_tdep_frame_t declaration in the tdep header; confirm
   there before reordering or adding fields.  Presumably used to reset
   trace-cache slots — verify against the cache-initialization callers. */
static const unw_tdep_frame_t empty_frame = { 0, UNW_X86_64_FRAME_OTHER, -1, -1, 0, -1, -1 };
/* Not yet our turn to get destroyed. Re-install ourselves into the key. */
pthread_setspecific(trace_cache_key, cache);
Debug(5, "delayed freeing cache %p (%zx to go)\n", cache,
- PTHREAD_DESTRUCTOR_ITERATIONS - cache->dtor_count);
+ PTHREAD_DESTRUCTOR_ITERATIONS - cache->dtor_count);
return;
}
tls_cache_destroyed = 1;
/* The current thread is in the process of exiting. Don't recreate
cache, as we wouldn't have another chance to free it. */
Debug(5, "refusing to reallocate cache: "
- "thread-locals are being deallocated\n");
+ "thread-locals are being deallocated\n");
return NULL;
}
highly unusual unwind info which uses these creatively. */
static unw_tdep_frame_t *
trace_init_addr (unw_tdep_frame_t *f,
- unw_cursor_t *cursor,
- unw_word_t cfa,
- unw_word_t rip,
- unw_word_t rbp,
- unw_word_t rsp)
+ unw_cursor_t *cursor,
+ unw_word_t cfa,
+ unw_word_t rip,
+ unw_word_t rbp,
+ unw_word_t rsp)
{
struct cursor *c = (struct cursor *) cursor;
struct dwarf_cursor *d = &c->dwarf;
f->last_frame = -1;
Debug (3, "frame va %lx type %d last %d cfa %s+%d rbp @ cfa%+d rsp @ cfa%+d\n",
- f->virtual_address, f->frame_type, f->last_frame,
- f->cfa_reg_rsp ? "rsp" : "rbp", f->cfa_reg_offset,
- f->rbp_cfa_offset, f->rsp_cfa_offset);
+ f->virtual_address, f->frame_type, f->last_frame,
+ f->cfa_reg_rsp ? "rsp" : "rbp", f->cfa_reg_offset,
+ f->rbp_cfa_offset, f->rsp_cfa_offset);
return f;
}
frame cache slot which describes RIP. */
static unw_tdep_frame_t *
trace_lookup (unw_cursor_t *cursor,
- unw_trace_cache_t *cache,
- unw_word_t cfa,
- unw_word_t rip,
- unw_word_t rbp,
- unw_word_t rsp)
+ unw_trace_cache_t *cache,
+ unw_word_t cfa,
+ unw_word_t rip,
+ unw_word_t rbp,
+ unw_word_t rsp)
{
/* First look up for previously cached information using cache as
linear probing hash table with probe step of 1. Majority of
{
rip -= d->use_prev_instr;
Debug (2, "depth %d cfa 0x%lx rip 0x%lx rsp 0x%lx rbp 0x%lx\n",
- depth, cfa, rip, rsp, rbp);
+ depth, cfa, rip, rsp, rbp);
/* See if we have this address cached. If not, evaluate enough of
the dwarf unwind information to fill the cache line data, or to
cfa = (f->cfa_reg_rsp ? rsp : rbp) + f->cfa_reg_offset;
ACCESS_MEM_FAST(ret, c->validate, d, cfa - 8, rip);
if (likely(ret >= 0) && likely(f->rbp_cfa_offset != -1))
- ACCESS_MEM_FAST(ret, c->validate, d, cfa + f->rbp_cfa_offset, rbp);
+ ACCESS_MEM_FAST(ret, c->validate, d, cfa + f->rbp_cfa_offset, rbp);
/* Don't bother reading RSP from DWARF, CFA becomes new RSP. */
rsp = cfa;
d->use_prev_instr = 0;
break;
+ case UNW_X86_64_FRAME_ALIGNED:
+ /* Address of RIP was pushed on the stack via a simple
+ * def_cfa_expr - result stack offset stored in cfa_reg_offset */
+ cfa = (f->cfa_reg_rsp ? rsp : rbp) + f->cfa_reg_offset;
+ ACCESS_MEM_FAST(ret, c->validate, d, cfa, cfa);
+ if (likely(ret >= 0))
+ ACCESS_MEM_FAST(ret, c->validate, d, cfa - 8, rip);
+ if (likely(ret >= 0))
+ ACCESS_MEM_FAST(ret, c->validate, d, rbp, rbp);
+
+ /* Don't bother reading RSP from DWARF, CFA becomes new RSP. */
+ rsp = cfa;
+
+ /* Next frame needs to back up for unwind info lookup. */
+ d->use_prev_instr = 1;
+
+ break;
+
default:
/* We cannot trace through this frame, give up and tell the
- caller we had to stop. Data collected so far may still be
- useful to the caller, so let it know how far we got. */
+ caller we had to stop. Data collected so far may still be
+ useful to the caller, so let it know how far we got. */
ret = -UNW_ESTOPUNWIND;
break;
}
Debug (4, "new cfa 0x%lx rip 0x%lx rsp 0x%lx rbp 0x%lx\n",
- cfa, rip, rsp, rbp);
+ cfa, rip, rsp, rbp);
/* If we failed or ended up somewhere bogus, stop. */
if (unlikely(ret < 0 || rip < 0x4000))