uptr allocated_size = malloc_info.AllocationSize((uptr)p);
// Die if p is not malloced or if it is already freed.
if (allocated_size == 0) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_FATAL_HERE;
ReportAsanGetAllocatedSizeNotOwned((uptr)p, &stack);
}
return allocated_size;
int report_globals;
// If set, attempts to catch initialization order issues.
bool check_initialization_order;
- // Max number of stack frames kept for each allocation.
+ // Max number of stack frames kept for each allocation/deallocation.
int malloc_context_size;
// If set, uses custom wrappers and replacements for libc string functions
// to find more errors.
bool print_full_thread_history;
// ASan will write logs to "log_path.pid" instead of stderr.
const char *log_path;
+ // Use fast (frame-pointer-based) unwinder on fatal errors (if available).
+ bool fast_unwind_on_fatal;
+ // Use fast (frame-pointer-based) unwinder on malloc/free (if available).
+ bool fast_unwind_on_malloc;
};
Flags *flags();
const char *offset1 = (const char*)_offset1; \
const char *offset2 = (const char*)_offset2; \
if (RangesOverlap(offset1, length1, offset2, length2)) { \
- GET_STACK_TRACE_HERE(kStackTraceMax); \
+ GET_STACK_TRACE_FATAL_HERE; \
ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
offset2, length2, &stack); \
} \
#if ASAN_INTERCEPT_PTHREAD_CREATE
INTERCEPTOR(int, pthread_create, void *thread,
void *attr, void *(*start_routine)(void*), void *arg) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
asanThreadRegistry().RegisterThread(t);
void* security, uptr stack_size,
DWORD (__stdcall *start_routine)(void*), void* arg,
DWORD flags, void* tid) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
u32 current_tid = asanThreadRegistry().GetCurrentTidOrInvalid();
AsanThread *t = AsanThread::Create(current_tid, start_routine, arg, &stack);
asanThreadRegistry().RegisterThread(t);
pthread_mutex_unlock((pthread_mutex_t*)&opaque_storage_);
}
-#ifdef __arm__
-#define UNWIND_STOP _URC_END_OF_STACK
-#define UNWIND_CONTINUE _URC_NO_REASON
-#else
-#define UNWIND_STOP _URC_NORMAL_STOP
-#define UNWIND_CONTINUE _URC_NO_REASON
-#endif
-
-uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
-#ifdef __arm__
- uptr val;
- _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
- 15 /* r15 = PC */, _UVRSD_UINT32, &val);
- CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
- // Clear the Thumb bit.
- return val & ~(uptr)1;
-#else
- return _Unwind_GetIP(ctx);
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
+#if defined(__arm__) || \
+ defined(__powerpc__) || defined(__powerpc64__) || \
+ defined(__sparc__)
+ fast = false;
#endif
-}
-
-_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx,
- void *param) {
- StackTrace *b = (StackTrace*)param;
- CHECK(b->size < b->max_size);
- uptr pc = Unwind_GetIP(ctx);
- b->trace[b->size++] = pc;
- if (b->size == b->max_size) return UNWIND_STOP;
- return UNWIND_CONTINUE;
-}
-
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
+ if (!fast)
+ return stack->SlowUnwindStack(pc, max_s, 3);
stack->size = 0;
stack->trace[0] = pc;
- if ((max_s) > 1) {
+ if (max_s > 1) {
stack->max_size = max_s;
-#if defined(__arm__) || \
- defined(__powerpc__) || defined(__powerpc64__) || \
- defined(__sparc__)
- _Unwind_Backtrace(Unwind_Trace, stack);
- // Pop off the two ASAN functions from the backtrace.
- stack->PopStackFrames(2);
-#else
if (!asan_inited) return;
if (AsanThread *t = asanThreadRegistry().GetCurrent())
stack->FastUnwindStack(pc, bp, t->stack_top(), t->stack_bottom());
-#endif
}
}
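
For reference, the fast path above relies on FastUnwindStack, which follows the chain of saved frame pointers; that is presumably why fast is forced off on ARM, PowerPC and SPARC, where that chain cannot be walked the same way. Below is a minimal, self-contained sketch of the technique under the conventional x86-64 frame layout (the word at bp holds the caller's frame pointer, the next word holds the return address); it is an illustration only, not the actual FastUnwindStack implementation.

// Minimal sketch of frame-pointer-based ("fast") unwinding. Illustration only.
#include <stddef.h>
#include <stdint.h>

size_t FastUnwindSketch(uintptr_t pc, uintptr_t bp,
                        uintptr_t stack_top, uintptr_t stack_bottom,
                        uintptr_t *out, size_t max_frames) {
  size_t n = 0;
  if (n < max_frames) out[n++] = pc;  // Frame 0 is the current pc.
  uintptr_t *frame = (uintptr_t *)bp;
  while (n < max_frames &&
         (uintptr_t)frame > stack_bottom &&                        // Stay within
         (uintptr_t)frame + 2 * sizeof(uintptr_t) <= stack_top) {  // the stack.
    uintptr_t ret = frame[1];                 // Saved return address.
    uintptr_t *next = (uintptr_t *)frame[0];  // Caller's frame pointer.
    if (ret == 0) break;
    out[n++] = ret;
    if (next <= frame) break;  // Frame pointers must move toward stack_top.
    frame = next;
  }
  return n;
}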
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp) {
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast) {
stack->size = 0;
stack->trace[0] = pc;
if ((max_s) > 1) {
// alloc_asan_context().
extern "C"
void asan_dispatch_call_block_and_release(void *block) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
asan_block_context_t *context = (asan_block_context_t*)block;
if (flags()->verbosity >= 2) {
Report("asan_dispatch_call_block_and_release(): "
#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
dispatch_function_t func) { \
- GET_STACK_TRACE_HERE(kStackTraceMax); \
+ GET_STACK_TRACE_THREAD; \
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
if (flags()->verbosity >= 2) { \
Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
dispatch_queue_t dq, void *ctxt,
dispatch_function_t func) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (flags()->verbosity >= 2) {
Report("dispatch_after_f: %p\n", asan_ctxt);
INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
dispatch_queue_t dq, void *ctxt,
dispatch_function_t func) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
if (flags()->verbosity >= 2) {
Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
void (^asan_block)(void); \
int parent_tid = asanThreadRegistry().GetCurrentTidOrInvalid(); \
asan_block = ^(void) { \
- GET_STACK_TRACE_HERE(kStackTraceMax); \
+ GET_STACK_TRACE_THREAD; \
asan_register_worker_thread(parent_tid, &stack); \
work(); \
}
asan_block_context_t *ctxt = (asan_block_context_t*)arg;
worker_t fn = (worker_t)(ctxt->func);
void *result = fn(ctxt->block);
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
asan_free(arg, &stack);
return result;
}
INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
void *(*workitem_func)(void *), void * workitem_arg,
pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
- GET_STACK_TRACE_HERE(kStackTraceMax);
+ GET_STACK_TRACE_THREAD;
asan_block_context_t *asan_ctxt =
(asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
asan_ctxt->block = workitem_arg;
using namespace __asan; // NOLINT
INTERCEPTOR(void, free, void *ptr) {
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack);
}
INTERCEPTOR(void, cfree, void *ptr) {
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack);
}
INTERCEPTOR(void*, malloc, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
CHECK(allocated < kCallocPoolSize);
return mem;
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_memalign(boundary, size, &stack);
}
ALIAS("memalign");
INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc_usable_size(ptr, &stack);
}
}
INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
// Printf("posix_memalign: %zx %zu\n", alignment, size);
return asan_posix_memalign(memptr, alignment, size, &stack);
}
INTERCEPTOR(void*, valloc, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_valloc(size, &stack);
}
INTERCEPTOR(void*, pvalloc, uptr size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_pvalloc(size, &stack);
}
#endif
} else {
if (!asan_mz_size(ptr)) ptr = get_saved_cfallocator_ref(ptr);
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack);
}
}
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
CHECK(system_malloc_zone);
return malloc_zone_malloc(system_malloc_zone, size);
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
CHECK(allocated < kCallocPoolSize);
return mem;
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
CHECK(system_malloc_zone);
return malloc_zone_valloc(system_malloc_zone, size);
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_memalign(GetPageSizeCached(), size, &stack);
}
void ALWAYS_INLINE free_common(void *context, void *ptr) {
if (!ptr) return;
if (asan_mz_size(ptr)) {
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
asan_free(ptr, &stack);
} else {
// If the pointer does not belong to any of the zones, use one of the
// If the memory chunk pointer was moved to store additional
// CFAllocatorRef, fix it back.
ptr = get_saved_cfallocator_ref(ptr);
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
if (!flags()->mac_ignore_invalid_free) {
asan_free(ptr, &stack);
} else {
void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
if (!ptr) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
} else {
if (asan_mz_size(ptr)) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
} else {
// We can't recover from reallocating an unknown address, because
// this would require reading at most |size| bytes from
 // potentially inaccessible memory.
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
GET_ZONE_FOR_PTR(ptr);
ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
}
void *cf_realloc(void *ptr, CFIndex size, CFOptionFlags hint, void *info) {
if (!ptr) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
} else {
if (asan_mz_size(ptr)) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
} else {
// We can't recover from reallocating an unknown address, because
// this would require reading at most |size| bytes from
 // potentially inaccessible memory.
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
GET_ZONE_FOR_PTR(ptr);
ReportMacCfReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
}
CHECK(system_malloc_zone);
return malloc_zone_memalign(system_malloc_zone, align, size);
}
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_memalign(align, size, &stack);
}
extern "C" {
void free(void *ptr) {
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);
+ GET_STACK_TRACE_FREE;
return asan_free(ptr, &stack);
}
}
void *malloc(size_t size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc(size, &stack);
}
}
void *calloc(size_t nmemb, size_t size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_calloc(nmemb, size, &stack);
}
}
void *realloc(void *ptr, size_t size) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_realloc(ptr, size, &stack);
}
}
size_t _msize(void *ptr) {
- GET_STACK_TRACE_HERE_FOR_MALLOC;
+ GET_STACK_TRACE_MALLOC;
return asan_malloc_usable_size(ptr, &stack);
}
} // namespace std
#define OPERATOR_NEW_BODY \
- GET_STACK_TRACE_HERE_FOR_MALLOC;\
+ GET_STACK_TRACE_MALLOC;\
return asan_memalign(0, size, &stack);
INTERCEPTOR_ATTRIBUTE
void *operator new[](size_t size, std::nothrow_t const&) { OPERATOR_NEW_BODY; }
#define OPERATOR_DELETE_BODY \
- GET_STACK_TRACE_HERE_FOR_FREE(ptr);\
+ GET_STACK_TRACE_FREE;\
asan_free(ptr, &stack);
INTERCEPTOR_ATTRIBUTE
(void*)addr, (void*)pc, (void*)sp, (void*)bp,
asanThreadRegistry().GetCurrentTidOrInvalid());
Printf("AddressSanitizer can not provide additional info.\n");
- GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp);
+ GET_STACK_TRACE_FATAL(pc, bp);
PrintStack(&stack);
}
access_size, (void*)addr, curr_tid,
ThreadNameWithParenthesis(curr_tid, tname, sizeof(tname)));
- GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp);
+ GET_STACK_TRACE_FATAL(pc, bp);
PrintStack(&stack);
DescribeAddress(addr, access_size);
ParseFlag(str, &f->allow_reexec, "allow_reexec");
ParseFlag(str, &f->print_full_thread_history, "print_full_thread_history");
ParseFlag(str, &f->log_path, "log_path");
+ ParseFlag(str, &f->fast_unwind_on_fatal, "fast_unwind_on_fatal");
+ ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
}
void InitializeFlags(Flags *f, const char *env) {
f->allow_reexec = true;
f->print_full_thread_history = true;
f->log_path = 0;
+ f->fast_unwind_on_fatal = true;
+ f->fast_unwind_on_malloc = true;
// Override from user-specified string.
ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());
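
Both new flags default to true. As a hedged usage sketch: the runtime reads flag strings from the ASAN_OPTIONS environment variable and from the __asan_default_options() hook consumed by MaybeCallAsanDefaultOptions() above, so a program that prefers the slower but more complete unwinder for allocation stacks could do something like the following (the flag values are purely illustrative):

// Illustrative only: supply default ASan options from the instrumented binary.
// Keeps the fast unwinder for fatal reports but disables it for malloc/free,
// and asks for deeper allocation stacks via malloc_context_size.
extern "C" const char *__asan_default_options() {
  return "fast_unwind_on_malloc=0 malloc_context_size=20";
}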
#define ASAN_STACK_H
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "asan_flags.h"
namespace __asan {
-void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp);
+void GetStackTrace(StackTrace *stack, uptr max_s, uptr pc, uptr bp, bool fast);
void PrintStack(StackTrace *stack);
} // namespace __asan
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
-// fast_unwind is currently unused.
+// If fast is false (or unsupported on this platform), the slow unwinder
+// (SlowUnwindStack) is used instead.
-#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp) \
+#define GET_STACK_TRACE_WITH_PC_AND_BP(max_s, pc, bp, fast) \
StackTrace stack; \
- GetStackTrace(&stack, max_s, pc, bp)
+ GetStackTrace(&stack, max_s, pc, bp, fast)
// NOTE: A rule of thumb is to retrieve the stack trace in the interceptors
// as early as possible (in functions exposed to the user), as we generally
// don't want the stack trace to contain functions from ASan internals.
-#define GET_STACK_TRACE_HERE(max_size) \
+#define GET_STACK_TRACE(max_size, fast) \
GET_STACK_TRACE_WITH_PC_AND_BP(max_size, \
- StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
+ StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), fast)
-#define GET_STACK_TRACE_HERE_FOR_MALLOC \
- GET_STACK_TRACE_HERE(flags()->malloc_context_size)
+#define GET_STACK_TRACE_FATAL(pc, bp) \
+ GET_STACK_TRACE_WITH_PC_AND_BP(kStackTraceMax, pc, bp, \
+ flags()->fast_unwind_on_fatal)
-#define GET_STACK_TRACE_HERE_FOR_FREE(ptr) \
- GET_STACK_TRACE_HERE(flags()->malloc_context_size)
+#define GET_STACK_TRACE_FATAL_HERE \
+ GET_STACK_TRACE(kStackTraceMax, flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_THREAD \
+ GET_STACK_TRACE(kStackTraceMax, true)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(flags()->malloc_context_size, \
+ flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
#define PRINT_CURRENT_STACK() \
{ \
- GET_STACK_TRACE_HERE(kStackTraceMax); \
+ GET_STACK_TRACE(kStackTraceMax, \
+ flags()->fast_unwind_on_fatal); \
PrintStack(&stack); \
}
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
+#include "sanitizer_stacktrace.h"
#include <fcntl.h>
#include <pthread.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
+#include <unwind.h>
#include <errno.h>
#include <sys/prctl.h>
return true;
}
+//------------------------- SlowUnwindStack -----------------------------------
+#ifdef __arm__
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#ifdef __arm__
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return _Unwind_GetIP(ctx);
+#endif
+}
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ StackTrace *b = (StackTrace*)param;
+ CHECK(b->size < b->max_size);
+ uptr pc = Unwind_GetIP(ctx);
+ b->trace[b->size++] = pc;
+ if (b->size == b->max_size) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
+}
+
+void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth, uptr frames_to_pop) {
+ this->size = 0;
+ this->trace[0] = pc;
+ this->max_size = max_depth;
+ if (max_depth > 1) {
+ _Unwind_Backtrace(Unwind_Trace, this);
+ this->PopStackFrames(frames_to_pop);
+ }
+}
+
} // namespace __sanitizer
#endif // __linux__
}
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom);
+ void SlowUnwindStack(uptr pc, uptr max_depth, uptr frames_to_pop);
void PopStackFrames(uptr count);
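
PopStackFrames, declared above, is what SlowUnwindStack uses to drop its frames_to_pop leading entries so that reports start at user code rather than at the unwinder itself. A minimal sketch of the operation (an in-place left shift; not necessarily the exact implementation):

#include <stddef.h>
#include <stdint.h>

// Drop the first `count` frames from a collected trace.
void PopStackFramesSketch(uintptr_t *trace, size_t *size, size_t count) {
  if (count > *size) count = *size;  // Never pop more frames than we have.
  *size -= count;
  for (size_t i = 0; i < *size; i++)
    trace[i] = trace[i + count];     // Shift the remaining frames forward.
}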