From 04eeec32e6d39d7c102d31a5e196ff9907d3cc60 Mon Sep 17 00:00:00 2001 From: Alexey Samsonov Date: Fri, 19 Dec 2014 20:35:53 +0000 Subject: [PATCH] [ASan] Change activation strategy. Now ASan deactivation doesn't modify common or ASan-specific runtime flags. Flags stay constant after initialization, and "deactivation" instead stashes initialized runtime state, and deactivates the runtime. Activation then just restores the original state (possibly, overridden by some activation flags provided in system property on Android). llvm-svn: 224614 --- compiler-rt/lib/asan/asan_activation.cc | 48 ++++++++++------------ compiler-rt/lib/asan/asan_activation.h | 2 +- compiler-rt/lib/asan/asan_allocator.cc | 21 ++++++++++ compiler-rt/lib/asan/asan_allocator.h | 2 + compiler-rt/lib/asan/asan_flags.cc | 11 ++--- compiler-rt/lib/asan/asan_rtl.cc | 5 +++ .../lib/sanitizer_common/sanitizer_allocator.h | 6 ++- 7 files changed, 59 insertions(+), 36 deletions(-) diff --git a/compiler-rt/lib/asan/asan_activation.cc b/compiler-rt/lib/asan/asan_activation.cc index b7c00fd..a5fa9c8 100644 --- a/compiler-rt/lib/asan/asan_activation.cc +++ b/compiler-rt/lib/asan/asan_activation.cc @@ -27,23 +27,12 @@ static struct AsanDeactivatedFlags { int malloc_context_size; bool poison_heap; - void CopyFrom(const Flags *f, const CommonFlags *cf) { - allocator_options.SetFrom(f, cf); - malloc_context_size = cf->malloc_context_size; - poison_heap = f->poison_heap; - } - void OverrideFromActivationFlags() { Flags f; CommonFlags cf; // Copy the current activation flags. 
- f.quarantine_size = allocator_options.quarantine_size_mb << 20; - f.redzone = allocator_options.min_redzone; - f.max_redzone = allocator_options.max_redzone; - cf.allocator_may_return_null = allocator_options.may_return_null; - f.alloc_dealloc_mismatch = allocator_options.alloc_dealloc_mismatch; - + allocator_options.CopyTo(&f, &cf); cf.malloc_context_size = malloc_context_size; f.poison_heap = poison_heap; @@ -55,7 +44,9 @@ static struct AsanDeactivatedFlags { ParseCommonFlagsFromString(&cf, buf); ParseFlagsFromString(&f, buf); - CopyFrom(&f, &cf); + allocator_options.SetFrom(&f, &cf); + malloc_context_size = cf.malloc_context_size; + poison_heap = f.poison_heap; } void Print() { @@ -71,20 +62,25 @@ static struct AsanDeactivatedFlags { static bool asan_is_deactivated; -void AsanStartDeactivated() { +void AsanDeactivate() { + CHECK(!asan_is_deactivated); VReport(1, "Deactivating ASan\n"); - // Save flag values. - asan_deactivated_flags.CopyFrom(flags(), common_flags()); - - // FIXME: Don't overwrite commandline flags. Instead, make the flags store - // the original values calculated during flag parsing, and re-initialize - // the necessary runtime objects. - flags()->quarantine_size = 0; - flags()->max_redzone = 16; - flags()->poison_heap = false; - common_flags()->malloc_context_size = 0; - flags()->alloc_dealloc_mismatch = false; - common_flags()->allocator_may_return_null = true; + + // Stash runtime state. + GetAllocatorOptions(&asan_deactivated_flags.allocator_options); + asan_deactivated_flags.malloc_context_size = GetMallocContextSize(); + asan_deactivated_flags.poison_heap = CanPoisonMemory(); + + // Deactivate the runtime. + SetCanPoisonMemory(false); + SetMallocContextSize(1); + AllocatorOptions disabled = asan_deactivated_flags.allocator_options; + disabled.quarantine_size_mb = 0; + disabled.min_redzone = 16; // Redzone must be at least 16 bytes long. 
+ disabled.max_redzone = 16; + disabled.alloc_dealloc_mismatch = false; + disabled.may_return_null = true; + ReInitializeAllocator(disabled); asan_is_deactivated = true; } diff --git a/compiler-rt/lib/asan/asan_activation.h b/compiler-rt/lib/asan/asan_activation.h index dafb840..d5e1ce4 100644 --- a/compiler-rt/lib/asan/asan_activation.h +++ b/compiler-rt/lib/asan/asan_activation.h @@ -16,7 +16,7 @@ #define ASAN_ACTIVATION_H namespace __asan { -void AsanStartDeactivated(); +void AsanDeactivate(); void AsanActivate(); } // namespace __asan diff --git a/compiler-rt/lib/asan/asan_allocator.cc b/compiler-rt/lib/asan/asan_allocator.cc index b5a09bd..e2e0fb5 100644 --- a/compiler-rt/lib/asan/asan_allocator.cc +++ b/compiler-rt/lib/asan/asan_allocator.cc @@ -213,6 +213,14 @@ void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { alloc_dealloc_mismatch = f->alloc_dealloc_mismatch; } +void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) { + f->quarantine_size = (int)quarantine_size_mb << 20; + f->redzone = min_redzone; + f->max_redzone = max_redzone; + cf->allocator_may_return_null = may_return_null; + f->alloc_dealloc_mismatch = alloc_dealloc_mismatch; +} + struct Allocator { static const uptr kMaxAllowedMallocSize = FIRST_32_SECOND_64(3UL << 30, 64UL << 30); @@ -263,6 +271,15 @@ struct Allocator { SharedInitCode(options); } + void GetOptions(AllocatorOptions *options) const { + options->quarantine_size_mb = quarantine.GetSize() >> 20; + options->min_redzone = atomic_load(&min_redzone, memory_order_acquire); + options->max_redzone = atomic_load(&max_redzone, memory_order_acquire); + options->may_return_null = allocator.MayReturnNull(); + options->alloc_dealloc_mismatch = + atomic_load(&alloc_dealloc_mismatch, memory_order_acquire); + } + // -------------------- Helper methods. 
------------------------- uptr ComputeRZLog(uptr user_requested_size) { u32 rz_log = @@ -662,6 +679,10 @@ void ReInitializeAllocator(const AllocatorOptions &options) { instance.ReInitialize(options); } +void GetAllocatorOptions(AllocatorOptions *options) { + instance.GetOptions(options); +} + AsanChunkView FindHeapChunkByAddress(uptr addr) { return instance.FindHeapChunkByAddress(addr); } diff --git a/compiler-rt/lib/asan/asan_allocator.h b/compiler-rt/lib/asan/asan_allocator.h index 2072954..521d47b 100644 --- a/compiler-rt/lib/asan/asan_allocator.h +++ b/compiler-rt/lib/asan/asan_allocator.h @@ -40,10 +40,12 @@ struct AllocatorOptions { u8 alloc_dealloc_mismatch; void SetFrom(const Flags *f, const CommonFlags *cf); + void CopyTo(Flags *f, CommonFlags *cf); }; void InitializeAllocator(const AllocatorOptions &options); void ReInitializeAllocator(const AllocatorOptions &options); +void GetAllocatorOptions(AllocatorOptions *options); class AsanChunkView { public: diff --git a/compiler-rt/lib/asan/asan_flags.cc b/compiler-rt/lib/asan/asan_flags.cc index a478006..c0635df 100644 --- a/compiler-rt/lib/asan/asan_flags.cc +++ b/compiler-rt/lib/asan/asan_flags.cc @@ -247,14 +247,9 @@ void InitializeFlags(Flags *f) { VReport(1, "Parsed ASAN_OPTIONS: %s\n", env); } - // If ASan starts in deactivated state, stash and clear some flags. - // Otherwise, let activation flags override current settings. - if (flags()->start_deactivated) { - AsanStartDeactivated(); - } else { - // Parse flags that may change between startup and activation. - // On Android they come from a system property. - // On other platforms this is no-op. + // Let activation flags override current settings. On Android they come + // from a system property. On other platforms this is no-op. 
+ if (!flags()->start_deactivated) { char buf[100]; GetExtraActivationFlags(buf, sizeof(buf)); ParseCommonFlagsFromString(cf, buf); diff --git a/compiler-rt/lib/asan/asan_rtl.cc b/compiler-rt/lib/asan/asan_rtl.cc index e1c8769..895ac6b 100644 --- a/compiler-rt/lib/asan/asan_rtl.cc +++ b/compiler-rt/lib/asan/asan_rtl.cc @@ -397,6 +397,11 @@ static void AsanInitInternal() { MaybeStartBackgroudThread(); + // Now that ASan runtime is (mostly) initialized, deactivate it if + // necessary, so that it can be re-activated when requested. + if (flags()->start_deactivated) + AsanDeactivate(); + // On Linux AsanThread::ThreadStart() calls malloc() that's why asan_inited // should be set to 1 prior to initializing the threads. asan_inited = 1; diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h index d3723eb..d749acb 100644 --- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h +++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h @@ -1300,8 +1300,12 @@ class CombinedAllocator { return res; } + bool MayReturnNull() const { + return atomic_load(&may_return_null_, memory_order_acquire); + } + void *ReturnNullOrDie() { - if (atomic_load(&may_return_null_, memory_order_acquire)) + if (MayReturnNull()) return 0; ReportAllocatorCannotReturnNull(); } -- 2.7.4