Summary:
To avoid starting a separate thread to return unused memory to the
system (the thread interferes with process startup on Android: Zygote
waits for all threads to exit before forking, but this thread never
exits), try to return memory right after free.
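
Roughly, the new scheme looks as follows (an illustrative sketch, not
code from this patch; NanoTime() stands in for the sanitizer's internal
clock, and the real logic lives in MaybeReleaseToOS below):

    #include <chrono>
    #include <cstdint>

    const int32_t kReleaseToOSIntervalNever = -1;  // mirrors the new default

    // Stand-in for the sanitizer's monotonic clock.
    static uint64_t NanoTime() {
      using namespace std::chrono;
      return duration_cast<nanoseconds>(
                 steady_clock::now().time_since_epoch()).count();
    }

    struct Region { uint64_t last_release_at_ns = 0; };

    // Called from the deallocation path itself, so no background
    // thread is needed; a per-region timestamp throttles the work.
    void MaybeReleaseToOS(Region *region, int32_t interval_ms) {
      if (interval_ms < 0) return;  // releasing disabled (the default)
      uint64_t now_ns = NanoTime();
      if (region->last_release_at_ns + interval_ms * 1000000ULL > now_ns)
        return;  // memory was returned recently; keep free() cheap
      region->last_release_at_ns = now_ns;
      // ... sort freed chunks, coalesce runs, madvise whole pages ...
    }

    int main() {
      Region r;
      MaybeReleaseToOS(&r, 5000);  // at most one release pass per 5 s
    }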
Reviewers: eugenis
Subscribers: cryptoad, filcab, danalbert, kubabrecka, llvm-commits
Patch by Aleksey Shlyapnikov.
Differential Revision: https://reviews.llvm.org/D27003
git-svn-id: https://llvm.org/svn/llvm-project/compiler-rt/trunk@288091 91177308-0d34-0410-b5e6-96231b3b80d8
Change-Id: I2e01818bbdf3a0b9f0855b28d963b8016000abc8
14 files changed:
Report(
"quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
- "allocator_may_return_null %d, coverage %d, coverage_dir %s\n",
+ "allocator_may_return_null %d, coverage %d, coverage_dir %s, "
+ "allocator_release_to_os_interval_ms %d\n",
allocator_options.quarantine_size_mb, allocator_options.max_redzone,
poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
- allocator_options.may_return_null, coverage, coverage_dir);
+ allocator_options.may_return_null, coverage, coverage_dir,
+ allocator_options.release_to_os_interval_ms);
}
} asan_deactivated_flags;
COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
COMMON_ACTIVATION_FLAG(int, verbosity)
COMMON_ACTIVATION_FLAG(bool, help)
+COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)
max_redzone = f->max_redzone;
may_return_null = cf->allocator_may_return_null;
alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+ release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
}
void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
f->max_redzone = max_redzone;
cf->allocator_may_return_null = may_return_null;
f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+ cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
}
void Initialize(const AllocatorOptions &options) {
- allocator.Init(options.may_return_null);
+ allocator.Init(options.may_return_null, options.release_to_os_interval_ms);
SharedInitCode(options);
}
void ReInitialize(const AllocatorOptions &options) {
allocator.SetMayReturnNull(options.may_return_null);
+ allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
SharedInitCode(options);
// Poison all existing allocation's redzones.
options->may_return_null = allocator.MayReturnNull();
options->alloc_dealloc_mismatch =
atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+ options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
}
// -------------------- Helper methods. -------------------------
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
-
- void ReleaseToOS() { allocator.ReleaseToOS(); }
};
static Allocator instance(LINKER_INITIALIZED);
return GetStackTraceFromId(GetFreeStackId());
}
-void ReleaseToOS() { instance.ReleaseToOS(); }
-
void InitializeAllocator(const AllocatorOptions &options) {
instance.Initialize(options);
- SetAllocatorReleaseToOSCallback(ReleaseToOS);
}
void ReInitializeAllocator(const AllocatorOptions &options) {
u16 max_redzone;
u8 may_return_null;
u8 alloc_dealloc_mismatch;
+ s32 release_to_os_interval_ms;
void SetFrom(const Flags *f, const CommonFlags *cf);
void CopyTo(Flags *f, CommonFlags *cf);
static THREADLOCAL AllocatorCache cache;
void InitializeAllocator() {
- allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_may_return_null,
+ common_flags()->allocator_release_to_os_interval_ms);
}
void AllocatorThreadFinish() {
SpinMutexLock l(&internal_alloc_init_mu);
if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
0) {
- internal_allocator_instance->Init(/* may_return_null*/ false);
+ internal_allocator_instance->Init(
+ /* may_return_null */ false, kReleaseToOSIntervalNever);
atomic_store(&internal_allocator_initialized, 1, memory_order_release);
}
}
class SecondaryAllocator> // NOLINT
class CombinedAllocator {
public:
- void InitCommon(bool may_return_null) {
- primary_.Init();
+ void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
+ primary_.Init(release_to_os_interval_ms);
atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
}
- void InitLinkerInitialized(bool may_return_null) {
+ void InitLinkerInitialized(
+ bool may_return_null, s32 release_to_os_interval_ms) {
secondary_.InitLinkerInitialized(may_return_null);
stats_.InitLinkerInitialized();
- InitCommon(may_return_null);
+ InitCommon(may_return_null, release_to_os_interval_ms);
- void Init(bool may_return_null) {
+ void Init(bool may_return_null, s32 release_to_os_interval_ms) {
secondary_.Init(may_return_null);
stats_.Init();
- InitCommon(may_return_null);
+ InitCommon(may_return_null, release_to_os_interval_ms);
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
atomic_store(&may_return_null_, may_return_null, memory_order_release);
}
+ s32 ReleaseToOSIntervalMs() const {
+ return primary_.ReleaseToOSIntervalMs();
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ }
+
bool RssLimitIsExceeded() {
return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
}
- void ReleaseToOS() { primary_.ReleaseToOS(); }
-
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
- void Init() {
+ void Init(s32 release_to_os_interval_ms) {
possible_regions.TestOnlyInit();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
}
+ s32 ReleaseToOSIntervalMs() const {
+ return kReleaseToOSIntervalNever;
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ // This is empty here. Currently only implemented in 64-bit allocator.
+ }
+
void *MapWithCallback(uptr size) {
size = RoundUpTo(size, GetPageSizeCached());
void *res = MmapOrDie(size, "SizeClassAllocator32");
- // This is empty here. Currently only implemented in 64-bit allocator.
- void ReleaseToOS() { }
-
-
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
}
- void Init() {
+ void Init(s32 release_to_os_interval_ms) {
uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
if (kUsingConstantSpaceBeg) {
CHECK_EQ(kSpaceBeg, reinterpret_cast<uptr>(
reinterpret_cast<uptr>(MmapNoAccess(TotalSpaceSize));
CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
}
+ SetReleaseToOSIntervalMs(release_to_os_interval_ms);
MapWithCallbackOrDie(SpaceEnd(), AdditionalSize());
}
+ s32 ReleaseToOSIntervalMs() const {
+ return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
+ memory_order_relaxed);
+ }
+
static bool CanAllocate(uptr size, uptr alignment) {
return size <= SizeClassMap::kMaxSize &&
alignment <= SizeClassMap::kMaxSize;
- void ReleaseToOS() {
- for (uptr class_id = 1; class_id < kNumClasses; class_id++)
- ReleaseToOS(class_id);
- }
-
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
static const uptr kMetaMapSize = 1 << 16;
// Call mmap for free array memory with at least this size.
static const uptr kFreeArrayMapSize = 1 << 16;
- // Granularity of ReleaseToOs (aka madvise).
- static const uptr kReleaseToOsGranularity = 1 << 12;
+
+ atomic_sint32_t release_to_os_interval_ms_;
struct Stats {
uptr n_allocated;
struct ReleaseToOsInfo {
uptr n_freed_at_last_release;
uptr num_releases;
+ u64 last_release_at_ns;
CompactPtrT first, CompactPtrT last) {
uptr beg_ptr = CompactPtrToPointer(region_beg, first);
uptr end_ptr = CompactPtrToPointer(region_beg, last) + chunk_size;
- CHECK_GE(end_ptr - beg_ptr, kReleaseToOsGranularity);
- beg_ptr = RoundUpTo(beg_ptr, kReleaseToOsGranularity);
- end_ptr = RoundDownTo(end_ptr, kReleaseToOsGranularity);
+ const uptr page_size = GetPageSizeCached();
+ CHECK_GE(end_ptr - beg_ptr, page_size);
+ beg_ptr = RoundUpTo(beg_ptr, page_size);
+ end_ptr = RoundDownTo(end_ptr, page_size);
if (end_ptr == beg_ptr) return false;
ReleaseMemoryToOS(beg_ptr, end_ptr - beg_ptr);
return true;
}
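
To make the rounding above concrete, here is a standalone worked
example with made-up addresses (RoundUpTo/RoundDownTo are reimplemented
locally for the sketch; they are not the compiler-rt definitions):

    #include <cstdint>
    #include <cstdio>

    // Power-of-two rounding helpers, local to this sketch.
    static uint64_t RoundUpTo(uint64_t x, uint64_t align) {
      return (x + align - 1) & ~(align - 1);
    }
    static uint64_t RoundDownTo(uint64_t x, uint64_t align) {
      return x & ~(align - 1);
    }

    int main() {
      const uint64_t page_size = 4096;
      uint64_t beg_ptr = 0x1100, end_ptr = 0x3100;  // a freed byte range
      uint64_t beg = RoundUpTo(beg_ptr, page_size);    // 0x2000
      uint64_t end = RoundDownTo(end_ptr, page_size);  // 0x3000
      // Only the fully covered page [0x2000, 0x3000) may be released;
      // the partially covered pages at both ends must stay mapped.
      printf("release [%#llx, %#llx), %llu bytes\n",
             (unsigned long long)beg, (unsigned long long)end,
             (unsigned long long)(end - beg));
    }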
- // Releases some RAM back to OS.
+ // Attempts to release some RAM back to OS. The region is expected to be
+ // locked.
// * Sort the chunks.
// * Find ranges fully covered by free-d chunks
// * Release them to OS with madvise.
- //
- // TODO(kcc): make sure we don't do it too frequently.
- void ReleaseToOS(uptr class_id) {
+ void MaybeReleaseToOS(uptr class_id) {
RegionInfo *region = GetRegionInfo(class_id);
+ const uptr chunk_size = ClassIdToSize(class_id);
+ const uptr page_size = GetPageSizeCached();
+
+ uptr n = region->num_freed_chunks;
+ if (n * chunk_size < page_size)
+ return; // No chance to release anything.
+ if ((region->stats.n_freed - region->rtoi.n_freed_at_last_release) * chunk_size <
+ page_size) {
+ return; // Nothing new to release.
+ }
+
+ s32 interval_ms = ReleaseToOSIntervalMs();
+ if (interval_ms < 0)
+ return;
+
+ u64 now_ns = NanoTime();
+ if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL > now_ns)
+ return; // Memory was returned recently.
+ region->rtoi.last_release_at_ns = now_ns;
+
uptr region_beg = GetRegionBeginBySizeClass(class_id);
CompactPtrT *free_array = GetFreeArray(region_beg);
- uptr chunk_size = ClassIdToSize(class_id);
- uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
- const uptr kScaledGranularity = kReleaseToOsGranularity >> kCompactPtrScale;
- BlockingMutexLock l(&region->mutex);
- uptr n = region->num_freed_chunks;
- if (n * chunk_size < kReleaseToOsGranularity)
- return; // No chance to release anything.
- if ((region->rtoi.n_freed_at_last_release - region->stats.n_freed) * chunk_size <
- kReleaseToOsGranularity)
- return; // Nothing new to release.
SortArray(free_array, n);
- uptr beg = free_array[0];
+
+ const uptr scaled_chunk_size = chunk_size >> kCompactPtrScale;
+ const uptr kScaledGranularity = page_size >> kCompactPtrScale;
+
+ uptr range_beg = free_array[0];
uptr prev = free_array[0];
for (uptr i = 1; i < n; i++) {
uptr chunk = free_array[i];
CHECK_GT(chunk, prev);
if (chunk - prev != scaled_chunk_size) {
CHECK_GT(chunk - prev, scaled_chunk_size);
- if (prev + scaled_chunk_size - beg >= kScaledGranularity) {
- MaybeReleaseChunkRange(region_beg, chunk_size, beg, prev);
- region->rtoi.n_freed_at_last_release = region->stats.n_freed;
+ if (prev + scaled_chunk_size - range_beg >= kScaledGranularity) {
+ MaybeReleaseChunkRange(region_beg, chunk_size, range_beg, prev);
+ region->rtoi.n_freed_at_last_release = region->stats.n_freed;
region->rtoi.num_releases++;
}
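
Distilled into a self-contained sketch (plain byte offsets, no
compact-pointer scaling, made-up numbers), the pass over the sorted
free array works like this:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      const uint64_t chunk_size = 1024, page_size = 4096;
      // Offsets of freed chunks within one region, in arbitrary order.
      std::vector<uint64_t> freed = {8192, 1024, 3072, 2048,
                                     9216, 10240, 11264};
      std::sort(freed.begin(), freed.end());
      uint64_t range_beg = freed[0], prev = freed[0];
      // A run is a release candidate only if it spans at least a page.
      auto flush = [&](uint64_t last) {
        if (last + chunk_size - range_beg >= page_size)
          printf("candidate range [%llu, %llu)\n",
                 (unsigned long long)range_beg,
                 (unsigned long long)(last + chunk_size));
      };
      for (size_t i = 1; i < freed.size(); i++) {
        if (freed[i] - prev != chunk_size) {  // a gap ends the current run
          flush(prev);
          range_beg = freed[i];
        }
        prev = freed[i];
      }
      flush(prev);  // handle the final run
      // Prints only [8192, 12288); the run at 1024 covers just 3072 bytes.
    }

In the actual patch, MaybeReleaseChunkRange then trims each candidate
range to whole pages before calling ReleaseMemoryToOS.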
volatile Type val_dont_use;
};
+struct atomic_sint32_t {
+ typedef s32 Type;
+ volatile Type val_dont_use;
+};
+
struct atomic_uint32_t {
typedef u32 Type;
volatile Type val_dont_use;
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
-// Callback to be called when we want to try releasing unused allocator memory
-// back to the OS.
-typedef void (*AllocatorReleaseToOSCallback)();
-// The callback should be registered once at the tool init time.
-void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback);
-
// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsHandledDeadlySignal(int signum);
+// The default value for allocator_release_to_os_interval_ms common flag to
+// indicate that sanitizer allocator should not attempt to release memory to OS.
+const s32 kReleaseToOSIntervalNever = -1;
+
} // namespace __sanitizer
inline void *operator new(__sanitizer::operator_new_size_type size,
SoftRssLimitExceededCallback = Callback;
}
-static AllocatorReleaseToOSCallback ReleseCallback;
-void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback) {
- CHECK_EQ(ReleseCallback, nullptr);
- ReleseCallback = Callback;
-}
-
#if SANITIZER_LINUX && !SANITIZER_GO
void BackgroundThread(void *arg) {
uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
bool heap_profile = common_flags()->heap_profile;
- bool allocator_release_to_os = common_flags()->allocator_release_to_os;
uptr prev_reported_rss = 0;
uptr prev_reported_stack_depot_size = 0;
bool reached_soft_rss_limit = false;
SoftRssLimitExceededCallback(false);
}
}
- if (allocator_release_to_os && ReleseCallback) ReleseCallback();
if (heap_profile &&
current_rss_mb > rss_during_last_reported_profile * 1.1) {
Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
// Start the background thread if one of the rss limits is given.
if (!common_flags()->hard_rss_limit_mb &&
!common_flags()->soft_rss_limit_mb &&
- !common_flags()->allocator_release_to_os &&
!common_flags()->heap_profile) return;
if (!&real_pthread_create) return; // Can't spawn the thread anyway.
internal_start_thread(BackgroundThread, nullptr);
" This limit does not affect memory allocations other than"
" malloc/new.")
COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
" This limit does not affect memory allocations other than"
" malloc/new.")
COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
-COMMON_FLAG(bool, allocator_release_to_os, false,
- "Experimental. If true, try to periodically release unused"
- " memory to the OS.\n")
+COMMON_FLAG(s32, allocator_release_to_os_interval_ms, kReleaseToOSIntervalNever,
+ "Experimental. Only affects a 64-bit allocator. If set, tries to "
+ "release unused memory to the OS, but not more often than this "
+ "interval (in milliseconds). Negative values mean do not attempt "
+ "to release memory to the OS.\n")
COMMON_FLAG(bool, can_use_proc_maps_statm, true,
"If false, do not attempt to read /proc/maps/statm."
" Mostly useful for testing sanitizers.")
}
void InitializeAllocator() {
- allocator()->Init(common_flags()->allocator_may_return_null);
+ allocator()->Init(
+ common_flags()->allocator_may_return_null,
+ common_flags()->allocator_release_to_os_interval_ms);
}
void InitializeAllocatorLate() {