//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
11 #ifndef SANITIZER_ALLOCATOR_H
12 #error This file must be included inside sanitizer_allocator.h
// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
// When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
21 template <class PrimaryAllocator, class AllocatorCache,
22 class SecondaryAllocator> // NOLINT
23 class CombinedAllocator {
25 void InitCommon(bool may_return_null, s32 release_to_os_interval_ms) {
26 primary_.Init(release_to_os_interval_ms);
27 atomic_store(&may_return_null_, may_return_null, memory_order_relaxed);
30 void InitLinkerInitialized(
31 bool may_return_null, s32 release_to_os_interval_ms) {
32 secondary_.InitLinkerInitialized(may_return_null);
33 stats_.InitLinkerInitialized();
34 InitCommon(may_return_null, release_to_os_interval_ms);
37 void Init(bool may_return_null, s32 release_to_os_interval_ms) {
38 secondary_.Init(may_return_null);
40 InitCommon(may_return_null, release_to_os_interval_ms);
43 void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
44 bool cleared = false, bool check_rss_limit = false) {
45 // Returning 0 on malloc(0) may break a lot of code.
48 if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
49 if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
51 size = RoundUpTo(size, alignment);
53 bool from_primary = primary_.CanAllocate(size, alignment);
55 res = cache->Allocate(&primary_, primary_.ClassID(size));
57 res = secondary_.Allocate(&stats_, size, alignment);
59 CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
60 if (cleared && res && from_primary)
61 internal_bzero_aligned16(res, RoundUpTo(size, 16));
65 bool MayReturnNull() const {
66 return atomic_load(&may_return_null_, memory_order_acquire);
69 void *ReturnNullOrDieOnBadRequest() {
72 ReportAllocatorCannotReturnNull(false);
75 void *ReturnNullOrDieOnOOM() {
76 if (MayReturnNull()) return nullptr;
77 ReportAllocatorCannotReturnNull(true);
80 void SetMayReturnNull(bool may_return_null) {
81 secondary_.SetMayReturnNull(may_return_null);
82 atomic_store(&may_return_null_, may_return_null, memory_order_release);
85 s32 ReleaseToOSIntervalMs() const {
86 return primary_.ReleaseToOSIntervalMs();
89 void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
90 primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
93 bool RssLimitIsExceeded() {
94 return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
97 void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
98 atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
99 memory_order_release);
102 void Deallocate(AllocatorCache *cache, void *p) {
104 if (primary_.PointerIsMine(p))
105 cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
107 secondary_.Deallocate(&stats_, p);
110 void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
113 return Allocate(cache, new_size, alignment);
115 Deallocate(cache, p);
118 CHECK(PointerIsMine(p));
119 uptr old_size = GetActuallyAllocatedSize(p);
120 uptr memcpy_size = Min(new_size, old_size);
121 void *new_p = Allocate(cache, new_size, alignment);
123 internal_memcpy(new_p, p, memcpy_size);
124 Deallocate(cache, p);
128 bool PointerIsMine(void *p) {
129 if (primary_.PointerIsMine(p))
131 return secondary_.PointerIsMine(p);
134 bool FromPrimary(void *p) {
135 return primary_.PointerIsMine(p);
138 void *GetMetaData(const void *p) {
139 if (primary_.PointerIsMine(p))
140 return primary_.GetMetaData(p);
141 return secondary_.GetMetaData(p);
144 void *GetBlockBegin(const void *p) {
145 if (primary_.PointerIsMine(p))
146 return primary_.GetBlockBegin(p);
147 return secondary_.GetBlockBegin(p);
150 // This function does the same as GetBlockBegin, but is much faster.
151 // Must be called with the allocator locked.
152 void *GetBlockBeginFastLocked(void *p) {
153 if (primary_.PointerIsMine(p))
154 return primary_.GetBlockBegin(p);
155 return secondary_.GetBlockBeginFastLocked(p);
158 uptr GetActuallyAllocatedSize(void *p) {
159 if (primary_.PointerIsMine(p))
160 return primary_.GetActuallyAllocatedSize(p);
161 return secondary_.GetActuallyAllocatedSize(p);
164 uptr TotalMemoryUsed() {
165 return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
168 void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
170 void InitCache(AllocatorCache *cache) {
171 cache->Init(&stats_);
174 void DestroyCache(AllocatorCache *cache) {
175 cache->Destroy(&primary_, &stats_);
178 void SwallowCache(AllocatorCache *cache) {
179 cache->Drain(&primary_);
182 void GetStats(AllocatorStatCounters s) const {
187 primary_.PrintStats();
188 secondary_.PrintStats();
191 // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
192 // introspection API.
194 primary_.ForceLock();
195 secondary_.ForceLock();
199 secondary_.ForceUnlock();
200 primary_.ForceUnlock();
203 bool PointsIntoChunk(const void *p) {
204 if (primary_.PointerIsMine(p))
205 return primary_.PointsIntoChunk(p);
209 // Iterate over all existing chunks.
210 // The allocator must be locked when calling this function.
211 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
212 primary_.ForEachChunk(callback, arg);
213 secondary_.ForEachChunk(callback, arg);
217 PrimaryAllocator primary_;
218 SecondaryAllocator secondary_;
219 AllocatorGlobalStats stats_;
220 atomic_uint8_t may_return_null_;
221 atomic_uint8_t rss_limit_is_exceeded_;