//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// A size class of 0 means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing the objects of this class should be
// cache-line aligned.
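//
// For example (illustrative numbers, not defaults from this file): with
// kRegionSizeLog == 20 (1 MiB regions), a 64-byte size class and
// kMetadataSize == 16, a region holds kRegionSize / (64 + 16) chunks; the
// chunk with index n starts at region_beg + n * 64, and its metadata lives at
// region_beg + kRegionSize - (n + 1) * kMetadataSize, i.e. the metadata array
// grows downwards from the region end (see GetMetaData() and GetBlockBegin()
// below).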
template <const uptr kSpaceBeg, const u64 kSpaceSize,
          const uptr kMetadataSize, class SizeClassMap,
          const uptr kRegionSizeLog,
          class ByteMap,
          class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
 public:
  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(uptr region_beg_unused, void *batch[], uptr count) {
      count_ = count;
      CHECK_LE(count_, kMaxNumCached);
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      CHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements.
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
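    // The leading sizeof(uptr) * 2 above accounts for count_ and the
    // intrusive-list pointer, which is also why kMaxNumCached is
    // kMaxNumCachedHint - 2. For example, with 4-byte pointers a batch
    // holding n chunks needs 8 + 4 * n bytes.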
    static uptr MaxCached(uptr class_id) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(sizeof(TransferBatch) ==
                 SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
      SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init() {
    possible_regions.TestOnlyInit();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  void *MapWithCallback(uptr size) {
    size = RoundUpTo(size, GetPageSizeCached());
    void *res = MmapOrDie(size, "SizeClassAllocator32");
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

  // Returns the per-chunk metadata, which is laid out backwards from the end
  // of the chunk's region (see the layout comment above).
  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

  // Returns true if p points into the user-chunk area of its region rather
  // than into the gap or the metadata area.
  bool PointsIntoChunk(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr gap_start = beg + n_chunks * size;
    return (beg <= mem) && (mem < gap_start);
  }

  // Grabs a batch of chunks for the given size class, refilling the free list
  // from a newly mapped region if needed. Returns nullptr on failure.
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty() &&
        UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
      return nullptr;
    CHECK(!sci->free_list.empty());
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    CHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    CHECK_GT(b->Count(), 0);
    sci->free_list.push_front(b);
  }

  uptr GetRegionBeginBySizeClass(uptr class_id) { return 0; }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }
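
  // Illustrative usage sketch (identifiers here are hypothetical): the caller
  // is expected to hold the allocator lock around the iteration, e.g.
  //   allocator.ForceLock();
  //   allocator.ForEachChunk([](uptr chunk, void *arg) { /* inspect */ },
  //                          nullptr);
  //   allocator.ForceUnlock();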

  static uptr AdditionalSize() {
    return 0;
  }

  // This is empty here. Currently only implemented in 64-bit allocator.
  void ReleaseToOS() { }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct SizeClassInfo {
    SpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    char padding[kCacheLineSize - sizeof(uptr) -
                 sizeof(IntrusiveList<TransferBatch>)];
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
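  // The padding (enforced by the check above) makes SizeClassInfo exactly one
  // cache line, so that, when the containing object is cache-line aligned as
  // the class-level comment requires, different size classes do not
  // false-share their mutexes and free lists.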

  uptr ComputeRegionId(uptr mem) {
    uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, "SizeClassAllocator32"));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK_EQ(0U, (res & (kRegionSize - 1)));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    CHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

  // Maps a fresh region for class_id and carves it into TransferBatches that
  // are appended to sci->free_list. Returns false on failure.
  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    uptr size = ClassIdToSize(class_id);
    uptr reg = AllocateRegion(stat, class_id);
    if (UNLIKELY(!reg))
      return false;
    uptr n_chunks = kRegionSize / (size + kMetadataSize);
    uptr max_count = TransferBatch::MaxCached(class_id);
    TransferBatch *b = nullptr;
    for (uptr i = reg; i < reg + n_chunks * size; i += size) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)i);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)i);
      if (b->Count() == max_count) {
        CHECK_GT(b->Count(), 0);
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];