/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MetaAllocator.h"

#include <wtf/DataLog.h>
#include <wtf/FastMalloc.h>

namespace WTF {

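// Usage sketch (editorial, inferred from the code in this file; not part of the
// original sources): MetaAllocator hands out space from a pool of free ranges.
// A concrete subclass supplies backing memory through allocateNewSpace() and is
// told about page commit/decommit through notifyNeedPage() and notifyPageIsFree().
// Clients seed the pool with addFreshFreeSpace(), then call allocate(), which
// returns a reference-counted MetaAllocatorHandle; when the last reference to the
// handle dies, or shrink() trims it, the space goes back into the free-space maps
// and fully unoccupied pages are reported as free.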

MetaAllocator::~MetaAllocator()
{
    for (FreeSpaceNode* node = m_freeSpaceSizeMap.first(); node;) {
        FreeSpaceNode* next = node->successor();
        m_freeSpaceSizeMap.remove(node);
        freeFreeSpaceNode(node);
        node = next;
    }
    ASSERT(!m_mallocBalance);
}

void MetaAllocatorTracker::notify(MetaAllocatorHandle* handle)
{
    m_allocations.insert(handle);
}

void MetaAllocatorTracker::release(MetaAllocatorHandle* handle)
{
    m_allocations.remove(handle);
}

ALWAYS_INLINE void MetaAllocator::release(MetaAllocatorHandle* handle)
{
    SpinLockHolder locker(&m_lock);
    if (handle->sizeInBytes()) {
        decrementPageOccupancy(handle->start(), handle->sizeInBytes());
        addFreeSpaceFromReleasedHandle(handle->start(), handle->sizeInBytes());
    }

    if (UNLIKELY(!!m_tracker))
        m_tracker->release(handle);
}

MetaAllocatorHandle::MetaAllocatorHandle(MetaAllocator* allocator, void* start, size_t sizeInBytes, void* ownerUID)
    : m_allocator(allocator)
    , m_start(start)
    , m_sizeInBytes(sizeInBytes)
    , m_ownerUID(ownerUID)
{
}

MetaAllocatorHandle::~MetaAllocatorHandle()
{
    m_allocator->release(this);
}

void MetaAllocatorHandle::shrink(size_t newSizeInBytes)
{
    ASSERT(newSizeInBytes <= m_sizeInBytes);

    SpinLockHolder locker(&m_allocator->m_lock);

    newSizeInBytes = m_allocator->roundUp(newSizeInBytes);

    ASSERT(newSizeInBytes <= m_sizeInBytes);

    if (newSizeInBytes == m_sizeInBytes)
        return;

    uintptr_t freeStart = reinterpret_cast<uintptr_t>(m_start) + newSizeInBytes;
    size_t freeSize = m_sizeInBytes - newSizeInBytes;
    uintptr_t freeEnd = freeStart + freeSize;

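    // Worked example (editorial, with assumed numbers): with a 4096-byte page and
    // freeStart = 0x10900, the align-up below gives firstCompletelyFreePage = 0x11000.
    // Only pages lying entirely inside [freeStart, freeEnd) have their occupancy
    // decremented; the partial page holding the tail of the surviving allocation
    // stays committed.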
    uintptr_t firstCompletelyFreePage = (freeStart + m_allocator->m_pageSize - 1) & ~(m_allocator->m_pageSize - 1);
    if (firstCompletelyFreePage < freeEnd)
        m_allocator->decrementPageOccupancy(reinterpret_cast<void*>(firstCompletelyFreePage), freeSize - (firstCompletelyFreePage - freeStart));

    m_allocator->addFreeSpaceFromReleasedHandle(reinterpret_cast<void*>(freeStart), freeSize);

    m_sizeInBytes = newSizeInBytes;
}

MetaAllocator::MetaAllocator(size_t allocationGranule)
    : m_allocationGranule(allocationGranule)
    , m_pageSize(pageSize())
    , m_bytesAllocated(0)
    , m_bytesReserved(0)
    , m_bytesCommitted(0)
    , m_tracker(0)
#ifndef NDEBUG
    , m_mallocBalance(0)
#endif
#if ENABLE(META_ALLOCATOR_PROFILE)
    , m_numAllocations(0)
    , m_numFrees(0)
#endif
{
    m_lock.Init();

    for (m_logPageSize = 0; m_logPageSize < 32; ++m_logPageSize) {
        if (static_cast<size_t>(1) << m_logPageSize == m_pageSize)
            break;
    }
    ASSERT(static_cast<size_t>(1) << m_logPageSize == m_pageSize);

    for (m_logAllocationGranule = 0; m_logAllocationGranule < 32; ++m_logAllocationGranule) {
        if (static_cast<size_t>(1) << m_logAllocationGranule == m_allocationGranule)
            break;
    }
    ASSERT(static_cast<size_t>(1) << m_logAllocationGranule == m_allocationGranule);
}

PassRefPtr<MetaAllocatorHandle> MetaAllocator::allocate(size_t sizeInBytes, void* ownerUID)
{
    SpinLockHolder locker(&m_lock);

    if (!sizeInBytes)
        return 0;

    sizeInBytes = roundUp(sizeInBytes);

    void* start = findAndRemoveFreeSpace(sizeInBytes);
    if (!start) {
        size_t requestedNumberOfPages = (sizeInBytes + m_pageSize - 1) >> m_logPageSize;
        size_t numberOfPages = requestedNumberOfPages;

        start = allocateNewSpace(numberOfPages);
        if (!start)
            return 0;

        ASSERT(numberOfPages >= requestedNumberOfPages);

        size_t roundedUpSize = numberOfPages << m_logPageSize;
        ASSERT(roundedUpSize >= sizeInBytes);

        m_bytesReserved += roundedUpSize;

        if (roundedUpSize > sizeInBytes) {
            void* freeSpaceStart = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes);
            size_t freeSpaceSize = roundedUpSize - sizeInBytes;
            addFreeSpace(freeSpaceStart, freeSpaceSize);
        }
    }
    incrementPageOccupancy(start, sizeInBytes);
    m_bytesAllocated += sizeInBytes;
#if ENABLE(META_ALLOCATOR_PROFILE)
    m_numAllocations++;
#endif

    MetaAllocatorHandle* handle = new MetaAllocatorHandle(this, start, sizeInBytes, ownerUID);

    if (UNLIKELY(!!m_tracker))
        m_tracker->notify(handle);

    return adoptRef(handle);
}

MetaAllocator::Statistics MetaAllocator::currentStatistics()
{
    SpinLockHolder locker(&m_lock);
    Statistics result;
    result.bytesAllocated = m_bytesAllocated;
    result.bytesReserved = m_bytesReserved;
    result.bytesCommitted = m_bytesCommitted;
    return result;
}

void* MetaAllocator::findAndRemoveFreeSpace(size_t sizeInBytes)
{
    FreeSpaceNode* node = m_freeSpaceSizeMap.findLeastGreaterThanOrEqual(sizeInBytes);

    if (!node)
        return 0;

    ASSERT(node->m_sizeInBytes >= sizeInBytes);

    m_freeSpaceSizeMap.remove(node);

    void* result;

    if (node->m_sizeInBytes == sizeInBytes) {
        // Easy case: perfect fit, so just remove the node entirely.
        result = node->m_start;

        m_freeSpaceStartAddressMap.remove(node->m_start);
        m_freeSpaceEndAddressMap.remove(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes));
        freeFreeSpaceNode(node);
    } else {
        // Try to be a good citizen and ensure that the returned chunk of memory
        // straddles as few pages as possible, but only insofar as doing so will
        // not increase fragmentation. The intuition is that minimizing
        // fragmentation is a strictly higher priority than minimizing the number
        // of committed pages, since in the long run, smaller fragmentation means
        // fewer committed pages and fewer failures in general.

        uintptr_t firstPage = reinterpret_cast<uintptr_t>(node->m_start) >> m_logPageSize;
        uintptr_t lastPage = (reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes - 1) >> m_logPageSize;

        uintptr_t lastPageForLeftAllocation = (reinterpret_cast<uintptr_t>(node->m_start) + sizeInBytes - 1) >> m_logPageSize;
        uintptr_t firstPageForRightAllocation = (reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes - sizeInBytes) >> m_logPageSize;

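        // Worked example (editorial, with assumed numbers): with a 4096-byte page,
        // node->m_start = 0x10F00, node->m_sizeInBytes = 0x1000 and a request of
        // sizeInBytes = 0x800, a left-side allocation [0x10F00, 0x11700) would straddle
        // pages 0x10 and 0x11, while a right-side allocation [0x11700, 0x11F00) fits
        // entirely in page 0x11, so the right side is chosen below.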
        if (lastPageForLeftAllocation - firstPage + 1 <= lastPage - firstPageForRightAllocation + 1) {
            // Allocate in the left side of the returned chunk, and slide the node to the right.
            result = node->m_start;

            m_freeSpaceStartAddressMap.remove(node->m_start);

            node->m_sizeInBytes -= sizeInBytes;
            node->m_start = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_start) + sizeInBytes);

            m_freeSpaceSizeMap.insert(node);
            m_freeSpaceStartAddressMap.add(node->m_start, node);
        } else {
            // Allocate in the right side of the returned chunk, and slide the node to the left.
            result = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes - sizeInBytes);

            m_freeSpaceEndAddressMap.remove(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes));

            node->m_sizeInBytes -= sizeInBytes;

            m_freeSpaceSizeMap.insert(node);
            m_freeSpaceEndAddressMap.add(result, node);
        }
    }

#if ENABLE(META_ALLOCATOR_PROFILE)
    dumpProfile();
#endif

    return result;
}

void MetaAllocator::addFreeSpaceFromReleasedHandle(void* start, size_t sizeInBytes)
{
#if ENABLE(META_ALLOCATOR_PROFILE)
    m_numFrees++;
#endif
    m_bytesAllocated -= sizeInBytes;
    addFreeSpace(start, sizeInBytes);
}

void MetaAllocator::addFreshFreeSpace(void* start, size_t sizeInBytes)
{
    SpinLockHolder locker(&m_lock);
    m_bytesReserved += sizeInBytes;
    addFreeSpace(start, sizeInBytes);
}

size_t MetaAllocator::debugFreeSpaceSize()
{
#ifndef NDEBUG
    SpinLockHolder locker(&m_lock);
    size_t result = 0;
    for (FreeSpaceNode* node = m_freeSpaceSizeMap.first(); node; node = node->successor())
        result += node->m_sizeInBytes;
    return result;
#else
    CRASH();
    return 0;
#endif
}

void MetaAllocator::addFreeSpace(void* start, size_t sizeInBytes)
{
    void* end = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes);

    HashMap<void*, FreeSpaceNode*>::iterator leftNeighbor = m_freeSpaceEndAddressMap.find(start);
    HashMap<void*, FreeSpaceNode*>::iterator rightNeighbor = m_freeSpaceStartAddressMap.find(end);

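    // Worked example (editorial, with assumed addresses): if [0x1000, 0x2000) and
    // [0x3000, 0x4000) are already free and we add [0x2000, 0x3000), the left neighbor
    // is found via m_freeSpaceEndAddressMap[0x2000] and the right neighbor via
    // m_freeSpaceStartAddressMap[0x3000]; the branches below merge all three into a
    // single node covering [0x1000, 0x4000).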
    if (leftNeighbor != m_freeSpaceEndAddressMap.end()) {
        // We have something we can coalesce with on the left. Remove it from the tree, and
        // remove its end from the end address map.

        ASSERT(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftNeighbor->second->m_start) + leftNeighbor->second->m_sizeInBytes) == leftNeighbor->first);

        FreeSpaceNode* leftNode = leftNeighbor->second;

        void* leftStart = leftNode->m_start;
        size_t leftSize = leftNode->m_sizeInBytes;
        void* leftEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftStart) + leftSize);

        ASSERT(leftEnd == start);

        m_freeSpaceSizeMap.remove(leftNode);
        m_freeSpaceEndAddressMap.remove(leftEnd);

        // Now check if there is also something to coalesce with on the right.
        if (rightNeighbor != m_freeSpaceStartAddressMap.end()) {
            // Freeing something in the middle of free blocks. Coalesce both left and
            // right, whilst removing the right neighbor from the maps.

            ASSERT(rightNeighbor->second->m_start == rightNeighbor->first);

            FreeSpaceNode* rightNode = rightNeighbor->second;
            void* rightStart = rightNeighbor->first;
            size_t rightSize = rightNode->m_sizeInBytes;
            void* rightEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(rightStart) + rightSize);

            ASSERT(rightStart == end);
            ASSERT(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftStart) + leftSize + sizeInBytes + rightSize) == rightEnd);

            m_freeSpaceSizeMap.remove(rightNode);
            m_freeSpaceStartAddressMap.remove(rightStart);
            m_freeSpaceEndAddressMap.remove(rightEnd);

            freeFreeSpaceNode(rightNode);

            leftNode->m_sizeInBytes += sizeInBytes + rightSize;

            m_freeSpaceSizeMap.insert(leftNode);
            m_freeSpaceEndAddressMap.add(rightEnd, leftNode);
        } else {
            leftNode->m_sizeInBytes += sizeInBytes;

            m_freeSpaceSizeMap.insert(leftNode);
            m_freeSpaceEndAddressMap.add(end, leftNode);
        }
    } else {
        // Cannot coalesce with left; try to see if we can coalesce with right.

        if (rightNeighbor != m_freeSpaceStartAddressMap.end()) {
            FreeSpaceNode* rightNode = rightNeighbor->second;
            void* rightStart = rightNeighbor->first;
            size_t rightSize = rightNode->m_sizeInBytes;
            void* rightEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(rightStart) + rightSize);

            ASSERT(rightStart == end);
            ASSERT_UNUSED(rightEnd, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes + rightSize) == rightEnd);

            m_freeSpaceSizeMap.remove(rightNode);
            m_freeSpaceStartAddressMap.remove(rightStart);

            rightNode->m_sizeInBytes += sizeInBytes;
            rightNode->m_start = start;

            m_freeSpaceSizeMap.insert(rightNode);
            m_freeSpaceStartAddressMap.add(start, rightNode);
        } else {
            // Nothing to coalesce with, so create a new free space node and add it.

            FreeSpaceNode* node = allocFreeSpaceNode();

            node->m_sizeInBytes = sizeInBytes;
            node->m_start = start;

            m_freeSpaceSizeMap.insert(node);
            m_freeSpaceStartAddressMap.add(start, node);
            m_freeSpaceEndAddressMap.add(end, node);
        }
    }

#if ENABLE(META_ALLOCATOR_PROFILE)
    dumpProfile();
#endif
}

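// Editorial note (inferred from the two functions below, not from the original
// sources): the page occupancy map keeps a per-page count of how many live
// allocations touch each page. For example, with 4096-byte pages, two allocations
// that both overlap page 0x10 give that page a count of 2; the first one to touch
// the page triggers notifyNeedPage(), and only when the count drops back to zero
// does notifyPageIsFree() fire, so a subclass can commit and decommit memory lazily.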
void MetaAllocator::incrementPageOccupancy(void* address, size_t sizeInBytes)
{
    uintptr_t firstPage = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
    uintptr_t lastPage = (reinterpret_cast<uintptr_t>(address) + sizeInBytes - 1) >> m_logPageSize;

    for (uintptr_t page = firstPage; page <= lastPage; ++page) {
        HashMap<uintptr_t, size_t>::iterator iter = m_pageOccupancyMap.find(page);
        if (iter == m_pageOccupancyMap.end()) {
            m_pageOccupancyMap.add(page, 1);
            m_bytesCommitted += m_pageSize;
            notifyNeedPage(reinterpret_cast<void*>(page << m_logPageSize));
        } else
            iter->second++;
    }
}

void MetaAllocator::decrementPageOccupancy(void* address, size_t sizeInBytes)
{
    uintptr_t firstPage = reinterpret_cast<uintptr_t>(address) >> m_logPageSize;
    uintptr_t lastPage = (reinterpret_cast<uintptr_t>(address) + sizeInBytes - 1) >> m_logPageSize;

    for (uintptr_t page = firstPage; page <= lastPage; ++page) {
        HashMap<uintptr_t, size_t>::iterator iter = m_pageOccupancyMap.find(page);
        ASSERT(iter != m_pageOccupancyMap.end());
        if (!--(iter->second)) {
            m_pageOccupancyMap.remove(iter);
            m_bytesCommitted -= m_pageSize;
            notifyPageIsFree(reinterpret_cast<void*>(page << m_logPageSize));
        }
    }
}

size_t MetaAllocator::roundUp(size_t sizeInBytes)
{
    if (std::numeric_limits<size_t>::max() - m_allocationGranule <= sizeInBytes)
        CRASH();
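    // Worked example (editorial, with an assumed granule): with m_allocationGranule = 32,
    // the mask arithmetic below gives roundUp(33) = 64 and roundUp(32) = 32. The
    // constructor asserts that the granule is a power of two, which is what makes the
    // mask form valid.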
    return (sizeInBytes + m_allocationGranule - 1) & ~(m_allocationGranule - 1);
}

MetaAllocator::FreeSpaceNode* MetaAllocator::allocFreeSpaceNode()
{
#ifndef NDEBUG
    m_mallocBalance++;
#endif
    return new (NotNull, fastMalloc(sizeof(FreeSpaceNode))) FreeSpaceNode(0, 0);
}

void MetaAllocator::freeFreeSpaceNode(FreeSpaceNode* node)
{
#ifndef NDEBUG
    m_mallocBalance--;
#endif
    fastFree(node);
}

#if ENABLE(META_ALLOCATOR_PROFILE)
void MetaAllocator::dumpProfile()
{
    dataLog("%d: MetaAllocator(%p): num allocations = %u, num frees = %u, allocated = %lu, reserved = %lu, committed = %lu\n",
        getpid(), this, m_numAllocations, m_numFrees, m_bytesAllocated, m_bytesReserved, m_bytesCommitted);
}
#endif // ENABLE(META_ALLOCATOR_PROFILE)

} // namespace WTF