2 //Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
5 //Redistribution and use in source and binary forms, with or without
6 //modification, are permitted provided that the following conditions
9 // Redistributions of source code must retain the above copyright
10 // notice, this list of conditions and the following disclaimer.
12 // Redistributions in binary form must reproduce the above
13 // copyright notice, this list of conditions and the following
14 // disclaimer in the documentation and/or other materials provided
15 // with the distribution.
17 // Neither the name of 3Dlabs Inc. Ltd. nor the names of its
18 // contributors may be used to endorse or promote products derived
19 // from this software without specific prior written permission.
21 //THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 //"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 //LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 //FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 //COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 //INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 //BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 //LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
29 //CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 //LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
31 //ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 //POSSIBILITY OF SUCH DAMAGE.
35 #include "../Include/PoolAlloc.h"
36 #include "../Include/Common.h"
38 #include "../Include/InitializeGlobals.h"
39 #include "osinclude.h"
// Process-wide TLS index used to find this thread's memory pools.
// Must be allocated via InitializePoolIndex() before any pool use.
OS_TLSIndex PoolIndex;
45 void InitializeMemoryPools()
47 TThreadMemoryPools* pools = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
51 TPoolAllocator *threadPoolAllocator = new TPoolAllocator();
53 TThreadMemoryPools* threadData = new TThreadMemoryPools();
55 threadData->threadPoolAllocator = threadPoolAllocator;
57 OS_SetTLSValue(PoolIndex, threadData);
60 void FreeGlobalPools()
62 // Release the allocated memory for this thread.
63 TThreadMemoryPools* globalPools = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
67 GetThreadPoolAllocator().popAll();
68 delete &GetThreadPoolAllocator();
72 bool InitializePoolIndex()
74 // Allocate a TLS index.
75 if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
83 // Release the TLS index.
84 OS_FreeTLSIndex(PoolIndex);
87 TPoolAllocator& GetThreadPoolAllocator()
89 TThreadMemoryPools* threadData = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
91 return *threadData->threadPoolAllocator;
94 void SetThreadPoolAllocator(TPoolAllocator& poolAllocator)
96 TThreadMemoryPools* threadData = static_cast<TThreadMemoryPools*>(OS_GetTLSValue(PoolIndex));
98 threadData->threadPoolAllocator = &poolAllocator;
102 // Implement the functionality of the TPoolAllocator class, which
103 // is documented in PoolAlloc.h.
105 TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
106 pageSize(growthIncrement),
107 alignment(allocationAlignment),
113 // Don't allow page sizes we know are smaller than all common
116 if (pageSize < 4*1024)
120 // A large currentPageOffset indicates a new page needs to
121 // be obtained to allocate memory.
123 currentPageOffset = pageSize;
126 // Adjust alignment to be at least pointer aligned and
129 size_t minAlign = sizeof(void*);
130 alignment &= ~(minAlign - 1);
131 if (alignment < minAlign)
132 alignment = minAlign;
134 while (a < alignment)
137 alignmentMask = a - 1;
142 headerSkip = minAlign;
143 if (headerSkip < sizeof(tHeader)) {
144 headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
150 TPoolAllocator::~TPoolAllocator()
153 tHeader* next = inUseList->nextPage;
154 inUseList->~tHeader();
155 delete [] reinterpret_cast<char*>(inUseList);
160 // Always delete the free list memory - it can't be being
161 // (correctly) referenced, whether the pool allocator was
162 // global or not. We should not check the guard blocks
163 // here, because we did it already when the block was
164 // placed into the free list.
167 tHeader* next = freeList->nextPage;
168 delete [] reinterpret_cast<char*>(freeList);
173 const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
174 const unsigned char TAllocation::guardBlockEndVal = 0xfe;
175 const unsigned char TAllocation::userDataFill = 0xcd;
178 const size_t TAllocation::guardBlockSize = 16;
180 const size_t TAllocation::guardBlockSize = 0;
184 // Check a single guard block for damage
187 void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
189 void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
193 for (int x = 0; x < guardBlockSize; x++) {
194 if (blockMem[x] != val) {
195 const int maxSize = 80;
198 // We don't print the assert message. It's here just to be helpful.
199 snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
200 locText, size, data());
201 assert(0 && "PoolAlloc: Damage in guard block");
205 assert(guardBlockSize == 0);
210 void TPoolAllocator::push()
212 tAllocState state = { currentPageOffset, inUseList };
214 stack.push_back(state);
217 // Indicate there is no current page to allocate from.
219 currentPageOffset = pageSize;
223 // Do a mass-deallocation of all the individual allocations
224 // that have occurred since the last push(), or since the
225 // last pop(), or since the object's creation.
227 // The deallocated pages are saved for future allocations.
229 void TPoolAllocator::pop()
231 if (stack.size() < 1)
234 tHeader* page = stack.back().page;
235 currentPageOffset = stack.back().offset;
237 while (inUseList != page) {
238 // invoke destructor to free allocation list
239 inUseList->~tHeader();
241 tHeader* nextInUse = inUseList->nextPage;
242 if (inUseList->pageCount > 1)
243 delete [] reinterpret_cast<char*>(inUseList);
245 inUseList->nextPage = freeList;
246 freeList = inUseList;
248 inUseList = nextInUse;
255 // Do a mass-deallocation of all the individual allocations
256 // that have occurred.
258 void TPoolAllocator::popAll()
260 while (stack.size() > 0)
264 void* TPoolAllocator::allocate(size_t numBytes)
266 // If we are using guard blocks, all allocations are bracketed by
267 // them: [guardblock][allocation][guardblock]. numBytes is how
268 // much memory the caller asked for. allocationSize is the total
269 // size including guard blocks. In release build,
270 // guardBlockSize=0 and this all gets optimized away.
271 size_t allocationSize = TAllocation::allocationSize(numBytes);
274 // Just keep some interesting statistics.
277 totalBytes += numBytes;
280 // Do the allocation, most likely case first, for efficiency.
281 // This step could be moved to be inline sometime.
283 if (currentPageOffset + allocationSize <= pageSize) {
285 // Safe to allocate from currentPageOffset.
287 unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
288 currentPageOffset += allocationSize;
289 currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
291 return initializeAllocation(inUseList, memory, numBytes);
294 if (allocationSize + headerSkip > pageSize) {
296 // Do a multi-page allocation. Don't mix these with the others.
297 // The OS is efficient and allocating and free-ing multiple pages.
299 size_t numBytesToAlloc = allocationSize + headerSkip;
300 tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
304 // Use placement-new to initialize header
305 new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
308 currentPageOffset = pageSize; // make next allocation come from a new page
310 // No guard blocks for multi-page allocations (yet)
311 return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
315 // Need a simple page to allocate from.
320 freeList = freeList->nextPage;
322 memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
327 // Use placement-new to initialize header
328 new(memory) tHeader(inUseList, 1);
331 unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
332 currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
334 return initializeAllocation(inUseList, ret, numBytes);
339 // Check all allocations in a list for damage by calling check on each.
341 void TAllocation::checkAllocList() const
343 for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
347 } // end namespace glslang