1 /*-------------------------------------------------------------------------
5 * Copyright (c) 2015 Google Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Memory allocation callback utilities.
22 *//*--------------------------------------------------------------------*/
#include "vkAllocationCallbackUtil.hpp"

#include "tcuFormatUtil.hpp"
#include "tcuTestLog.hpp"

#include "deMemory.h"
#include "deSTLUtil.hpp"

#include <map>
35 // System default allocator
37 static VKAPI_ATTR void* VKAPI_CALL systemAllocate (void*, size_t size, size_t alignment, VkSystemAllocationScope)
40 return deAlignedMalloc(size, (deUint32)alignment);
45 static VKAPI_ATTR void VKAPI_CALL systemFree (void*, void* pMem)
50 static VKAPI_ATTR void* VKAPI_CALL systemReallocate (void*, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope)
52 return deAlignedRealloc(pOriginal, size, alignment);
55 static VKAPI_ATTR void VKAPI_CALL systemInternalAllocationNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
59 static VKAPI_ATTR void VKAPI_CALL systemInternalFreeNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
63 static const VkAllocationCallbacks s_systemAllocator =
69 systemInternalAllocationNotification,
70 systemInternalFreeNotification,
73 const VkAllocationCallbacks* getSystemAllocator (void)
75 return &s_systemAllocator;
78 // AllocationCallbacks
80 static VKAPI_ATTR void* VKAPI_CALL allocationCallback (void* pUserData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
82 return reinterpret_cast<AllocationCallbacks*>(pUserData)->allocate(size, alignment, allocationScope);
85 static VKAPI_ATTR void* VKAPI_CALL reallocationCallback (void* pUserData, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
87 return reinterpret_cast<AllocationCallbacks*>(pUserData)->reallocate(pOriginal, size, alignment, allocationScope);
90 static VKAPI_ATTR void VKAPI_CALL freeCallback (void* pUserData, void* pMem)
92 reinterpret_cast<AllocationCallbacks*>(pUserData)->free(pMem);
95 static VKAPI_ATTR void VKAPI_CALL internalAllocationNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
97 reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalAllocation(size, allocationType, allocationScope);
100 static VKAPI_ATTR void VKAPI_CALL internalFreeNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
102 reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalFree(size, allocationType, allocationScope);
105 static VkAllocationCallbacks makeCallbacks (AllocationCallbacks* object)
107 const VkAllocationCallbacks callbacks =
109 reinterpret_cast<void*>(object),
111 reallocationCallback,
113 internalAllocationNotificationCallback,
114 internalFreeNotificationCallback
119 AllocationCallbacks::AllocationCallbacks (void)
120 : m_callbacks(makeCallbacks(this))
124 AllocationCallbacks::~AllocationCallbacks (void)
128 // AllocationCallbackRecord
130 AllocationCallbackRecord AllocationCallbackRecord::allocation (size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
132 AllocationCallbackRecord record;
134 record.type = TYPE_ALLOCATION;
135 record.data.allocation.size = size;
136 record.data.allocation.alignment = alignment;
137 record.data.allocation.scope = scope;
138 record.data.allocation.returnedPtr = returnedPtr;
143 AllocationCallbackRecord AllocationCallbackRecord::reallocation (void* original, size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
145 AllocationCallbackRecord record;
147 record.type = TYPE_REALLOCATION;
148 record.data.reallocation.original = original;
149 record.data.reallocation.size = size;
150 record.data.reallocation.alignment = alignment;
151 record.data.reallocation.scope = scope;
152 record.data.reallocation.returnedPtr = returnedPtr;
157 AllocationCallbackRecord AllocationCallbackRecord::free (void* mem)
159 AllocationCallbackRecord record;
161 record.type = TYPE_FREE;
162 record.data.free.mem = mem;
167 AllocationCallbackRecord AllocationCallbackRecord::internalAllocation (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
169 AllocationCallbackRecord record;
171 record.type = TYPE_INTERNAL_ALLOCATION;
172 record.data.internalAllocation.size = size;
173 record.data.internalAllocation.type = type;
174 record.data.internalAllocation.scope = scope;
179 AllocationCallbackRecord AllocationCallbackRecord::internalFree (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
181 AllocationCallbackRecord record;
183 record.type = TYPE_INTERNAL_FREE;
184 record.data.internalAllocation.size = size;
185 record.data.internalAllocation.type = type;
186 record.data.internalAllocation.scope = scope;
193 ChainedAllocator::ChainedAllocator (const VkAllocationCallbacks* nextAllocator)
194 : m_nextAllocator(nextAllocator)
198 ChainedAllocator::~ChainedAllocator (void)
202 void* ChainedAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
204 return m_nextAllocator->pfnAllocation(m_nextAllocator->pUserData, size, alignment, allocationScope);
207 void* ChainedAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
209 return m_nextAllocator->pfnReallocation(m_nextAllocator->pUserData, original, size, alignment, allocationScope);
212 void ChainedAllocator::free (void* mem)
214 m_nextAllocator->pfnFree(m_nextAllocator->pUserData, mem);
217 void ChainedAllocator::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
219 m_nextAllocator->pfnInternalAllocation(m_nextAllocator->pUserData, size, allocationType, allocationScope);
222 void ChainedAllocator::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
224 m_nextAllocator->pfnInternalFree(m_nextAllocator->pUserData, size, allocationType, allocationScope);
227 // AllocationCallbackRecorder
229 AllocationCallbackRecorder::AllocationCallbackRecorder (const VkAllocationCallbacks* allocator, deUint32 callCountHint)
230 : ChainedAllocator (allocator)
231 , m_records (callCountHint)
235 AllocationCallbackRecorder::~AllocationCallbackRecorder (void)
239 void* AllocationCallbackRecorder::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
241 void* const ptr = ChainedAllocator::allocate(size, alignment, allocationScope);
243 m_records.append(AllocationCallbackRecord::allocation(size, alignment, allocationScope, ptr));
248 void* AllocationCallbackRecorder::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
250 void* const ptr = ChainedAllocator::reallocate(original, size, alignment, allocationScope);
252 m_records.append(AllocationCallbackRecord::reallocation(original, size, alignment, allocationScope, ptr));
257 void AllocationCallbackRecorder::free (void* mem)
259 ChainedAllocator::free(mem);
261 m_records.append(AllocationCallbackRecord::free(mem));
264 void AllocationCallbackRecorder::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
266 ChainedAllocator::notifyInternalAllocation(size, allocationType, allocationScope);
268 m_records.append(AllocationCallbackRecord::internalAllocation(size, allocationType, allocationScope));
271 void AllocationCallbackRecorder::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
273 ChainedAllocator::notifyInternalFree(size, allocationType, allocationScope);
275 m_records.append(AllocationCallbackRecord::internalFree(size, allocationType, allocationScope));
278 // DeterministicFailAllocator
280 DeterministicFailAllocator::DeterministicFailAllocator (const VkAllocationCallbacks* allocator, deUint32 numPassingAllocs)
281 : ChainedAllocator (allocator)
282 , m_numPassingAllocs(numPassingAllocs)
283 , m_allocationNdx (0)
287 DeterministicFailAllocator::~DeterministicFailAllocator (void)
291 void* DeterministicFailAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
293 if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
294 return ChainedAllocator::allocate(size, alignment, allocationScope);
299 void* DeterministicFailAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
301 if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
302 return ChainedAllocator::reallocate(original, size, alignment, allocationScope);
309 AllocationCallbackValidationResults::AllocationCallbackValidationResults (void)
311 deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
314 void AllocationCallbackValidationResults::clear (void)
316 liveAllocations.clear();
318 deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
324 struct AllocationSlot
326 AllocationCallbackRecord record;
329 AllocationSlot (void)
333 AllocationSlot (const AllocationCallbackRecord& record_, bool isLive_)
339 size_t getAlignment (const AllocationCallbackRecord& record)
341 if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION)
342 return record.data.allocation.alignment;
343 else if (record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
344 return record.data.reallocation.alignment;
354 void validateAllocationCallbacks (const AllocationCallbackRecorder& recorder, AllocationCallbackValidationResults* results)
356 std::vector<AllocationSlot> allocations;
357 std::map<void*, size_t> ptrToSlotIndex;
359 DE_ASSERT(results->liveAllocations.empty() && results->violations.empty());
361 for (AllocationCallbackRecorder::RecordIterator callbackIter = recorder.getRecordsBegin();
362 callbackIter != recorder.getRecordsEnd();
365 const AllocationCallbackRecord& record = *callbackIter;
369 const VkSystemAllocationScope* const scopePtr = record.type == AllocationCallbackRecord::TYPE_ALLOCATION ? &record.data.allocation.scope
370 : record.type == AllocationCallbackRecord::TYPE_REALLOCATION ? &record.data.reallocation.scope
371 : record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ? &record.data.internalAllocation.scope
372 : record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE ? &record.data.internalAllocation.scope
375 if (scopePtr && !de::inBounds(*scopePtr, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST))
376 results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE));
379 // Validate alignment
380 if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION ||
381 record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
383 if (!deIsPowerOfTwoSize(getAlignment(record)))
384 results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALIGNMENT));
387 // Validate actual allocation behavior
390 case AllocationCallbackRecord::TYPE_ALLOCATION:
392 if (record.data.allocation.returnedPtr)
394 if (!de::contains(ptrToSlotIndex, record.data.allocation.returnedPtr))
396 ptrToSlotIndex[record.data.allocation.returnedPtr] = allocations.size();
397 allocations.push_back(AllocationSlot(record, true));
401 const size_t slotNdx = ptrToSlotIndex[record.data.allocation.returnedPtr];
402 if (!allocations[slotNdx].isLive)
404 allocations[slotNdx].isLive = true;
405 allocations[slotNdx].record = record;
409 // we should not have multiple live allocations with the same pointer
418 case AllocationCallbackRecord::TYPE_REALLOCATION:
420 if (de::contains(ptrToSlotIndex, record.data.reallocation.original))
422 const size_t origSlotNdx = ptrToSlotIndex[record.data.reallocation.original];
423 AllocationSlot& origSlot = allocations[origSlotNdx];
425 DE_ASSERT(record.data.reallocation.original != DE_NULL);
427 if (record.data.reallocation.size > 0)
429 if (getAlignment(origSlot.record) != record.data.reallocation.alignment)
430 results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT));
432 if (record.data.reallocation.original == record.data.reallocation.returnedPtr)
434 if (!origSlot.isLive)
436 results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_FREED_PTR));
437 origSlot.isLive = true; // Mark live to suppress further errors
440 // Just update slot record
441 allocations[origSlotNdx].record = record;
445 if (record.data.reallocation.returnedPtr)
447 allocations[origSlotNdx].isLive = false;
448 if (!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr))
450 ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
451 allocations.push_back(AllocationSlot(record, true));
455 const size_t slotNdx = ptrToSlotIndex[record.data.reallocation.returnedPtr];
456 if (!allocations[slotNdx].isLive)
458 allocations[slotNdx].isLive = true;
459 allocations[slotNdx].record = record;
463 // we should not have multiple live allocations with the same pointer
468 // else original ptr remains valid and live
473 DE_ASSERT(!record.data.reallocation.returnedPtr);
475 origSlot.isLive = false;
480 if (record.data.reallocation.original)
481 results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR));
483 if (record.data.reallocation.returnedPtr)
485 DE_ASSERT(!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr));
486 ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
487 allocations.push_back(AllocationSlot(record, true));
494 case AllocationCallbackRecord::TYPE_FREE:
496 if (record.data.free.mem != DE_NULL) // Freeing null pointer is valid and ignored
498 if (de::contains(ptrToSlotIndex, record.data.free.mem))
500 const size_t slotNdx = ptrToSlotIndex[record.data.free.mem];
502 if (allocations[slotNdx].isLive)
503 allocations[slotNdx].isLive = false;
505 results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_DOUBLE_FREE));
508 results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR));
514 case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
515 case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
517 if (de::inBounds(record.data.internalAllocation.type, (VkInternalAllocationType)0, VK_INTERNAL_ALLOCATION_TYPE_LAST))
519 size_t* const totalAllocSizePtr = &results->internalAllocationTotal[record.data.internalAllocation.type][record.data.internalAllocation.scope];
520 const size_t size = record.data.internalAllocation.size;
522 if (record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE)
524 if (*totalAllocSizePtr < size)
526 results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL));
527 *totalAllocSizePtr = 0; // Reset to 0 to suppress compound errors
530 *totalAllocSizePtr -= size;
533 *totalAllocSizePtr += size;
536 results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE));
546 DE_ASSERT(!de::contains(ptrToSlotIndex, DE_NULL));
548 // Collect live allocations
549 for (std::vector<AllocationSlot>::const_iterator slotIter = allocations.begin();
550 slotIter != allocations.end();
553 if (slotIter->isLive)
554 results->liveAllocations.push_back(slotIter->record);
558 bool checkAndLog (tcu::TestLog& log, const AllocationCallbackValidationResults& results, deUint32 allowedLiveAllocScopeBits)
564 if (!results.violations.empty())
566 for (size_t violationNdx = 0; violationNdx < results.violations.size(); ++violationNdx)
568 log << TestLog::Message << "VIOLATION " << (violationNdx+1)
569 << ": " << results.violations[violationNdx]
570 << " (" << results.violations[violationNdx].record << ")"
571 << TestLog::EndMessage;
574 log << TestLog::Message << "ERROR: Found " << results.violations.size() << " invalid allocation callbacks!" << TestLog::EndMessage;
577 // Verify live allocations
578 for (size_t liveNdx = 0; liveNdx < results.liveAllocations.size(); ++liveNdx)
580 const AllocationCallbackRecord& record = results.liveAllocations[liveNdx];
581 const VkSystemAllocationScope scope = record.type == AllocationCallbackRecord::TYPE_ALLOCATION ? record.data.allocation.scope
582 : record.type == AllocationCallbackRecord::TYPE_REALLOCATION ? record.data.reallocation.scope
583 : VK_SYSTEM_ALLOCATION_SCOPE_LAST;
585 DE_ASSERT(de::inBounds(scope, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST));
587 if ((allowedLiveAllocScopeBits & (1u << scope)) == 0)
589 log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << record << TestLog::EndMessage;
594 // Verify internal allocations
595 for (int internalAllocTypeNdx = 0; internalAllocTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocTypeNdx)
597 for (int scopeNdx = 0; scopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++scopeNdx)
599 const VkInternalAllocationType type = (VkInternalAllocationType)internalAllocTypeNdx;
600 const VkSystemAllocationScope scope = (VkSystemAllocationScope)scopeNdx;
601 const size_t totalAllocated = results.internalAllocationTotal[type][scope];
603 if ((allowedLiveAllocScopeBits & (1u << scopeNdx)) == 0 &&
606 log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << totalAllocated
607 << " bytes of (" << type << ", " << scope << ") internal memory is still allocated"
608 << TestLog::EndMessage;
615 log << TestLog::Message << "ERROR: Found " << numLeaks << " memory leaks!" << TestLog::EndMessage;
617 return results.violations.empty() && numLeaks == 0;
620 bool validateAndLog (tcu::TestLog& log, const AllocationCallbackRecorder& recorder, deUint32 allowedLiveAllocScopeBits)
622 AllocationCallbackValidationResults validationResults;
624 validateAllocationCallbacks(recorder, &validationResults);
626 return checkAndLog(log, validationResults, allowedLiveAllocScopeBits);
629 std::ostream& operator<< (std::ostream& str, const AllocationCallbackRecord& record)
633 case AllocationCallbackRecord::TYPE_ALLOCATION:
634 str << "ALLOCATION: size=" << record.data.allocation.size
635 << ", alignment=" << record.data.allocation.alignment
636 << ", scope=" << record.data.allocation.scope
637 << ", returnedPtr=" << tcu::toHex(record.data.allocation.returnedPtr);
640 case AllocationCallbackRecord::TYPE_REALLOCATION:
641 str << "REALLOCATION: original=" << tcu::toHex(record.data.reallocation.original)
642 << ", size=" << record.data.reallocation.size
643 << ", alignment=" << record.data.reallocation.alignment
644 << ", scope=" << record.data.reallocation.scope
645 << ", returnedPtr=" << tcu::toHex(record.data.reallocation.returnedPtr);
648 case AllocationCallbackRecord::TYPE_FREE:
649 str << "FREE: mem=" << tcu::toHex(record.data.free.mem);
652 case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
653 case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
654 str << "INTERNAL_" << (record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ? "ALLOCATION" : "FREE")
655 << ": size=" << record.data.internalAllocation.size
656 << ", type=" << record.data.internalAllocation.type
657 << ", scope=" << record.data.internalAllocation.scope;
667 std::ostream& operator<< (std::ostream& str, const AllocationCallbackViolation& violation)
669 switch (violation.reason)
671 case AllocationCallbackViolation::REASON_DOUBLE_FREE:
673 DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
674 str << "Double free of " << tcu::toHex(violation.record.data.free.mem);
678 case AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR:
680 DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
681 str << "Attempt to free " << tcu::toHex(violation.record.data.free.mem) << " which has not been allocated";
685 case AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR:
687 DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
688 str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has not been allocated";
692 case AllocationCallbackViolation::REASON_REALLOC_FREED_PTR:
694 DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
695 str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has been freed";
699 case AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL:
701 DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
702 str << "Internal allocation total for (" << violation.record.data.internalAllocation.type << ", " << violation.record.data.internalAllocation.scope << ") is negative";
706 case AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE:
708 DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ||
709 violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
710 str << "Invalid internal allocation type " << tcu::toHex(violation.record.data.internalAllocation.type);
714 case AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE:
716 str << "Invalid allocation scope";
720 case AllocationCallbackViolation::REASON_INVALID_ALIGNMENT:
722 str << "Invalid alignment";
726 case AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT:
728 str << "Reallocation with different alignment";