/*-------------------------------------------------------------------------
 * Vulkan CTS Framework
 * --------------------
 *
 * Copyright (c) 2015 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and/or associated documentation files (the
 * "Materials"), to deal in the Materials without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Materials, and to
 * permit persons to whom the Materials are furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be
 * included in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
 *
 *//*!
 * \file
 * \brief Memory allocation callback utilities.
 *//*--------------------------------------------------------------------*/

#include "vkAllocationCallbackUtil.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuTestLog.hpp"
#include "deSTLUtil.hpp"
#include "deMemory.h"

#include <map>

namespace vk
{

// System default allocator

static VKAPI_ATTR void* VKAPI_CALL systemAllocate (void*, size_t size, size_t alignment, VkSystemAllocationScope)
{
    if (size > 0)
        return deAlignedMalloc(size, (deUint32)alignment);
    else
        return DE_NULL;
}

static VKAPI_ATTR void VKAPI_CALL systemFree (void*, void* pMem)
{
    deAlignedFree(pMem);
}

static VKAPI_ATTR void* VKAPI_CALL systemReallocate (void*, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope)
{
    return deAlignedRealloc(pOriginal, size, alignment);
}

static VKAPI_ATTR void VKAPI_CALL systemInternalAllocationNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}

static VKAPI_ATTR void VKAPI_CALL systemInternalFreeNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}

static const VkAllocationCallbacks s_systemAllocator =
{
    DE_NULL,    // pUserData
    systemAllocate,
    systemReallocate,
    systemFree,
    systemInternalAllocationNotification,
    systemInternalFreeNotification,
};

const VkAllocationCallbacks* getSystemAllocator (void)
{
    return &s_systemAllocator;
}

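// Illustrative note (a sketch, not part of this file): the callbacks returned by
// getSystemAllocator() can be handed to any Vulkan entry point that accepts a
// VkAllocationCallbacks pointer; createInfo and instance below are assumed to
// exist in the calling test code.
//
//     vkCreateInstance(&createInfo, getSystemAllocator(), &instance);
//     vkDestroyInstance(instance, getSystemAllocator());
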
// AllocationCallbacks

static VKAPI_ATTR void* VKAPI_CALL allocationCallback (void* pUserData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
    return reinterpret_cast<AllocationCallbacks*>(pUserData)->allocate(size, alignment, allocationScope);
}

static VKAPI_ATTR void* VKAPI_CALL reallocationCallback (void* pUserData, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
    return reinterpret_cast<AllocationCallbacks*>(pUserData)->reallocate(pOriginal, size, alignment, allocationScope);
}

static VKAPI_ATTR void VKAPI_CALL freeCallback (void* pUserData, void* pMem)
{
    reinterpret_cast<AllocationCallbacks*>(pUserData)->free(pMem);
}

static VKAPI_ATTR void VKAPI_CALL internalAllocationNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
    reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalAllocation(size, allocationType, allocationScope);
}

static VKAPI_ATTR void VKAPI_CALL internalFreeNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
    reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalFree(size, allocationType, allocationScope);
}

static VkAllocationCallbacks makeCallbacks (AllocationCallbacks* object)
{
    const VkAllocationCallbacks callbacks =
    {
        reinterpret_cast<void*>(object),
        allocationCallback,
        reallocationCallback,
        freeCallback,
        internalAllocationNotificationCallback,
        internalFreeNotificationCallback
    };
    return callbacks;
}

AllocationCallbacks::AllocationCallbacks (void)
    : m_callbacks(makeCallbacks(this))
{
}

AllocationCallbacks::~AllocationCallbacks (void)
{
}

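// The static VKAPI_CALL functions above act as trampolines: pUserData carries the
// AllocationCallbacks object, and each callback simply dispatches to the matching
// virtual method. A minimal sketch of a custom allocator built on this mechanism
// (hypothetical class; virtual signatures assumed from vkAllocationCallbackUtil.hpp):
//
//     class CountingAllocator : public ChainedAllocator
//     {
//     public:
//         CountingAllocator (const VkAllocationCallbacks* next)
//             : ChainedAllocator(next), m_numAllocations(0) {}
//
//         void* allocate (size_t size, size_t alignment, VkSystemAllocationScope scope)
//         {
//             m_numAllocations += 1;   // count every host allocation request
//             return ChainedAllocator::allocate(size, alignment, scope);
//         }
//
//     private:
//         int m_numAllocations;
//     };
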
// AllocationCallbackRecord

AllocationCallbackRecord AllocationCallbackRecord::allocation (size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
{
    AllocationCallbackRecord record;

    record.type                        = TYPE_ALLOCATION;
    record.data.allocation.size        = size;
    record.data.allocation.alignment   = alignment;
    record.data.allocation.scope       = scope;
    record.data.allocation.returnedPtr = returnedPtr;

    return record;
}

AllocationCallbackRecord AllocationCallbackRecord::reallocation (void* original, size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
{
    AllocationCallbackRecord record;

    record.type                          = TYPE_REALLOCATION;
    record.data.reallocation.original    = original;
    record.data.reallocation.size        = size;
    record.data.reallocation.alignment   = alignment;
    record.data.reallocation.scope       = scope;
    record.data.reallocation.returnedPtr = returnedPtr;

    return record;
}

AllocationCallbackRecord AllocationCallbackRecord::free (void* mem)
{
    AllocationCallbackRecord record;

    record.type          = TYPE_FREE;
    record.data.free.mem = mem;

    return record;
}

AllocationCallbackRecord AllocationCallbackRecord::internalAllocation (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
{
    AllocationCallbackRecord record;

    record.type                          = TYPE_INTERNAL_ALLOCATION;
    record.data.internalAllocation.size  = size;
    record.data.internalAllocation.type  = type;
    record.data.internalAllocation.scope = scope;

    return record;
}

AllocationCallbackRecord AllocationCallbackRecord::internalFree (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
{
    AllocationCallbackRecord record;

    record.type                          = TYPE_INTERNAL_FREE;
    record.data.internalAllocation.size  = size;
    record.data.internalAllocation.type  = type;
    record.data.internalAllocation.scope = scope;

    return record;
}

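// Note: AllocationCallbackRecord is a plain tagged union; the factory functions
// above set `type` and fill in only the matching member of `data`. Consumers
// (validateAllocationCallbacks() and the operator<< overloads below) switch on
// `type` before reading `data`.
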
// ChainedAllocator

ChainedAllocator::ChainedAllocator (const VkAllocationCallbacks* nextAllocator)
    : m_nextAllocator(nextAllocator)
{
}

ChainedAllocator::~ChainedAllocator (void)
{
}

void* ChainedAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
    return m_nextAllocator->pfnAllocation(m_nextAllocator->pUserData, size, alignment, allocationScope);
}

void* ChainedAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
    return m_nextAllocator->pfnReallocation(m_nextAllocator->pUserData, original, size, alignment, allocationScope);
}

void ChainedAllocator::free (void* mem)
{
    m_nextAllocator->pfnFree(m_nextAllocator->pUserData, mem);
}

void ChainedAllocator::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
    m_nextAllocator->pfnInternalAllocation(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}

void ChainedAllocator::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
    m_nextAllocator->pfnInternalFree(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}

// AllocationCallbackRecorder

AllocationCallbackRecorder::AllocationCallbackRecorder (const VkAllocationCallbacks* allocator, deUint32 callCountHint)
    : ChainedAllocator (allocator)
    , m_records        (callCountHint)
{
}

AllocationCallbackRecorder::~AllocationCallbackRecorder (void)
{
}

void* AllocationCallbackRecorder::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
    void* const ptr = ChainedAllocator::allocate(size, alignment, allocationScope);

    m_records.append(AllocationCallbackRecord::allocation(size, alignment, allocationScope, ptr));

    return ptr;
}

void* AllocationCallbackRecorder::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
    void* const ptr = ChainedAllocator::reallocate(original, size, alignment, allocationScope);

    m_records.append(AllocationCallbackRecord::reallocation(original, size, alignment, allocationScope, ptr));

    return ptr;
}

void AllocationCallbackRecorder::free (void* mem)
{
    ChainedAllocator::free(mem);

    m_records.append(AllocationCallbackRecord::free(mem));
}

void AllocationCallbackRecorder::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
    ChainedAllocator::notifyInternalAllocation(size, allocationType, allocationScope);

    m_records.append(AllocationCallbackRecord::internalAllocation(size, allocationType, allocationScope));
}

void AllocationCallbackRecorder::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
    ChainedAllocator::notifyInternalFree(size, allocationType, allocationScope);

    m_records.append(AllocationCallbackRecord::internalFree(size, allocationType, allocationScope));
}

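// Illustrative usage sketch (assumed test-side code, not part of this file): wrap an
// allocator in a recorder, hand the resulting callbacks to the calls under test, then
// validate the recorded sequence. getCallbacks() is assumed to be provided by
// AllocationCallbacks in vkAllocationCallbackUtil.hpp; vk, device, log and
// bufferCreateInfo stand for the usual test context objects.
//
//     AllocationCallbackRecorder  recorder (getSystemAllocator(), 128 /* call count hint */);
//     VkBuffer                    buffer   = (VkBuffer)0;
//
//     VK_CHECK(vk.createBuffer(device, &bufferCreateInfo, recorder.getCallbacks(), &buffer));
//     vk.destroyBuffer(device, buffer, recorder.getCallbacks());
//
//     if (!validateAndLog(log, recorder, 0u))     // 0u: no live allocations may remain
//         return tcu::TestStatus::fail("Invalid allocation callback behavior");
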
// DeterministicFailAllocator

DeterministicFailAllocator::DeterministicFailAllocator (const VkAllocationCallbacks* allocator, deUint32 numPassingAllocs)
    : ChainedAllocator   (allocator)
    , m_numPassingAllocs (numPassingAllocs)
    , m_allocationNdx    (0)
{
}

DeterministicFailAllocator::~DeterministicFailAllocator (void)
{
}

void* DeterministicFailAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
    if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
        return ChainedAllocator::allocate(size, alignment, allocationScope);
    else
        return DE_NULL;
}

void* DeterministicFailAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
    if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
        return ChainedAllocator::reallocate(original, size, alignment, allocationScope);
    else
        return DE_NULL;
}

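// Illustrative sketch (assumed test-side code): out-of-host-memory behavior is
// typically exercised by letting the first N allocations succeed and failing the
// rest, with N increased until the call under test finally succeeds. maxAllocs,
// the sampler object and samplerCreateInfo are placeholders for whatever is being
// tested; getCallbacks() is assumed from vkAllocationCallbackUtil.hpp.
//
//     for (deUint32 numPassingAllocs = 0; numPassingAllocs < maxAllocs; ++numPassingAllocs)
//     {
//         DeterministicFailAllocator  failAllocator (getSystemAllocator(), numPassingAllocs);
//         AllocationCallbackRecorder  recorder      (failAllocator.getCallbacks(), 128);
//         VkSampler                   sampler       = (VkSampler)0;
//         const VkResult              res           = vk.createSampler(device, &samplerCreateInfo, recorder.getCallbacks(), &sampler);
//
//         if (res == VK_SUCCESS)
//         {
//             vk.destroySampler(device, sampler, recorder.getCallbacks());
//             break;                                  // enough allocations were allowed
//         }
//         else if (res != VK_ERROR_OUT_OF_HOST_MEMORY)
//             return tcu::TestStatus::fail("Unexpected error code");
//     }
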
// Utils

AllocationCallbackValidationResults::AllocationCallbackValidationResults (void)
{
    deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
}

void AllocationCallbackValidationResults::clear (void)
{
    liveAllocations.clear();
    violations.clear();
    deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
}

namespace
{

struct AllocationSlot
{
    AllocationCallbackRecord    record;
    bool                        isLive;

    AllocationSlot (void)
        : isLive    (false)
    {}

    AllocationSlot (const AllocationCallbackRecord& record_, bool isLive_)
        : record    (record_)
        , isLive    (isLive_)
    {}
};

size_t getAlignment (const AllocationCallbackRecord& record)
{
    if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION)
        return record.data.allocation.alignment;
    else if (record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
        return record.data.reallocation.alignment;
    else
    {
        DE_ASSERT(false);
        return 0;
    }
}

} // anonymous

void validateAllocationCallbacks (const AllocationCallbackRecorder& recorder, AllocationCallbackValidationResults* results)
{
    std::vector<AllocationSlot>     allocations;
    std::map<void*, size_t>         ptrToSlotIndex;

    DE_ASSERT(results->liveAllocations.empty() && results->violations.empty());

    for (AllocationCallbackRecorder::RecordIterator callbackIter = recorder.getRecordsBegin();
         callbackIter != recorder.getRecordsEnd();
         ++callbackIter)
    {
        const AllocationCallbackRecord&     record  = *callbackIter;

        // Validate scope
        {
            const VkSystemAllocationScope* const scopePtr = record.type == AllocationCallbackRecord::TYPE_ALLOCATION          ? &record.data.allocation.scope
                                                          : record.type == AllocationCallbackRecord::TYPE_REALLOCATION        ? &record.data.reallocation.scope
                                                          : record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ? &record.data.internalAllocation.scope
                                                          : record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE       ? &record.data.internalAllocation.scope
                                                          : DE_NULL;

            if (scopePtr && !de::inBounds(*scopePtr, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST))
                results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE));
        }

        // Validate alignment
        if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION ||
            record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
        {
            if (!deIsPowerOfTwoSize(getAlignment(record)))
                results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALIGNMENT));
        }

        // Validate actual allocation behavior
        switch (record.type)
        {
            case AllocationCallbackRecord::TYPE_ALLOCATION:
            {
                if (record.data.allocation.returnedPtr)
                {
                    if (!de::contains(ptrToSlotIndex, record.data.allocation.returnedPtr))
                    {
                        ptrToSlotIndex[record.data.allocation.returnedPtr] = allocations.size();
                        allocations.push_back(AllocationSlot(record, true));
                    }
                    else
                    {
                        const size_t slotNdx = ptrToSlotIndex[record.data.allocation.returnedPtr];
                        if (!allocations[slotNdx].isLive)
                        {
                            allocations[slotNdx].isLive = true;
                            allocations[slotNdx].record = record;
                        }
                        else
                        {
                            // We should not have multiple live allocations with the same pointer
                            DE_ASSERT(false);
                        }
                    }
                }

                break;
            }

            case AllocationCallbackRecord::TYPE_REALLOCATION:
            {
                if (de::contains(ptrToSlotIndex, record.data.reallocation.original))
                {
                    const size_t        origSlotNdx = ptrToSlotIndex[record.data.reallocation.original];
                    AllocationSlot&     origSlot    = allocations[origSlotNdx];

                    DE_ASSERT(record.data.reallocation.original != DE_NULL);

                    if (record.data.reallocation.size > 0)
                    {
                        if (getAlignment(origSlot.record) != record.data.reallocation.alignment)
                            results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT));

                        if (record.data.reallocation.original == record.data.reallocation.returnedPtr)
                        {
                            if (!origSlot.isLive)
                            {
                                results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_FREED_PTR));
                                origSlot.isLive = true; // Mark live to suppress further errors
                            }

                            // Just update slot record
                            allocations[origSlotNdx].record = record;
                        }
                        else
                        {
                            if (record.data.reallocation.returnedPtr)
                            {
                                allocations[origSlotNdx].isLive = false;
                                if (!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr))
                                {
                                    ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
                                    allocations.push_back(AllocationSlot(record, true));
                                }
                                else
                                {
                                    const size_t slotNdx = ptrToSlotIndex[record.data.reallocation.returnedPtr];
                                    if (!allocations[slotNdx].isLive)
                                    {
                                        allocations[slotNdx].isLive = true;
                                        allocations[slotNdx].record = record;
                                    }
                                    else
                                    {
                                        // We should not have multiple live allocations with the same pointer
                                        DE_ASSERT(false);
                                    }
                                }
                            }
                            // else original ptr remains valid and live
                        }
                    }
                    else
                    {
                        DE_ASSERT(!record.data.reallocation.returnedPtr);

                        origSlot.isLive = false;
                    }
                }
                else
                {
                    if (record.data.reallocation.original)
                        results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR));

                    if (record.data.reallocation.returnedPtr)
                    {
                        DE_ASSERT(!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr));
                        ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
                        allocations.push_back(AllocationSlot(record, true));
                    }
                }

                break;
            }

            case AllocationCallbackRecord::TYPE_FREE:
            {
                if (record.data.free.mem != DE_NULL) // Freeing null pointer is valid and ignored
                {
                    if (de::contains(ptrToSlotIndex, record.data.free.mem))
                    {
                        const size_t slotNdx = ptrToSlotIndex[record.data.free.mem];

                        if (allocations[slotNdx].isLive)
                            allocations[slotNdx].isLive = false;
                        else
                            results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_DOUBLE_FREE));
                    }
                    else
                        results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR));
                }

                break;
            }

            case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
            case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
            {
                if (de::inBounds(record.data.internalAllocation.type, (VkInternalAllocationType)0, VK_INTERNAL_ALLOCATION_TYPE_LAST))
                {
                    size_t* const   totalAllocSizePtr   = &results->internalAllocationTotal[record.data.internalAllocation.type][record.data.internalAllocation.scope];
                    const size_t    size                = record.data.internalAllocation.size;

                    if (record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE)
                    {
                        if (*totalAllocSizePtr < size)
                        {
                            results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL));
                            *totalAllocSizePtr = 0; // Reset to 0 to suppress compound errors
                        }
                        else
                            *totalAllocSizePtr -= size;
                    }
                    else
                        *totalAllocSizePtr += size;
                }
                else
                    results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE));

                break;
            }

            default:
                DE_ASSERT(false);
        }
    }

    DE_ASSERT(!de::contains(ptrToSlotIndex, DE_NULL));

    // Collect live allocations
    for (std::vector<AllocationSlot>::const_iterator slotIter = allocations.begin();
         slotIter != allocations.end();
         ++slotIter)
    {
        if (slotIter->isLive)
            results->liveAllocations.push_back(slotIter->record);
    }
}

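// Note on the validation above: every pointer returned by an allocation or
// reallocation gets a slot that is marked live; frees and size-zero reallocations
// mark the slot dead again. That is what makes double frees, frees of unknown
// pointers, and reallocations of freed or unknown pointers detectable, and
// whatever is still live at the end is reported in results->liveAllocations.
// Internal allocation notifications carry no pointers, so only the per-(type,
// scope) byte totals are tracked for them.
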
bool checkAndLog (tcu::TestLog& log, const AllocationCallbackValidationResults& results, deUint32 allowedLiveAllocScopeBits)
{
    using tcu::TestLog;

    size_t numLeaks = 0;

    if (!results.violations.empty())
    {
        for (size_t violationNdx = 0; violationNdx < results.violations.size(); ++violationNdx)
        {
            log << TestLog::Message << "VIOLATION " << (violationNdx+1)
                                    << ": " << results.violations[violationNdx]
                                    << " (" << results.violations[violationNdx].record << ")"
                << TestLog::EndMessage;
        }

        log << TestLog::Message << "ERROR: Found " << results.violations.size() << " invalid allocation callbacks!" << TestLog::EndMessage;
    }

    // Verify live allocations
    for (size_t liveNdx = 0; liveNdx < results.liveAllocations.size(); ++liveNdx)
    {
        const AllocationCallbackRecord&     record  = results.liveAllocations[liveNdx];
        const VkSystemAllocationScope       scope   = record.type == AllocationCallbackRecord::TYPE_ALLOCATION   ? record.data.allocation.scope
                                                    : record.type == AllocationCallbackRecord::TYPE_REALLOCATION ? record.data.reallocation.scope
                                                    : VK_SYSTEM_ALLOCATION_SCOPE_LAST;

        DE_ASSERT(de::inBounds(scope, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST));

        if ((allowedLiveAllocScopeBits & (1u << scope)) == 0)
        {
            log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << record << TestLog::EndMessage;
            numLeaks += 1;
        }
    }

    // Verify internal allocations
    for (int internalAllocTypeNdx = 0; internalAllocTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocTypeNdx)
    {
        for (int scopeNdx = 0; scopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++scopeNdx)
        {
            const VkInternalAllocationType  type            = (VkInternalAllocationType)internalAllocTypeNdx;
            const VkSystemAllocationScope   scope           = (VkSystemAllocationScope)scopeNdx;
            const size_t                    totalAllocated  = results.internalAllocationTotal[type][scope];

            if ((allowedLiveAllocScopeBits & (1u << scopeNdx)) == 0 &&
                totalAllocated > 0)
            {
                log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << totalAllocated
                                        << " bytes of (" << type << ", " << scope << ") internal memory is still allocated"
                    << TestLog::EndMessage;
                numLeaks += 1;
            }
        }
    }

    if (numLeaks > 0)
        log << TestLog::Message << "ERROR: Found " << numLeaks << " memory leaks!" << TestLog::EndMessage;

    return results.violations.empty() && numLeaks == 0;
}

bool validateAndLog (tcu::TestLog& log, const AllocationCallbackRecorder& recorder, deUint32 allowedLiveAllocScopeBits)
{
    AllocationCallbackValidationResults validationResults;

    validateAllocationCallbacks(recorder, &validationResults);

    return checkAndLog(log, validationResults, allowedLiveAllocScopeBits);
}

std::ostream& operator<< (std::ostream& str, const AllocationCallbackRecord& record)
{
    switch (record.type)
    {
        case AllocationCallbackRecord::TYPE_ALLOCATION:
            str << "ALLOCATION: size=" << record.data.allocation.size
                << ", alignment=" << record.data.allocation.alignment
                << ", scope=" << record.data.allocation.scope
                << ", returnedPtr=" << tcu::toHex(record.data.allocation.returnedPtr);
            break;

        case AllocationCallbackRecord::TYPE_REALLOCATION:
            str << "REALLOCATION: original=" << tcu::toHex(record.data.reallocation.original)
                << ", size=" << record.data.reallocation.size
                << ", alignment=" << record.data.reallocation.alignment
                << ", scope=" << record.data.reallocation.scope
                << ", returnedPtr=" << tcu::toHex(record.data.reallocation.returnedPtr);
            break;

        case AllocationCallbackRecord::TYPE_FREE:
            str << "FREE: mem=" << tcu::toHex(record.data.free.mem);
            break;

        case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
        case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
            str << "INTERNAL_" << (record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ? "ALLOCATION" : "FREE")
                << ": size=" << record.data.internalAllocation.size
                << ", type=" << record.data.internalAllocation.type
                << ", scope=" << record.data.internalAllocation.scope;
            break;

        default:
            DE_ASSERT(false);
    }

    return str;
}

std::ostream& operator<< (std::ostream& str, const AllocationCallbackViolation& violation)
{
    switch (violation.reason)
    {
        case AllocationCallbackViolation::REASON_DOUBLE_FREE:
        {
            DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
            str << "Double free of " << tcu::toHex(violation.record.data.free.mem);
            break;
        }

        case AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR:
        {
            DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
            str << "Attempt to free " << tcu::toHex(violation.record.data.free.mem) << " which has not been allocated";
            break;
        }

        case AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR:
        {
            DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
            str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has not been allocated";
            break;
        }

        case AllocationCallbackViolation::REASON_REALLOC_FREED_PTR:
        {
            DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
            str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has been freed";
            break;
        }

        case AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL:
        {
            DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
            str << "Internal allocation total for (" << violation.record.data.internalAllocation.type << ", " << violation.record.data.internalAllocation.scope << ") is negative";
            break;
        }

        case AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE:
        {
            DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ||
                      violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
            str << "Invalid internal allocation type " << tcu::toHex(violation.record.data.internalAllocation.type);
            break;
        }

        case AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE:
        {
            str << "Invalid allocation scope";
            break;
        }

        case AllocationCallbackViolation::REASON_INVALID_ALIGNMENT:
        {
            str << "Invalid alignment";
            break;
        }

        case AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT:
        {
            str << "Reallocation with different alignment";
            break;
        }

        default:
            DE_ASSERT(false);
    }

    return str;
}

} // vk