Merge branch 'jekstrand_copy_and_bit_usage_flags' into 'master'
[platform/upstream/VK-GL-CTS.git] / external / vulkancts / framework / vulkan / vkAllocationCallbackUtil.cpp
1 /*-------------------------------------------------------------------------
2  * Vulkan CTS Framework
3  * --------------------
4  *
5  * Copyright (c) 2015 Google Inc.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and/or associated documentation files (the
9  * "Materials"), to deal in the Materials without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sublicense, and/or sell copies of the Materials, and to
12  * permit persons to whom the Materials are furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice(s) and this permission notice shall be
16  * included in all copies or substantial portions of the Materials.
17  *
18  * The Materials are Confidential Information as defined by the
19  * Khronos Membership Agreement until designated non-confidential by
20  * Khronos, at which point this condition clause shall be removed.
21  *
22  * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
26  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28  * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
29  *
30  *//*!
31  * \file
32  * \brief Memory allocation callback utilities.
33  *//*--------------------------------------------------------------------*/
34
35 #include "vkAllocationCallbackUtil.hpp"
36 #include "tcuFormatUtil.hpp"
37 #include "tcuTestLog.hpp"
38 #include "deSTLUtil.hpp"
39 #include "deMemory.h"
40
41 #include <map>
42
43 namespace vk
44 {
45
46 // System default allocator
47
48 static VKAPI_ATTR void* VKAPI_CALL systemAllocate (void*, size_t size, size_t alignment, VkSystemAllocationScope)
49 {
50         if (size > 0)
51                 return deAlignedMalloc(size, (deUint32)alignment);
52         else
53                 return DE_NULL;
54 }
55
//! pfnFree for the system default allocator; delegates to deAlignedFree().
static VKAPI_ATTR void VKAPI_CALL systemFree (void*, void* pMem)
{
	deAlignedFree(pMem);
}
60
//! pfnReallocation for the system default allocator; delegates to
//! deAlignedRealloc(). NOTE(review): size==0 handling (free semantics per
//! the Vulkan pfnReallocation contract) is assumed to be implemented by
//! deAlignedRealloc() — confirm against deMemory.h.
static VKAPI_ATTR void* VKAPI_CALL systemReallocate (void*, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope)
{
	return deAlignedRealloc(pOriginal, size, alignment);
}
65
//! pfnInternalAllocation for the system default allocator.
//! Intentionally empty: the default allocator does not track internal
//! allocation notifications.
static VKAPI_ATTR void VKAPI_CALL systemInternalAllocationNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}
69
//! pfnInternalFree for the system default allocator.
//! Intentionally empty: the default allocator does not track internal
//! free notifications.
static VKAPI_ATTR void VKAPI_CALL systemInternalFreeNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}
73
//! Process-wide VkAllocationCallbacks instance backed by the system
//! (deAlignedMalloc-based) allocator functions above.
static const VkAllocationCallbacks s_systemAllocator =
{
	DE_NULL,								// pUserData (unused by the system callbacks)
	systemAllocate,							// pfnAllocation
	systemReallocate,						// pfnReallocation
	systemFree,								// pfnFree
	systemInternalAllocationNotification,	// pfnInternalAllocation
	systemInternalFreeNotification,			// pfnInternalFree
};
83
//! Return the shared system default allocator. The returned pointer refers
//! to a static object and remains valid for the lifetime of the process.
const VkAllocationCallbacks* getSystemAllocator (void)
{
	return &s_systemAllocator;
}
88
89 // AllocationCallbacks
90
91 static VKAPI_ATTR void* VKAPI_CALL allocationCallback (void* pUserData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
92 {
93         return reinterpret_cast<AllocationCallbacks*>(pUserData)->allocate(size, alignment, allocationScope);
94 }
95
96 static VKAPI_ATTR void* VKAPI_CALL reallocationCallback (void* pUserData, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
97 {
98         return reinterpret_cast<AllocationCallbacks*>(pUserData)->reallocate(pOriginal, size, alignment, allocationScope);
99 }
100
101 static VKAPI_ATTR void VKAPI_CALL freeCallback (void* pUserData, void* pMem)
102 {
103         reinterpret_cast<AllocationCallbacks*>(pUserData)->free(pMem);
104 }
105
106 static VKAPI_ATTR void VKAPI_CALL internalAllocationNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
107 {
108         reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalAllocation(size, allocationType, allocationScope);
109 }
110
111 static VKAPI_ATTR void VKAPI_CALL internalFreeNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
112 {
113         reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalFree(size, allocationType, allocationScope);
114 }
115
116 static VkAllocationCallbacks makeCallbacks (AllocationCallbacks* object)
117 {
118         const VkAllocationCallbacks callbacks =
119         {
120                 reinterpret_cast<void*>(object),
121                 allocationCallback,
122                 reallocationCallback,
123                 freeCallback,
124                 internalAllocationNotificationCallback,
125                 internalFreeNotificationCallback
126         };
127         return callbacks;
128 }
129
//! Initialize m_callbacks so that the Vulkan callback table dispatches back
//! into this object's virtual interface.
AllocationCallbacks::AllocationCallbacks (void)
	: m_callbacks(makeCallbacks(this))
{
}
134
//! Out-of-line destructor (anchor for the class vtable); no resources to
//! release here.
AllocationCallbacks::~AllocationCallbacks (void)
{
}
138
139 // AllocationCallbackRecord
140
141 AllocationCallbackRecord AllocationCallbackRecord::allocation (size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
142 {
143         AllocationCallbackRecord record;
144
145         record.type                                                     = TYPE_ALLOCATION;
146         record.data.allocation.size                     = size;
147         record.data.allocation.alignment        = alignment;
148         record.data.allocation.scope            = scope;
149         record.data.allocation.returnedPtr      = returnedPtr;
150
151         return record;
152 }
153
154 AllocationCallbackRecord AllocationCallbackRecord::reallocation (void* original, size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
155 {
156         AllocationCallbackRecord record;
157
158         record.type                                                             = TYPE_REALLOCATION;
159         record.data.reallocation.original               = original;
160         record.data.reallocation.size                   = size;
161         record.data.reallocation.alignment              = alignment;
162         record.data.reallocation.scope                  = scope;
163         record.data.reallocation.returnedPtr    = returnedPtr;
164
165         return record;
166 }
167
168 AllocationCallbackRecord AllocationCallbackRecord::free (void* mem)
169 {
170         AllocationCallbackRecord record;
171
172         record.type                             = TYPE_FREE;
173         record.data.free.mem    = mem;
174
175         return record;
176 }
177
178 AllocationCallbackRecord AllocationCallbackRecord::internalAllocation (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
179 {
180         AllocationCallbackRecord record;
181
182         record.type                                                             = TYPE_INTERNAL_ALLOCATION;
183         record.data.internalAllocation.size             = size;
184         record.data.internalAllocation.type             = type;
185         record.data.internalAllocation.scope    = scope;
186
187         return record;
188 }
189
190 AllocationCallbackRecord AllocationCallbackRecord::internalFree (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
191 {
192         AllocationCallbackRecord record;
193
194         record.type                                                             = TYPE_INTERNAL_FREE;
195         record.data.internalAllocation.size             = size;
196         record.data.internalAllocation.type             = type;
197         record.data.internalAllocation.scope    = scope;
198
199         return record;
200 }
201
202 // ChainedAllocator
203
//! Store the downstream allocator that all calls are forwarded to.
//! The caller retains ownership of *nextAllocator; it must outlive this object.
ChainedAllocator::ChainedAllocator (const VkAllocationCallbacks* nextAllocator)
	: m_nextAllocator(nextAllocator)
{
}
208
//! No-op destructor; the chained-to allocator is not owned.
ChainedAllocator::~ChainedAllocator (void)
{
}
212
//! Forward an allocation request to the downstream allocator.
void* ChainedAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return m_nextAllocator->pfnAllocation(m_nextAllocator->pUserData, size, alignment, allocationScope);
}
217
//! Forward a reallocation request to the downstream allocator.
void* ChainedAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return m_nextAllocator->pfnReallocation(m_nextAllocator->pUserData, original, size, alignment, allocationScope);
}
222
//! Forward a free request to the downstream allocator.
void ChainedAllocator::free (void* mem)
{
	m_nextAllocator->pfnFree(m_nextAllocator->pUserData, mem);
}
227
//! Forward an internal-allocation notification to the downstream allocator.
void ChainedAllocator::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	m_nextAllocator->pfnInternalAllocation(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}
232
//! Forward an internal-free notification to the downstream allocator.
void ChainedAllocator::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	m_nextAllocator->pfnInternalFree(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}
237
238 // AllocationCallbackRecorder
239
//! Record all allocation-callback traffic while forwarding it to \p allocator.
//! \param callCountHint sizing hint for the record storage.
AllocationCallbackRecorder::AllocationCallbackRecorder (const VkAllocationCallbacks* allocator, deUint32 callCountHint)
	: ChainedAllocator	(allocator)
	, m_records			(callCountHint)
{
}
245
//! No-op destructor; record storage cleans up via its own destructor.
AllocationCallbackRecorder::~AllocationCallbackRecorder (void)
{
}
249
250 void* AllocationCallbackRecorder::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
251 {
252         void* const     ptr     = ChainedAllocator::allocate(size, alignment, allocationScope);
253
254         m_records.append(AllocationCallbackRecord::allocation(size, alignment, allocationScope, ptr));
255
256         return ptr;
257 }
258
259 void* AllocationCallbackRecorder::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
260 {
261         void* const     ptr     = ChainedAllocator::reallocate(original, size, alignment, allocationScope);
262
263         m_records.append(AllocationCallbackRecord::reallocation(original, size, alignment, allocationScope, ptr));
264
265         return ptr;
266 }
267
268 void AllocationCallbackRecorder::free (void* mem)
269 {
270         ChainedAllocator::free(mem);
271
272         m_records.append(AllocationCallbackRecord::free(mem));
273 }
274
275 void AllocationCallbackRecorder::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
276 {
277         ChainedAllocator::notifyInternalAllocation(size, allocationType, allocationScope);
278
279         m_records.append(AllocationCallbackRecord::internalAllocation(size, allocationType, allocationScope));
280 }
281
282 void AllocationCallbackRecorder::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
283 {
284         ChainedAllocator::notifyInternalFree(size, allocationType, allocationScope);
285
286         m_records.append(AllocationCallbackRecord::internalFree(size, allocationType, allocationScope));
287 }
288
289 // DeterministicFailAllocator
290
//! Allocator that lets the first \p numPassingAllocs (re)allocation calls
//! through to \p allocator and fails (returns null for) every call after that.
DeterministicFailAllocator::DeterministicFailAllocator (const VkAllocationCallbacks* allocator, deUint32 numPassingAllocs)
	: ChainedAllocator	(allocator)
	, m_numPassingAllocs(numPassingAllocs)
	, m_allocationNdx	(0)
{
}
297
//! No-op destructor.
DeterministicFailAllocator::~DeterministicFailAllocator (void)
{
}
301
302 void* DeterministicFailAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
303 {
304         if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
305                 return ChainedAllocator::allocate(size, alignment, allocationScope);
306         else
307                 return DE_NULL;
308 }
309
310 void* DeterministicFailAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
311 {
312         if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
313                 return ChainedAllocator::reallocate(original, size, alignment, allocationScope);
314         else
315                 return DE_NULL;
316 }
317
318 // Utils
319
//! Zero the per-(type, scope) internal allocation totals; the vector/list
//! members start empty by default.
AllocationCallbackValidationResults::AllocationCallbackValidationResults (void)
{
	deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
}
324
325 void AllocationCallbackValidationResults::clear (void)
326 {
327         liveAllocations.clear();
328         violations.clear();
329         deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
330 }
331
332 namespace
333 {
334
//! Bookkeeping entry used by validateAllocationCallbacks(): the callback
//! record that last produced a pointer, plus whether that pointer is
//! currently live (allocated and not yet freed/reallocated away).
struct AllocationSlot
{
	AllocationCallbackRecord	record;
	bool						isLive;

	// Empty, non-live slot.
	AllocationSlot (void)
		: isLive	(false)
	{}

	AllocationSlot (const AllocationCallbackRecord& record_, bool isLive_)
		: record	(record_)
		, isLive	(isLive_)
	{}
};
349
350 size_t getAlignment (const AllocationCallbackRecord& record)
351 {
352         if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION)
353                 return record.data.allocation.alignment;
354         else if (record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
355                 return record.data.reallocation.alignment;
356         else
357         {
358                 DE_ASSERT(false);
359                 return 0;
360         }
361 }
362
363 } // anonymous
364
//! Replay the recorder's callback history and check it against the Vulkan
//! allocation-callback contract. Violations are appended to
//! results->violations; allocations still live at the end of the history
//! are collected into results->liveAllocations, and net internal
//! allocation sizes are accumulated into results->internalAllocationTotal.
//! \note results must be empty (freshly constructed or clear()ed) on entry.
void validateAllocationCallbacks (const AllocationCallbackRecorder& recorder, AllocationCallbackValidationResults* results)
{
	// Slot table tracks the lifetime of each pointer ever returned;
	// ptrToSlotIndex maps a pointer value to its (reusable) slot.
	std::vector<AllocationSlot>		allocations;
	std::map<void*, size_t>			ptrToSlotIndex;

	DE_ASSERT(results->liveAllocations.empty() && results->violations.empty());

	for (AllocationCallbackRecorder::RecordIterator callbackIter = recorder.getRecordsBegin();
		 callbackIter != recorder.getRecordsEnd();
		 ++callbackIter)
	{
		const AllocationCallbackRecord&		record	= *callbackIter;

		// Validate scope: every record type except TYPE_FREE carries a
		// VkSystemAllocationScope that must be a valid enum value.
		{
			const VkSystemAllocationScope* const	scopePtr	= record.type == AllocationCallbackRecord::TYPE_ALLOCATION			? &record.data.allocation.scope
																: record.type == AllocationCallbackRecord::TYPE_REALLOCATION		? &record.data.reallocation.scope
																: record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION	? &record.data.internalAllocation.scope
																: record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE		? &record.data.internalAllocation.scope
																: DE_NULL;

			if (scopePtr && !de::inBounds(*scopePtr, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE));
		}

		// Validate alignment: must be a power of two for (re)allocations.
		if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION ||
			record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
		{
			if (!deIsPowerOfTwoSize(getAlignment(record)))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALIGNMENT));
		}

		// Validate actual allocation behavior
		switch (record.type)
		{
			case AllocationCallbackRecord::TYPE_ALLOCATION:
			{
				if (record.data.allocation.returnedPtr)
				{
					if (!de::contains(ptrToSlotIndex, record.data.allocation.returnedPtr))
					{
						// First time this pointer value is seen: new slot.
						ptrToSlotIndex[record.data.allocation.returnedPtr] = allocations.size();
						allocations.push_back(AllocationSlot(record, true));
					}
					else
					{
						// Pointer value was used before; legal only if the
						// previous use has been freed (slot not live).
						const size_t		slotNdx		= ptrToSlotIndex[record.data.allocation.returnedPtr];
						if (!allocations[slotNdx].isLive)
						{
							allocations[slotNdx].isLive = true;
							allocations[slotNdx].record = record;
						}
						else
						{
							// we should not have multiple live allocations with the same pointer
							DE_ASSERT(false);
						}
					}
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_REALLOCATION:
			{
				if (de::contains(ptrToSlotIndex, record.data.reallocation.original))
				{
					const size_t		origSlotNdx		= ptrToSlotIndex[record.data.reallocation.original];
					AllocationSlot&		origSlot		= allocations[origSlotNdx];

					DE_ASSERT(record.data.reallocation.original != DE_NULL);

					if (record.data.reallocation.size > 0)
					{
						// Realloc must be called with the original allocation's alignment.
						if (getAlignment(origSlot.record) != record.data.reallocation.alignment)
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT));

						if (record.data.reallocation.original == record.data.reallocation.returnedPtr)
						{
							// In-place realloc: original must have been live.
							if (!origSlot.isLive)
							{
								results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_FREED_PTR));
								origSlot.isLive = true; // Mark live to suppress further errors
							}

							// Just update slot record
							allocations[origSlotNdx].record = record;
						}
						else
						{
							if (record.data.reallocation.returnedPtr)
							{
								// Moved to a new address: original is now freed.
								allocations[origSlotNdx].isLive = false;
								if (!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr))
								{
									ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
									allocations.push_back(AllocationSlot(record, true));
								}
								else
								{
									// Address reuse of a previously-freed slot.
									const size_t slotNdx = ptrToSlotIndex[record.data.reallocation.returnedPtr];
									if (!allocations[slotNdx].isLive)
									{
										allocations[slotNdx].isLive = true;
										allocations[slotNdx].record = record;
									}
									else
									{
										// we should not have multiple live allocations with the same pointer
										DE_ASSERT(false);
									}
								}
							}
							// else original ptr remains valid and live
						}
					}
					else
					{
						// Size-0 realloc acts as a free of the original pointer.
						DE_ASSERT(!record.data.reallocation.returnedPtr);

						origSlot.isLive = false;
					}
				}
				else
				{
					// Original pointer was never returned by a tracked (re)allocation.
					if (record.data.reallocation.original)
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR));

					// Realloc with null original behaves like a fresh allocation.
					if (record.data.reallocation.returnedPtr)
					{
						DE_ASSERT(!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr));
						ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
						allocations.push_back(AllocationSlot(record, true));
					}
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_FREE:
			{
				if (record.data.free.mem != DE_NULL) // Freeing null pointer is valid and ignored
				{
					if (de::contains(ptrToSlotIndex, record.data.free.mem))
					{
						const size_t	slotNdx		= ptrToSlotIndex[record.data.free.mem];

						if (allocations[slotNdx].isLive)
							allocations[slotNdx].isLive = false;
						else
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_DOUBLE_FREE));
					}
					else
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR));
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
			case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
			{
				if (de::inBounds(record.data.internalAllocation.type, (VkInternalAllocationType)0, VK_INTERNAL_ALLOCATION_TYPE_LAST))
				{
					// Accumulate net internal allocation per (type, scope).
					size_t* const		totalAllocSizePtr	= &results->internalAllocationTotal[record.data.internalAllocation.type][record.data.internalAllocation.scope];
					const size_t		size				= record.data.internalAllocation.size;

					if (record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE)
					{
						// Freeing more than was allocated would drive the total negative.
						if (*totalAllocSizePtr < size)
						{
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL));
							*totalAllocSizePtr = 0; // Reset to 0 to suppress compound errors
						}
						else
							*totalAllocSizePtr -= size;
					}
					else
						*totalAllocSizePtr += size;
				}
				else
					results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE));

				break;
			}

			default:
				DE_ASSERT(false);
		}
	}

	DE_ASSERT(!de::contains(ptrToSlotIndex, DE_NULL));

	// Collect live allocations
	for (std::vector<AllocationSlot>::const_iterator slotIter = allocations.begin();
		 slotIter != allocations.end();
		 ++slotIter)
	{
		if (slotIter->isLive)
			results->liveAllocations.push_back(slotIter->record);
	}
}
568
//! Log validation results and decide pass/fail.
//! \param allowedLiveAllocScopeBits bitmask (1u << VkSystemAllocationScope)
//!        of scopes in which allocations are allowed to remain live without
//!        being reported as leaks.
//! \return true if there were no violations and no leaks.
bool checkAndLog (tcu::TestLog& log, const AllocationCallbackValidationResults& results, deUint32 allowedLiveAllocScopeBits)
{
	using tcu::TestLog;

	size_t	numLeaks	= 0;

	// Report callback-contract violations first.
	if (!results.violations.empty())
	{
		for (size_t violationNdx = 0; violationNdx < results.violations.size(); ++violationNdx)
		{
			log << TestLog::Message << "VIOLATION " << (violationNdx+1)
																	<< ": " << results.violations[violationNdx]
																	<< " (" << results.violations[violationNdx].record << ")"
				<< TestLog::EndMessage;
		}

		log << TestLog::Message << "ERROR: Found " << results.violations.size() << " invalid allocation callbacks!" << TestLog::EndMessage;
	}

	// Verify live allocations
	for (size_t liveNdx = 0; liveNdx < results.liveAllocations.size(); ++liveNdx)
	{
		const AllocationCallbackRecord&		record	= results.liveAllocations[liveNdx];
		// Live records are always allocations or reallocations; anything else
		// maps to the out-of-range sentinel and trips the assert below.
		const VkSystemAllocationScope		scope	= record.type == AllocationCallbackRecord::TYPE_ALLOCATION		? record.data.allocation.scope
													: record.type == AllocationCallbackRecord::TYPE_REALLOCATION	? record.data.reallocation.scope
													: VK_SYSTEM_ALLOCATION_SCOPE_LAST;

		DE_ASSERT(de::inBounds(scope, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST));

		if ((allowedLiveAllocScopeBits & (1u << scope)) == 0)
		{
			log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << record << TestLog::EndMessage;
			numLeaks += 1;
		}
	}

	// Verify internal allocations: any nonzero net total in a disallowed
	// scope counts as a leak.
	for (int internalAllocTypeNdx = 0; internalAllocTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocTypeNdx)
	{
		for (int scopeNdx = 0; scopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++scopeNdx)
		{
			const VkInternalAllocationType	type			= (VkInternalAllocationType)internalAllocTypeNdx;
			const VkSystemAllocationScope	scope			= (VkSystemAllocationScope)scopeNdx;
			const size_t					totalAllocated	= results.internalAllocationTotal[type][scope];

			if ((allowedLiveAllocScopeBits & (1u << scopeNdx)) == 0 &&
				totalAllocated > 0)
			{
				log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << totalAllocated
												<< " bytes of (" << type << ", " << scope << ") internal memory is still allocated"
					<< TestLog::EndMessage;
				numLeaks += 1;
			}
		}
	}

	if (numLeaks > 0)
		log << TestLog::Message << "ERROR: Found " << numLeaks << " memory leaks!" << TestLog::EndMessage;

	return results.violations.empty() && numLeaks == 0;
}
630
631 bool validateAndLog (tcu::TestLog& log, const AllocationCallbackRecorder& recorder, deUint32 allowedLiveAllocScopeBits)
632 {
633         AllocationCallbackValidationResults     validationResults;
634
635         validateAllocationCallbacks(recorder, &validationResults);
636
637         return checkAndLog(log, validationResults, allowedLiveAllocScopeBits);
638 }
639
640 std::ostream& operator<< (std::ostream& str, const AllocationCallbackRecord& record)
641 {
642         switch (record.type)
643         {
644                 case AllocationCallbackRecord::TYPE_ALLOCATION:
645                         str << "ALLOCATION: size=" << record.data.allocation.size
646                                 << ", alignment=" << record.data.allocation.alignment
647                                 << ", scope=" << record.data.allocation.scope
648                                 << ", returnedPtr=" << tcu::toHex(record.data.allocation.returnedPtr);
649                         break;
650
651                 case AllocationCallbackRecord::TYPE_REALLOCATION:
652                         str << "REALLOCATION: original=" << tcu::toHex(record.data.reallocation.original)
653                                 << ", size=" << record.data.reallocation.size
654                                 << ", alignment=" << record.data.reallocation.alignment
655                                 << ", scope=" << record.data.reallocation.scope
656                                 << ", returnedPtr=" << tcu::toHex(record.data.reallocation.returnedPtr);
657                         break;
658
659                 case AllocationCallbackRecord::TYPE_FREE:
660                         str << "FREE: mem=" << tcu::toHex(record.data.free.mem);
661                         break;
662
663                 case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
664                 case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
665                         str << "INTERNAL_" << (record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ? "ALLOCATION" : "FREE")
666                                 << ": size=" << record.data.internalAllocation.size
667                                 << ", type=" << record.data.internalAllocation.type
668                                 << ", scope=" << record.data.internalAllocation.scope;
669                         break;
670
671                 default:
672                         DE_ASSERT(false);
673         }
674
675         return str;
676 }
677
678 std::ostream& operator<< (std::ostream& str, const AllocationCallbackViolation& violation)
679 {
680         switch (violation.reason)
681         {
682                 case AllocationCallbackViolation::REASON_DOUBLE_FREE:
683                 {
684                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
685                         str << "Double free of " << tcu::toHex(violation.record.data.free.mem);
686                         break;
687                 }
688
689                 case AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR:
690                 {
691                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
692                         str << "Attempt to free " << tcu::toHex(violation.record.data.free.mem) << " which has not been allocated";
693                         break;
694                 }
695
696                 case AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR:
697                 {
698                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
699                         str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has not been allocated";
700                         break;
701                 }
702
703                 case AllocationCallbackViolation::REASON_REALLOC_FREED_PTR:
704                 {
705                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
706                         str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has been freed";
707                         break;
708                 }
709
710                 case AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL:
711                 {
712                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
713                         str << "Internal allocation total for (" << violation.record.data.internalAllocation.type << ", " << violation.record.data.internalAllocation.scope << ") is negative";
714                         break;
715                 }
716
717                 case AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE:
718                 {
719                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ||
720                                           violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
721                         str << "Invalid internal allocation type " << tcu::toHex(violation.record.data.internalAllocation.type);
722                         break;
723                 }
724
725                 case AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE:
726                 {
727                         str << "Invalid allocation scope";
728                         break;
729                 }
730
731                 case AllocationCallbackViolation::REASON_INVALID_ALIGNMENT:
732                 {
733                         str << "Invalid alignment";
734                         break;
735                 }
736
737                 case AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT:
738                 {
739                         str << "Reallocation with different alignment";
740                         break;
741                 }
742
743                 default:
744                         DE_ASSERT(false);
745         }
746
747         return str;
748 }
749
750 } // vk