Merge branch 'jekstrand_renderpass_transfer_bit_fix' into 'master'
[platform/upstream/VK-GL-CTS.git] / external / vulkancts / framework / vulkan / vkAllocationCallbackUtil.cpp
1 /*-------------------------------------------------------------------------
2  * Vulkan CTS Framework
3  * --------------------
4  *
5  * Copyright (c) 2015 Google Inc.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and/or associated documentation files (the
9  * "Materials"), to deal in the Materials without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sublicense, and/or sell copies of the Materials, and to
12  * permit persons to whom the Materials are furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice(s) and this permission notice shall be
16  * included in all copies or substantial portions of the Materials.
17  *
18  * The Materials are Confidential Information as defined by the
19  * Khronos Membership Agreement until designated non-confidential by
20  * Khronos, at which point this condition clause shall be removed.
21  *
22  * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
26  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28  * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
29  *
30  *//*!
31  * \file
32  * \brief Memory allocation callback utilities.
33  *//*--------------------------------------------------------------------*/
34
35 #include "vkAllocationCallbackUtil.hpp"
36 #include "tcuFormatUtil.hpp"
37 #include "tcuTestLog.hpp"
38 #include "deSTLUtil.hpp"
39 #include "deMemory.h"
40
41 #include <map>
42
43 namespace vk
44 {
45
46 // System default allocator
47
48 static VKAPI_ATTR void* VKAPI_CALL systemAllocate (void*, size_t size, size_t alignment, VkSystemAllocationScope)
49 {
50         if (size > 0)
51                 return deAlignedMalloc(size, (deUint32)alignment);
52         else
53                 return DE_NULL;
54 }
55
// Free entry point of the system default allocator; null is accepted
// (deAlignedFree handles it like free()) — TODO confirm against deMemory.h.
static VKAPI_ATTR void VKAPI_CALL systemFree (void*, void* pMem)
{
	deAlignedFree(pMem);
}
60
// Reallocation entry point of the system default allocator. Delegates to
// deAlignedRealloc; note alignment is passed through without the deUint32
// cast used in systemAllocate().
static VKAPI_ATTR void* VKAPI_CALL systemReallocate (void*, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope)
{
	return deAlignedRealloc(pOriginal, size, alignment);
}
65
// Internal allocation notification for the system allocator: intentionally a no-op.
static VKAPI_ATTR void VKAPI_CALL systemInternalAllocationNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}
69
// Internal free notification for the system allocator: intentionally a no-op.
static VKAPI_ATTR void VKAPI_CALL systemInternalFreeNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}
73
// VkAllocationCallbacks table backed by the process heap (deAlignedMalloc and friends).
static const VkAllocationCallbacks s_systemAllocator =
{
	DE_NULL,		// pUserData
	systemAllocate,		// pfnAllocation
	systemReallocate,	// pfnReallocation
	systemFree,		// pfnFree
	systemInternalAllocationNotification,	// pfnInternalAllocation
	systemInternalFreeNotification,		// pfnInternalFree
};
83
//! Get allocation callbacks that forward to the system (process) heap.
//! The returned pointer refers to a static table and is valid forever.
const VkAllocationCallbacks* getSystemAllocator (void)
{
	return &s_systemAllocator;
}
88
89 // AllocationCallbacks
90
// Trampoline: routes pfnAllocation to the AllocationCallbacks object stored in pUserData.
static VKAPI_ATTR void* VKAPI_CALL allocationCallback (void* pUserData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return reinterpret_cast<AllocationCallbacks*>(pUserData)->allocate(size, alignment, allocationScope);
}
95
// Trampoline: routes pfnReallocation to the AllocationCallbacks object stored in pUserData.
static VKAPI_ATTR void* VKAPI_CALL reallocationCallback (void* pUserData, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return reinterpret_cast<AllocationCallbacks*>(pUserData)->reallocate(pOriginal, size, alignment, allocationScope);
}
100
// Trampoline: routes pfnFree to the AllocationCallbacks object stored in pUserData.
static VKAPI_ATTR void VKAPI_CALL freeCallback (void* pUserData, void* pMem)
{
	reinterpret_cast<AllocationCallbacks*>(pUserData)->free(pMem);
}
105
// Trampoline: routes pfnInternalAllocation to the AllocationCallbacks object in pUserData.
static VKAPI_ATTR void VKAPI_CALL internalAllocationNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalAllocation(size, allocationType, allocationScope);
}
110
// Trampoline: routes pfnInternalFree to the AllocationCallbacks object in pUserData.
static VKAPI_ATTR void VKAPI_CALL internalFreeNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalFree(size, allocationType, allocationScope);
}
115
116 static VkAllocationCallbacks makeCallbacks (AllocationCallbacks* object)
117 {
118         const VkAllocationCallbacks callbacks =
119         {
120                 reinterpret_cast<void*>(object),
121                 allocationCallback,
122                 reallocationCallback,
123                 freeCallback,
124                 internalAllocationNotificationCallback,
125                 internalFreeNotificationCallback
126         };
127         return callbacks;
128 }
129
// Wires m_callbacks so that the Vulkan C callback table dispatches to this object's virtuals.
AllocationCallbacks::AllocationCallbacks (void)
	: m_callbacks(makeCallbacks(this))
{
}
134
// Out-of-line destructor; no resources to release here.
AllocationCallbacks::~AllocationCallbacks (void)
{
}
138
139 // AllocationCallbackRecord
140
141 AllocationCallbackRecord AllocationCallbackRecord::allocation (size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
142 {
143         AllocationCallbackRecord record;
144
145         record.type                                                     = TYPE_ALLOCATION;
146         record.data.allocation.size                     = size;
147         record.data.allocation.alignment        = alignment;
148         record.data.allocation.scope            = scope;
149         record.data.allocation.returnedPtr      = returnedPtr;
150
151         return record;
152 }
153
154 AllocationCallbackRecord AllocationCallbackRecord::reallocation (void* original, size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
155 {
156         AllocationCallbackRecord record;
157
158         record.type                                                             = TYPE_REALLOCATION;
159         record.data.reallocation.original               = original;
160         record.data.reallocation.size                   = size;
161         record.data.reallocation.alignment              = alignment;
162         record.data.reallocation.scope                  = scope;
163         record.data.reallocation.returnedPtr    = returnedPtr;
164
165         return record;
166 }
167
168 AllocationCallbackRecord AllocationCallbackRecord::free (void* mem)
169 {
170         AllocationCallbackRecord record;
171
172         record.type                             = TYPE_FREE;
173         record.data.free.mem    = mem;
174
175         return record;
176 }
177
178 AllocationCallbackRecord AllocationCallbackRecord::internalAllocation (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
179 {
180         AllocationCallbackRecord record;
181
182         record.type                                                             = TYPE_INTERNAL_ALLOCATION;
183         record.data.internalAllocation.size             = size;
184         record.data.internalAllocation.type             = type;
185         record.data.internalAllocation.scope    = scope;
186
187         return record;
188 }
189
190 AllocationCallbackRecord AllocationCallbackRecord::internalFree (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
191 {
192         AllocationCallbackRecord record;
193
194         record.type                                                             = TYPE_INTERNAL_FREE;
195         record.data.internalAllocation.size             = size;
196         record.data.internalAllocation.type             = type;
197         record.data.internalAllocation.scope    = scope;
198
199         return record;
200 }
201
202 // ChainedAllocator
203
// Allocator that forwards every callback to another VkAllocationCallbacks
// table; base class for recorder/fail-injection allocators below.
ChainedAllocator::ChainedAllocator (const VkAllocationCallbacks* nextAllocator)
	: m_nextAllocator(nextAllocator)
{
}
208
// Out-of-line destructor; the chained allocator is not owned.
ChainedAllocator::~ChainedAllocator (void)
{
}
212
// Forward an allocation request to the next allocator in the chain.
void* ChainedAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return m_nextAllocator->pfnAllocation(m_nextAllocator->pUserData, size, alignment, allocationScope);
}
217
// Forward a reallocation request to the next allocator in the chain.
void* ChainedAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return m_nextAllocator->pfnReallocation(m_nextAllocator->pUserData, original, size, alignment, allocationScope);
}
222
// Forward a free request to the next allocator in the chain.
void ChainedAllocator::free (void* mem)
{
	m_nextAllocator->pfnFree(m_nextAllocator->pUserData, mem);
}
227
// Forward an internal-allocation notification to the next allocator in the chain.
void ChainedAllocator::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	m_nextAllocator->pfnInternalAllocation(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}
232
// Forward an internal-free notification to the next allocator in the chain.
void ChainedAllocator::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	m_nextAllocator->pfnInternalFree(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}
237
238 // AllocationCallbackRecorder
239
// Records every callback invocation while forwarding it to the given allocator.
// callCountHint sizes the record storage up front — presumably an AppendList
// block-size hint; confirm against the m_records type in the header.
AllocationCallbackRecorder::AllocationCallbackRecorder (const VkAllocationCallbacks* allocator, deUint32 callCountHint)
	: ChainedAllocator	(allocator)
	, m_records		(callCountHint)
{
}
245
// Out-of-line destructor; recorded entries are released with m_records.
AllocationCallbackRecorder::~AllocationCallbackRecorder (void)
{
}
249
250 void* AllocationCallbackRecorder::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
251 {
252         void* const     ptr     = ChainedAllocator::allocate(size, alignment, allocationScope);
253
254         m_records.append(AllocationCallbackRecord::allocation(size, alignment, allocationScope, ptr));
255
256         return ptr;
257 }
258
259 void* AllocationCallbackRecorder::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
260 {
261         void* const     ptr     = ChainedAllocator::reallocate(original, size, alignment, allocationScope);
262
263         m_records.append(AllocationCallbackRecord::reallocation(original, size, alignment, allocationScope, ptr));
264
265         return ptr;
266 }
267
// Forward the free and record it.
void AllocationCallbackRecorder::free (void* mem)
{
	ChainedAllocator::free(mem);

	m_records.append(AllocationCallbackRecord::free(mem));
}
274
// Forward the internal-allocation notification and record it.
void AllocationCallbackRecorder::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	ChainedAllocator::notifyInternalAllocation(size, allocationType, allocationScope);

	m_records.append(AllocationCallbackRecord::internalAllocation(size, allocationType, allocationScope));
}
281
// Forward the internal-free notification and record it.
void AllocationCallbackRecorder::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	ChainedAllocator::notifyInternalFree(size, allocationType, allocationScope);

	m_records.append(AllocationCallbackRecord::internalFree(size, allocationType, allocationScope));
}
288
289 // DeterministicFailAllocator
290
// Allocator that lets the first numPassingAllocs allocation/reallocation
// calls through and fails (returns null) on every call after that; used to
// exercise out-of-memory paths deterministically.
DeterministicFailAllocator::DeterministicFailAllocator (const VkAllocationCallbacks* allocator, deUint32 numPassingAllocs)
	: ChainedAllocator	(allocator)
	, m_numPassingAllocs(numPassingAllocs)
	, m_allocationNdx	(0)
{
}
297
// Out-of-line destructor; nothing to release.
DeterministicFailAllocator::~DeterministicFailAllocator (void)
{
}
301
302 void* DeterministicFailAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
303 {
304         if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
305                 return ChainedAllocator::allocate(size, alignment, allocationScope);
306         else
307                 return DE_NULL;
308 }
309
310 void* DeterministicFailAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
311 {
312         if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
313                 return ChainedAllocator::reallocate(original, size, alignment, allocationScope);
314         else
315                 return DE_NULL;
316 }
317
318 // Utils
319
// Zero-initialize the per-(type, scope) internal allocation totals; the
// vectors default-construct empty.
AllocationCallbackValidationResults::AllocationCallbackValidationResults (void)
{
	deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
}
324
325 void AllocationCallbackValidationResults::clear (void)
326 {
327         liveAllocations.clear();
328         violations.clear();
329         deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
330 }
331
332 namespace
333 {
334
// Book-keeping entry for one (re)allocation record seen during validation;
// isLive tells whether the returned pointer is still outstanding.
struct AllocationSlot
{
	AllocationCallbackRecord	record;
	bool				isLive;

	// Default: empty record, not live.
	AllocationSlot (void)
		: isLive	(false)
	{}

	AllocationSlot (const AllocationCallbackRecord& record_, bool isLive_)
		: record	(record_)
		, isLive	(isLive_)
	{}
};
349
350 size_t getAlignment (const AllocationCallbackRecord& record)
351 {
352         if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION)
353                 return record.data.allocation.alignment;
354         else if (record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
355                 return record.data.reallocation.alignment;
356         else
357         {
358                 DE_ASSERT(false);
359                 return 0;
360         }
361 }
362
363 } // anonymous
364
//! Replay a recorder's callback stream and validate it against the Vulkan
//! allocation callback rules: valid scopes, power-of-two alignments,
//! realloc/free pairing (no double free, no freeing unknown pointers) and
//! balanced internal allocation totals. On return, results->violations
//! holds every rule violation found and results->liveAllocations holds the
//! records of pointers that were never freed.
void validateAllocationCallbacks (const AllocationCallbackRecorder& recorder, AllocationCallbackValidationResults* results)
{
	std::vector<AllocationSlot>		allocations;		// every (re)allocation seen, live or dead
	std::map<void*, size_t>			ptrToSlotIndex;		// currently-known pointer -> slot index

	DE_ASSERT(results->liveAllocations.empty() && results->violations.empty());

	for (AllocationCallbackRecorder::RecordIterator callbackIter = recorder.getRecordsBegin();
		 callbackIter != recorder.getRecordsEnd();
		 ++callbackIter)
	{
		const AllocationCallbackRecord&		record	= *callbackIter;

		// Validate scope
		{
			// Only record types that carry a scope are checked; others map to null.
			const VkSystemAllocationScope* const	scopePtr	= record.type == AllocationCallbackRecord::TYPE_ALLOCATION			? &record.data.allocation.scope
																: record.type == AllocationCallbackRecord::TYPE_REALLOCATION		? &record.data.reallocation.scope
																: record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION	? &record.data.internalAllocation.scope
																: record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE		? &record.data.internalAllocation.scope
																: DE_NULL;

			if (scopePtr && !de::inBounds(*scopePtr, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE));
		}

		// Validate alignment: alloc/realloc requests must use a power-of-two alignment.
		if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION ||
			record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
		{
			if (!deIsPowerOfTwoSize(getAlignment(record)))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALIGNMENT));
		}

		// Validate actual allocation behavior
		switch (record.type)
		{
			case AllocationCallbackRecord::TYPE_ALLOCATION:
			{
				DE_ASSERT(!de::contains(ptrToSlotIndex, record.data.allocation.returnedPtr));

				// Null return (failed allocation) creates no slot.
				if (record.data.allocation.returnedPtr)
				{
					ptrToSlotIndex[record.data.allocation.returnedPtr] = allocations.size();
					allocations.push_back(AllocationSlot(record, true));
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_REALLOCATION:
			{
				if (de::contains(ptrToSlotIndex, record.data.reallocation.original))
				{
					const size_t		origSlotNdx		= ptrToSlotIndex[record.data.reallocation.original];
					AllocationSlot&		origSlot		= allocations[origSlotNdx];

					DE_ASSERT(record.data.reallocation.original != DE_NULL);

					if (record.data.reallocation.size > 0)
					{
						// Realloc must request the same alignment as the original allocation.
						if (getAlignment(origSlot.record) != record.data.reallocation.alignment)
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT));

						if (record.data.reallocation.original == record.data.reallocation.returnedPtr)
						{
							// Reallocated in place: the pointer must have been live.
							if (!origSlot.isLive)
							{
								results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_FREED_PTR));
								origSlot.isLive = true; // Mark live to suppress further errors
							}

							// Just update slot record
							allocations[origSlotNdx].record = record;
						}
						else
						{
							DE_ASSERT(!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr));

							// Moved to a new pointer: retire the old slot, open a new one.
							if (record.data.reallocation.returnedPtr)
							{
								allocations[origSlotNdx].isLive = false;
								ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
								allocations.push_back(AllocationSlot(record, true));
							}
							// else original ptr remains valid and live
						}
					}
					else
					{
						// Zero-size realloc acts as a free of the original pointer.
						DE_ASSERT(!record.data.reallocation.returnedPtr);

						origSlot.isLive = false;
					}
				}
				else
				{
					// Original pointer unknown: violation unless it was null
					// (realloc(NULL, ...) behaves like a fresh allocation).
					if (record.data.reallocation.original)
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR));

					if (record.data.reallocation.returnedPtr)
					{
						DE_ASSERT(!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr));
						ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
						allocations.push_back(AllocationSlot(record, true));
					}
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_FREE:
			{
				if (record.data.free.mem != DE_NULL) // Freeing null pointer is valid and ignored
				{
					if (de::contains(ptrToSlotIndex, record.data.free.mem))
					{
						const size_t	slotNdx		= ptrToSlotIndex[record.data.free.mem];

						// Live -> retire; already dead -> double free.
						if (allocations[slotNdx].isLive)
							allocations[slotNdx].isLive = false;
						else
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_DOUBLE_FREE));
					}
					else
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR));
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
			case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
			{
				if (de::inBounds(record.data.internalAllocation.type, (VkInternalAllocationType)0, VK_INTERNAL_ALLOCATION_TYPE_LAST))
				{
					// Track a running byte total per (type, scope) pair.
					size_t* const		totalAllocSizePtr	= &results->internalAllocationTotal[record.data.internalAllocation.type][record.data.internalAllocation.scope];
					const size_t		size				= record.data.internalAllocation.size;

					if (record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE)
					{
						// More freed than allocated => violation; clamp to zero.
						if (*totalAllocSizePtr < size)
						{
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL));
							*totalAllocSizePtr = 0; // Reset to 0 to suppress compound errors
						}
						else
							*totalAllocSizePtr -= size;
					}
					else
						*totalAllocSizePtr += size;
				}
				else
					results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE));

				break;
			}

			default:
				DE_ASSERT(false);
		}
	}

	DE_ASSERT(!de::contains(ptrToSlotIndex, DE_NULL));

	// Collect live allocations
	for (std::vector<AllocationSlot>::const_iterator slotIter = allocations.begin();
		 slotIter != allocations.end();
		 ++slotIter)
	{
		if (slotIter->isLive)
			results->liveAllocations.push_back(slotIter->record);
	}
}
538
//! Log all violations and leaks found in the given validation results.
//! allowedLiveAllocScopeBits is a bitmask (1u << VkSystemAllocationScope)
//! of scopes in which outstanding allocations are tolerated and not
//! counted as leaks. Returns true if there were no violations and no leaks.
bool checkAndLog (tcu::TestLog& log, const AllocationCallbackValidationResults& results, deUint32 allowedLiveAllocScopeBits)
{
	using tcu::TestLog;

	size_t	numLeaks	= 0;

	// Report callback rule violations.
	if (!results.violations.empty())
	{
		for (size_t violationNdx = 0; violationNdx < results.violations.size(); ++violationNdx)
		{
			log << TestLog::Message << "VIOLATION " << (violationNdx+1)
													<< ": " << results.violations[violationNdx]
													<< " (" << results.violations[violationNdx].record << ")"
				<< TestLog::EndMessage;
		}

		log << TestLog::Message << "ERROR: Found " << results.violations.size() << " invalid allocation callbacks!" << TestLog::EndMessage;
	}

	// Verify live allocations
	for (size_t liveNdx = 0; liveNdx < results.liveAllocations.size(); ++liveNdx)
	{
		const AllocationCallbackRecord&		record	= results.liveAllocations[liveNdx];
		// Live records are always allocations or reallocations; anything else
		// maps to the out-of-range _LAST value and trips the assert below.
		const VkSystemAllocationScope		scope	= record.type == AllocationCallbackRecord::TYPE_ALLOCATION		? record.data.allocation.scope
													: record.type == AllocationCallbackRecord::TYPE_REALLOCATION	? record.data.reallocation.scope
													: VK_SYSTEM_ALLOCATION_SCOPE_LAST;

		DE_ASSERT(de::inBounds(scope, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST));

		if ((allowedLiveAllocScopeBits & (1u << scope)) == 0)
		{
			log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << record << TestLog::EndMessage;
			numLeaks += 1;
		}
	}

	// Verify internal allocations: any non-zero total in a disallowed scope is a leak.
	for (int internalAllocTypeNdx = 0; internalAllocTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocTypeNdx)
	{
		for (int scopeNdx = 0; scopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++scopeNdx)
		{
			const VkInternalAllocationType	type			= (VkInternalAllocationType)internalAllocTypeNdx;
			const VkSystemAllocationScope	scope			= (VkSystemAllocationScope)scopeNdx;
			const size_t					totalAllocated	= results.internalAllocationTotal[type][scope];

			if ((allowedLiveAllocScopeBits & (1u << scopeNdx)) == 0 &&
				totalAllocated > 0)
			{
				log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << totalAllocated
										<< " bytes of (" << type << ", " << scope << ") internal memory is still allocated"
					<< TestLog::EndMessage;
				numLeaks += 1;
			}
		}
	}

	if (numLeaks > 0)
		log << TestLog::Message << "ERROR: Found " << numLeaks << " memory leaks!" << TestLog::EndMessage;

	return results.violations.empty() && numLeaks == 0;
}
600
601 bool validateAndLog (tcu::TestLog& log, const AllocationCallbackRecorder& recorder, deUint32 allowedLiveAllocScopeBits)
602 {
603         AllocationCallbackValidationResults     validationResults;
604
605         validateAllocationCallbacks(recorder, &validationResults);
606
607         return checkAndLog(log, validationResults, allowedLiveAllocScopeBits);
608 }
609
//! Pretty-print a callback record (used in violation and leak log messages).
std::ostream& operator<< (std::ostream& str, const AllocationCallbackRecord& record)
{
	switch (record.type)
	{
		case AllocationCallbackRecord::TYPE_ALLOCATION:
			str << "ALLOCATION: size=" << record.data.allocation.size
				<< ", alignment=" << record.data.allocation.alignment
				<< ", scope=" << record.data.allocation.scope
				<< ", returnedPtr=" << tcu::toHex(record.data.allocation.returnedPtr);
			break;

		case AllocationCallbackRecord::TYPE_REALLOCATION:
			str << "REALLOCATION: original=" << tcu::toHex(record.data.reallocation.original)
				<< ", size=" << record.data.reallocation.size
				<< ", alignment=" << record.data.reallocation.alignment
				<< ", scope=" << record.data.reallocation.scope
				<< ", returnedPtr=" << tcu::toHex(record.data.reallocation.returnedPtr);
			break;

		case AllocationCallbackRecord::TYPE_FREE:
			str << "FREE: mem=" << tcu::toHex(record.data.free.mem);
			break;

		// Internal alloc and free share the internalAllocation union member.
		case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
		case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
			str << "INTERNAL_" << (record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ? "ALLOCATION" : "FREE")
				<< ": size=" << record.data.internalAllocation.size
				<< ", type=" << record.data.internalAllocation.type
				<< ", scope=" << record.data.internalAllocation.scope;
			break;

		default:
			DE_ASSERT(false);
	}

	return str;
}
647
648 std::ostream& operator<< (std::ostream& str, const AllocationCallbackViolation& violation)
649 {
650         switch (violation.reason)
651         {
652                 case AllocationCallbackViolation::REASON_DOUBLE_FREE:
653                 {
654                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
655                         str << "Double free of " << tcu::toHex(violation.record.data.free.mem);
656                         break;
657                 }
658
659                 case AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR:
660                 {
661                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
662                         str << "Attempt to free " << tcu::toHex(violation.record.data.free.mem) << " which has not been allocated";
663                         break;
664                 }
665
666                 case AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR:
667                 {
668                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
669                         str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has not been allocated";
670                         break;
671                 }
672
673                 case AllocationCallbackViolation::REASON_REALLOC_FREED_PTR:
674                 {
675                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
676                         str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has been freed";
677                         break;
678                 }
679
680                 case AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL:
681                 {
682                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
683                         str << "Internal allocation total for (" << violation.record.data.internalAllocation.type << ", " << violation.record.data.internalAllocation.scope << ") is negative";
684                         break;
685                 }
686
687                 case AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE:
688                 {
689                         DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ||
690                                           violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
691                         str << "Invalid internal allocation type " << tcu::toHex(violation.record.data.internalAllocation.type);
692                         break;
693                 }
694
695                 case AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE:
696                 {
697                         str << "Invalid allocation scope";
698                         break;
699                 }
700
701                 case AllocationCallbackViolation::REASON_INVALID_ALIGNMENT:
702                 {
703                         str << "Invalid alignment";
704                         break;
705                 }
706
707                 case AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT:
708                 {
709                         str << "Reallocation with different alignment";
710                         break;
711                 }
712
713                 default:
714                         DE_ASSERT(false);
715         }
716
717         return str;
718 }
719
720 } // vk