resolve merge conflicts of 59febde9 to deqp-dev
[platform/upstream/VK-GL-CTS.git] / external/vulkancts/modules/vulkan/memory/vktMemoryMappingTests.cpp
1 /*-------------------------------------------------------------------------
2  * Vulkan Conformance Tests
3  * ------------------------
4  *
5  * Copyright (c) 2015 Google Inc.
6  *
7  * Licensed under the Apache License, Version 2.0 (the "License");
8  * you may not use this file except in compliance with the License.
9  * You may obtain a copy of the License at
10  *
11  *      http://www.apache.org/licenses/LICENSE-2.0
12  *
13  * Unless required by applicable law or agreed to in writing, software
14  * distributed under the License is distributed on an "AS IS" BASIS,
15  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16  * See the License for the specific language governing permissions and
17  * limitations under the License.
18  *
19  *//*!
20  * \file
21  * \brief Simple memory mapping tests.
22  *//*--------------------------------------------------------------------*/
23
24 #include "vktMemoryMappingTests.hpp"
25
26 #include "vktTestCaseUtil.hpp"
27
28 #include "tcuMaybe.hpp"
29 #include "tcuResultCollector.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuPlatform.hpp"
32
33 #include "vkDeviceUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkQueryUtil.hpp"
36 #include "vkRef.hpp"
37 #include "vkRefUtil.hpp"
38 #include "vkStrUtil.hpp"
39 #include "vkAllocationCallbackUtil.hpp"
40
41 #include "deRandom.hpp"
42 #include "deSharedPtr.hpp"
43 #include "deStringUtil.hpp"
44 #include "deUniquePtr.hpp"
45 #include "deSTLUtil.hpp"
46 #include "deMath.h"
47
48 #include <string>
49 #include <vector>
50 #include <algorithm>
51
52 using tcu::Maybe;
53 using tcu::TestLog;
54
55 using de::SharedPtr;
56
57 using std::string;
58 using std::vector;
59 using std::pair;
60
61 using namespace vk;
62
63 namespace vkt
64 {
65 namespace memory
66 {
67 namespace
68 {
69 template<typename T>
70 T divRoundUp (const T& a, const T& b)
71 {
72         return (a / b) + (a % b == 0 ? 0 : 1);
73 }
74
75 template<typename T>
76 T roundDownToMultiple (const T& a, const T& b)
77 {
78         return b * (a / b);
79 }
80
81 template<typename T>
82 T roundUpToMultiple (const T& a, const T& b)
83 {
84         return b * (a / b + (a % b != 0 ? 1 : 0));
85 }
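// For example, divRoundUp(10, 4) == 3, roundDownToMultiple(10, 4) == 8 and
// roundUpToMultiple(10, 4) == 12; these helpers are used below for atom- and block-aligned
// bookkeeping.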
86
87 enum AllocationKind
88 {
89         ALLOCATION_KIND_SUBALLOCATED                                                                            = 0,
90         ALLOCATION_KIND_DEDICATED_BUFFER                                                                        = 1,
91         ALLOCATION_KIND_DEDICATED_IMAGE                                                                         = 2,
92         ALLOCATION_KIND_LAST
93 };
94
95 // \note Bit vector that guarantees that each value takes only one bit.
96 // std::vector<bool> is often optimized to use only one bit for each bool, but
97 // that is an implementation detail, and in this case we really need to know how much
98 // memory is used.
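// For example, BitVector(100) stores divRoundUp(100, 32) == 4 deUint32 words, i.e. exactly
// 16 bytes, independent of how the standard library implements vector<bool>.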
99 class BitVector
100 {
101 public:
102         enum
103         {
104                 BLOCK_BIT_SIZE = 8 * sizeof(deUint32)
105         };
106
107         BitVector (size_t size, bool value = false)
108                 : m_data(divRoundUp<size_t>(size, (size_t)BLOCK_BIT_SIZE), value ? ~0x0u : 0x0u)
109         {
110         }
111
112         bool get (size_t ndx) const
113         {
114                 return (m_data[ndx / BLOCK_BIT_SIZE] & (0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE))) != 0;
115         }
116
117         void set (size_t ndx, bool value)
118         {
119                 if (value)
120                         m_data[ndx / BLOCK_BIT_SIZE] |= 0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE);
121                 else
122                         m_data[ndx / BLOCK_BIT_SIZE] &= ~(0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE));
123         }
124
125         void setRange (size_t offset, size_t count, bool value)
126         {
127                 size_t ndx = offset;
128
129                 for (; (ndx < offset + count) && ((ndx % BLOCK_BIT_SIZE) != 0); ndx++)
130                 {
131                         DE_ASSERT(ndx >= offset);
132                         DE_ASSERT(ndx < offset + count);
133                         set(ndx, value);
134                 }
135
136                 {
137                         const size_t endOfFullBlockNdx = roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE);
138
139                         if (ndx < endOfFullBlockNdx)
140                         {
141                                 deMemset(&m_data[ndx / BLOCK_BIT_SIZE], (value ? 0xFF : 0x0), (endOfFullBlockNdx - ndx) / 8);
142                                 ndx = endOfFullBlockNdx;
143                         }
144                 }
145
146                 for (; ndx < offset + count; ndx++)
147                 {
148                         DE_ASSERT(ndx >= offset);
149                         DE_ASSERT(ndx < offset + count);
150                         set(ndx, value);
151                 }
152         }
153
154         void vectorAnd (const BitVector& other, size_t offset, size_t count)
155         {
156                 size_t ndx = offset;
157
158                 for (; ndx < offset + count && (ndx % BLOCK_BIT_SIZE) != 0; ndx++)
159                 {
160                         DE_ASSERT(ndx >= offset);
161                         DE_ASSERT(ndx < offset + count);
162                         set(ndx, other.get(ndx) && get(ndx));
163                 }
164
165                 for (; ndx < roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE); ndx += BLOCK_BIT_SIZE)
166                 {
167                         DE_ASSERT(ndx >= offset);
168                         DE_ASSERT(ndx < offset + count);
169                         DE_ASSERT(ndx % BLOCK_BIT_SIZE == 0);
170                         DE_ASSERT(ndx + BLOCK_BIT_SIZE <= offset + count);
171                         m_data[ndx / BLOCK_BIT_SIZE] &= other.m_data[ndx / BLOCK_BIT_SIZE];
172                 }
173
174                 for (; ndx < offset + count; ndx++)
175                 {
176                         DE_ASSERT(ndx >= offset);
177                         DE_ASSERT(ndx < offset + count);
178                         set(ndx, other.get(ndx) && get(ndx));
179                 }
180         }
181
182 private:
183         vector<deUint32>        m_data;
184 };
185
186 class ReferenceMemory
187 {
188 public:
189         ReferenceMemory (size_t size, size_t atomSize)
190                 : m_atomSize    (atomSize)
191                 , m_bytes               (size, 0xDEu)
192                 , m_defined             (size, false)
193                 , m_flushed             (size / atomSize, false)
194         {
195                 DE_ASSERT(size % m_atomSize == 0);
196         }
197
198         void write (size_t pos, deUint8 value)
199         {
200                 m_bytes[pos] = value;
201                 m_defined.set(pos, true);
202                 m_flushed.set(pos / m_atomSize, false);
203         }
204
205         bool read (size_t pos, deUint8 value)
206         {
207                 const bool isOk = !m_defined.get(pos)
208                                                 || m_bytes[pos] == value;
209
210                 m_bytes[pos] = value;
211                 m_defined.set(pos, true);
212
213                 return isOk;
214         }
215
216         bool modifyXor (size_t pos, deUint8 value, deUint8 mask)
217         {
218                 const bool isOk = !m_defined.get(pos)
219                                                 || m_bytes[pos] == value;
220
221                 m_bytes[pos] = value ^ mask;
222                 m_defined.set(pos, true);
223                 m_flushed.set(pos / m_atomSize, false);
224
225                 return isOk;
226         }
227
228         void flush (size_t offset, size_t size)
229         {
230                 DE_ASSERT((offset % m_atomSize) == 0);
231                 DE_ASSERT((size % m_atomSize) == 0);
232
233                 m_flushed.setRange(offset / m_atomSize, size / m_atomSize, true);
234         }
235
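        // An atom that has not been flushed since its last write is marked undefined again by
        // invalidate(); with a one-byte atom this reduces to a bitwise AND of m_defined and
        // m_flushed over the affected range.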
236         void invalidate (size_t offset, size_t size)
237         {
238                 DE_ASSERT((offset % m_atomSize) == 0);
239                 DE_ASSERT((size % m_atomSize) == 0);
240
241                 if (m_atomSize == 1)
242                 {
243                         m_defined.vectorAnd(m_flushed, offset, size);
244                 }
245                 else
246                 {
247                         for (size_t ndx = 0; ndx < size / m_atomSize; ndx++)
248                         {
249                                 if (!m_flushed.get((offset / m_atomSize) + ndx))
250                                         m_defined.setRange(offset + ndx * m_atomSize, m_atomSize, false);
251                         }
252                 }
253         }
254
255
256 private:
257         const size_t    m_atomSize;
258         vector<deUint8> m_bytes;
259         BitVector               m_defined;
260         BitVector               m_flushed;
261 };
262
263 struct MemoryType
264 {
265         MemoryType              (deUint32 index_, const VkMemoryType& type_)
266                 : index (index_)
267                 , type  (type_)
268         {
269         }
270
271         MemoryType              (void)
272                 : index (~0u)
273         {
274         }
275
276         deUint32                index;
277         VkMemoryType    type;
278 };
279
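// Measures how much host (system) memory the driver consumes for a single VkDeviceMemory object by
// recording allocation callbacks around a minimal 1-byte allocation from memory type 0. The
// sizeof(void*) term added per live allocation is a rough estimate of bookkeeping overhead.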
280 size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
281 {
282         AllocationCallbackRecorder      callbackRecorder        (getSystemAllocator());
283
284         {
285                 // 1 B allocation from memory type 0
286                 const VkMemoryAllocateInfo      allocInfo       =
287                 {
288                         VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
289                         DE_NULL,
290                         1u,
291                         0u,
292                 };
293                 const Unique<VkDeviceMemory>                    memory                  (allocateMemory(vk, device, &allocInfo));
294                 AllocationCallbackValidationResults             validateRes;
295
296                 validateAllocationCallbacks(callbackRecorder, &validateRes);
297
298                 TCU_CHECK(validateRes.violations.empty());
299
300                 return getLiveSystemAllocationTotal(validateRes)
301                            + sizeof(void*)*validateRes.liveAllocations.size(); // allocation overhead
302         }
303 }
304
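// Creates a 2D linear RGBA8 image (4 bytes per pixel) whose power-of-two square extent is large
// enough to cover at least 'size' bytes of pixel data; used for the dedicated-image allocations.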
305 Move<VkImage> makeImage (const DeviceInterface& vk, VkDevice device, VkDeviceSize size, deUint32 queueFamilyIndex)
306 {
307         const VkDeviceSize                                      sizeInPixels                                    = (size + 3u) / 4u;
308         const deUint32                                          sqrtSize                                                = static_cast<deUint32>(deFloatCeil(deFloatSqrt(static_cast<float>(sizeInPixels))));
309         const deUint32                                          powerOfTwoSize                                  = deMaxu32(deSmallestGreaterOrEquallPowerOfTwoU32(sqrtSize), 4096u);
310         const VkImageCreateInfo                         colorImageParams                                =
311         {
312                 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,                                                    // VkStructureType                      sType;
313                 DE_NULL,                                                                                                                // const void*                          pNext;
314                 0u,                                                                                                                             // VkImageCreateFlags           flags;
315                 VK_IMAGE_TYPE_2D,                                                                                               // VkImageType                          imageType;
316                 VK_FORMAT_R8G8B8A8_UINT,                                                                                // VkFormat                                     format;
317                 {
318                         powerOfTwoSize,
319                         powerOfTwoSize,
320                         1u
321                 },                                                                                                                              // VkExtent3D                           extent;
322                 1u,                                                                                                                             // deUint32                                     mipLevels;
323                 1u,                                                                                                                             // deUint32                                     arraySize;
324                 VK_SAMPLE_COUNT_1_BIT,                                                                                  // deUint32                                     samples;
325                 VK_IMAGE_TILING_LINEAR,                                                                                 // VkImageTiling                        tiling;
326                 VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags         usage;
327                 VK_SHARING_MODE_EXCLUSIVE,                                                                              // VkSharingMode                        sharingMode;
328                 1u,                                                                                                                             // deUint32                                     queueFamilyCount;
329                 &queueFamilyIndex,                                                                                              // const deUint32*                      pQueueFamilyIndices;
330                 VK_IMAGE_LAYOUT_UNDEFINED,                                                                              // VkImageLayout                        initialLayout;
331         };
332
333         return createImage(vk, device, &colorImageParams);
334 }
335
336 Move<VkBuffer> makeBuffer(const DeviceInterface& vk, VkDevice device, VkDeviceSize size, deUint32 queueFamilyIndex)
337 {
338         const VkBufferCreateInfo                        bufferParams                                    =
339         {
340                 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,                                                   //      VkStructureType                 sType;
341                 DE_NULL,                                                                                                                //      const void*                             pNext;
342                 0u,                                                                                                                             //      VkBufferCreateFlags             flags;
343                 size,                                                                                                                   //      VkDeviceSize                    size;
344                 VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, // VkBufferUsageFlags      usage;
345                 VK_SHARING_MODE_EXCLUSIVE,                                                                              //      VkSharingMode                   sharingMode;
346                 1u,                                                                                                                             //      deUint32                                queueFamilyCount;
347                 &queueFamilyIndex,                                                                                              //      const deUint32*                 pQueueFamilyIndices;
348         };
349         return vk::createBuffer(vk, device, &bufferParams, (const VkAllocationCallbacks*)DE_NULL);
350 }
351
352 VkMemoryRequirements getImageMemoryRequirements(const DeviceInterface& vk, VkDevice device, Move<VkImage>& image)
353 {
354         VkImageMemoryRequirementsInfo2KHR       info                                                    =
355         {
356                 VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR,                 // VkStructureType                      sType
357                 DE_NULL,                                                                                                                // const void*                          pNext
358                 *image                                                                                                                  // VkImage                                      image
359         };
360         VkMemoryDedicatedRequirementsKHR        dedicatedRequirements                   =
361         {
362                 VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,                    // VkStructureType                      sType
363                 DE_NULL,                                                                                                                // const void*                          pNext
364                 VK_FALSE,                                                                                                               // VkBool32                                     prefersDedicatedAllocation
365                 VK_FALSE                                                                                                                // VkBool32                                     requiresDedicatedAllocation
366         };
367         VkMemoryRequirements2KHR                        req2                                                    =
368         {
369                 VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,                                    // VkStructureType                      sType
370                 &dedicatedRequirements,                                                                                 // void*                                        pNext
371                 {0, 0, 0}                                                                                                               // VkMemoryRequirements         memoryRequirements
372         };
373
374         vk.getImageMemoryRequirements2KHR(device, &info, &req2);
375
376         return req2.memoryRequirements;
377 }
378
379 VkMemoryRequirements getBufferMemoryRequirements(const DeviceInterface& vk, VkDevice device, Move<VkBuffer>& buffer)
380 {
381         VkBufferMemoryRequirementsInfo2KHR      info                                                    =
382         {
383                 VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR,                // VkStructureType                      sType
384                 DE_NULL,                                                                                                                // const void*                          pNext
385                 *buffer                                                                                                                 // VkBuffer                                     buffer
386         };
387         VkMemoryDedicatedRequirementsKHR        dedicatedRequirements                   =
388         {
389                 VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,                    // VkStructureType                      sType
390                 DE_NULL,                                                                                                                // const void*                          pNext
391                 VK_FALSE,                                                                                                               // VkBool32                                     prefersDedicatedAllocation
392                 VK_FALSE                                                                                                                // VkBool32                                     requiresDedicatedAllocation
393         };
394         VkMemoryRequirements2KHR                        req2                                                    =
395         {
396                 VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,                            // VkStructureType              sType
397                 &dedicatedRequirements,                                                                         // void*                                pNext
398                 {0, 0, 0}                                                                                                       // VkMemoryRequirements memoryRequirements
399         };
400
401         vk.getBufferMemoryRequirements2KHR(device, &info, &req2);
402
403         return req2.memoryRequirements;
404 }
405
406 Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex)
407 {
408         const VkMemoryAllocateInfo                      pAllocInfo                                              =
409         {
410                 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
411                 DE_NULL,
412                 pAllocInfo_allocationSize,
413                 pAllocInfo_memoryTypeIndex,
414         };
415         return allocateMemory(vk, device, &pAllocInfo);
416 }
417
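// Overload used for the dedicated-allocation cases: when an image or buffer handle is given, a
// VkMemoryDedicatedAllocateInfoKHR is chained into VkMemoryAllocateInfo::pNext.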
418 Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex, Move<VkImage>& image, Move<VkBuffer>& buffer)
419 {
420         DE_ASSERT((!image) || (!buffer));
421
422         const VkMemoryDedicatedAllocateInfoKHR
423                                                                                 dedicatedAllocateInfo                   =
424         {
425                 VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,                   // VkStructureType              sType
426                 DE_NULL,                                                                                                                // const void*                  pNext
427                 *image,                                                                                                                 // VkImage                              image
428                 *buffer                                                                                                                 // VkBuffer                             buffer
429         };
430
431         const VkMemoryAllocateInfo                      pAllocInfo                                              =
432         {
433                 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
434                 !image && !buffer ? DE_NULL : &dedicatedAllocateInfo,
435                 pAllocInfo_allocationSize,
436                 pAllocInfo_memoryTypeIndex,
437         };
438         return allocateMemory(vk, device, &pAllocInfo);
439 }
440
441 struct MemoryRange
442 {
443         MemoryRange (VkDeviceSize offset_ = ~(VkDeviceSize)0, VkDeviceSize size_ = ~(VkDeviceSize)0)
444                 : offset        (offset_)
445                 , size          (size_)
446         {
447         }
448
449         VkDeviceSize    offset;
450         VkDeviceSize    size;
451 };
452
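// Parameters of a single mapping test case. allocationSize, mapping, flushMappings and
// invalidateMappings are expressed in multiples of the non-coherent atom size and are scaled by
// the actual atom size at run time.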
453 struct TestConfig
454 {
455         TestConfig (void)
456                 : allocationSize        (~(VkDeviceSize)0)
457                 , allocationKind        (ALLOCATION_KIND_SUBALLOCATED)
458         {
459         }
460
461         VkDeviceSize            allocationSize;
462         deUint32                        seed;
463
464         MemoryRange                     mapping;
465         vector<MemoryRange>     flushMappings;
466         vector<MemoryRange>     invalidateMappings;
467         bool                            remap;
468         AllocationKind          allocationKind;
469 };
470
471 bool compareAndLogBuffer (TestLog& log, size_t size, const deUint8* result, const deUint8* reference)
472 {
473         size_t  failedBytes     = 0;
474         size_t  firstFailed     = (size_t)-1;
475
476         for (size_t ndx = 0; ndx < size; ndx++)
477         {
478                 if (result[ndx] != reference[ndx])
479                 {
480                         failedBytes++;
481
482                         if (firstFailed == (size_t)-1)
483                                 firstFailed = ndx;
484                 }
485         }
486
487         if (failedBytes > 0)
488         {
489                 log << TestLog::Message << "Comparison failed. Failed bytes " << failedBytes << ". First failed at offset " << firstFailed << "." << TestLog::EndMessage;
490
491                 std::ostringstream      expectedValues;
492                 std::ostringstream      resultValues;
493
494                 for (size_t ndx = firstFailed; ndx < firstFailed + 10 && ndx < size; ndx++)
495                 {
496                         if (ndx != firstFailed)
497                         {
498                                 expectedValues << ", ";
499                                 resultValues << ", ";
500                         }
501
502                         expectedValues << (deUint32)reference[ndx];
503                         resultValues << (deUint32)result[ndx];
504                 }
505
506                 if (firstFailed + 10 < size)
507                 {
508                         expectedValues << "...";
509                         resultValues << "...";
510                 }
511
512                 log << TestLog::Message << "Expected values at offset: " << firstFailed << ", " << expectedValues.str() << TestLog::EndMessage;
513                 log << TestLog::Message << "Result values at offset: " << firstFailed << ", " << resultValues.str() << TestLog::EndMessage;
514
515                 return false;
516         }
517         else
518                 return true;
519 }
520
521 tcu::TestStatus testMemoryMapping (Context& context, const TestConfig config)
522 {
523         TestLog&                                                                log                                                     = context.getTestContext().getLog();
524         tcu::ResultCollector                                    result                                          (log);
525         bool                                                                    atLeastOneTestPerformed         = false;
526         const VkPhysicalDevice                                  physicalDevice                          = context.getPhysicalDevice();
527         const VkDevice                                                  device                                          = context.getDevice();
528         const InstanceInterface&                                vki                                                     = context.getInstanceInterface();
529         const DeviceInterface&                                  vkd                                                     = context.getDeviceInterface();
530         const VkPhysicalDeviceMemoryProperties  memoryProperties                        = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
531         // \todo [2016-05-27 misojarvi] Remove once drivers start correctly reporting a nonCoherentAtomSize that is at least 1.
532         const VkDeviceSize                                              nonCoherentAtomSize                     = context.getDeviceProperties().limits.nonCoherentAtomSize != 0
533                                                                                                                                                 ? context.getDeviceProperties().limits.nonCoherentAtomSize
534                                                                                                                                                 : 1;
535         const deUint32                                                  queueFamilyIndex                        = context.getUniversalQueueFamilyIndex();
536
537         if (config.allocationKind == ALLOCATION_KIND_DEDICATED_IMAGE
538         ||      config.allocationKind == ALLOCATION_KIND_DEDICATED_BUFFER)
539         {
540                 const std::vector<std::string>&         extensions                                      = context.getDeviceExtensions();
541                 const deBool                                            isSupported                                     = std::find(extensions.begin(), extensions.end(), "VK_KHR_dedicated_allocation") != extensions.end();
542                 if (!isSupported)
543                 {
544                         TCU_THROW(NotSupportedError, "VK_KHR_dedicated_allocation is not supported");
545                 }
546         }
547
548         {
549                 const tcu::ScopedLogSection     section (log, "TestCaseInfo", "TestCaseInfo");
550
551                 log << TestLog::Message << "Seed: " << config.seed << TestLog::EndMessage;
552                 log << TestLog::Message << "Allocation size: " << config.allocationSize << " * atom" <<  TestLog::EndMessage;
553                 log << TestLog::Message << "Mapping, offset: " << config.mapping.offset << " * atom, size: " << config.mapping.size << " * atom" << TestLog::EndMessage;
554
555                 if (!config.flushMappings.empty())
556                 {
557                         log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;
558
559                         for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
560                                 log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset << " * atom, Size: " << config.flushMappings[ndx].size << " * atom" << TestLog::EndMessage;
561                 }
562
563                 if (config.remap)
564                         log << TestLog::Message << "Remapping memory between flush and invalidation." << TestLog::EndMessage;
565
566                 if (!config.invalidateMappings.empty())
567                 {
568                         log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;
569
570                         for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
571                                 log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset << " * atom, Size: " << config.invalidateMappings[ndx].size << " * atom" << TestLog::EndMessage;
572                 }
573         }
574
575         for (deUint32 memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
576         {
577                 try
578                 {
579                         const tcu::ScopedLogSection             section         (log, "MemoryType" + de::toString(memoryTypeIndex), "MemoryType" + de::toString(memoryTypeIndex));
580                         const vk::VkMemoryType&                 memoryType      = memoryProperties.memoryTypes[memoryTypeIndex];
581                         const VkMemoryHeap&                             memoryHeap      = memoryProperties.memoryHeaps[memoryType.heapIndex];
582                         const VkDeviceSize                              atomSize        = (memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
583                                                                                                                 ? 1
584                                                                                                                 : nonCoherentAtomSize;
585
586                         VkDeviceSize                                    allocationSize                          = config.allocationSize * atomSize;
587                         vk::VkMemoryRequirements                req                                                     =
588                         {
589                                 (VkDeviceSize)allocationSize,
590                                 (VkDeviceSize)0,
591                                 ~(deUint32)0u
592                         };
593                         Move<VkImage>                                   image;
594                         Move<VkBuffer>                                  buffer;
595
596                         if (config.allocationKind == ALLOCATION_KIND_DEDICATED_IMAGE)
597                         {
598                                 image = makeImage(vkd, device, allocationSize, queueFamilyIndex);
599                                 req = getImageMemoryRequirements(vkd, device, image);
600                         }
601                         else if (config.allocationKind == ALLOCATION_KIND_DEDICATED_BUFFER)
602                         {
603                                 buffer = makeBuffer(vkd, device, allocationSize, queueFamilyIndex);
604                                 req = getBufferMemoryRequirements(vkd, device, buffer);
605                         }
606                         allocationSize = req.size;
607                         VkDeviceSize                                    mappingSize                                     = config.mapping.size * atomSize;
608                         VkDeviceSize                                    mappingOffset                           = config.mapping.offset * atomSize;
609                         if (config.mapping.size == config.allocationSize && config.mapping.offset == 0u)
610                         {
611                                 mappingSize = allocationSize;
612                         }
613
614                         log << TestLog::Message << "MemoryType: " << memoryType << TestLog::EndMessage;
615                         log << TestLog::Message << "MemoryHeap: " << memoryHeap << TestLog::EndMessage;
616                         log << TestLog::Message << "AtomSize: " << atomSize << TestLog::EndMessage;
617                         log << TestLog::Message << "AllocationSize: " << allocationSize << TestLog::EndMessage;
618                         log << TestLog::Message << "Mapping, offset: " << mappingOffset << ", size: " << mappingSize << TestLog::EndMessage;
619
620                         if ((req.memoryTypeBits & (1u << memoryTypeIndex)) == 0)
621                         {
622                                 static const char* const allocationKindName[] =
623                                 {
624                                         "suballocation",
625                                         "dedicated allocation of buffers",
626                                         "dedicated allocation of images"
627                                 };
628                                 log << TestLog::Message << "Memory type does not support " << allocationKindName[static_cast<deUint32>(config.allocationKind)] << '.' << TestLog::EndMessage;
629                                 continue;
630                         }
631
632                         if (!config.flushMappings.empty())
633                         {
634                                 log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;
635
636                                 for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
637                                         log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset * atomSize << ", Size: " << config.flushMappings[ndx].size * atomSize << TestLog::EndMessage;
638                         }
639
640                         if (!config.invalidateMappings.empty())
641                         {
642                                 log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;
643
644                                 for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
645                                         log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset * atomSize << ", Size: " << config.invalidateMappings[ndx].size * atomSize << TestLog::EndMessage;
646                         }
647
648                         if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
649                         {
650                                 log << TestLog::Message << "Memory type doesn't support mapping." << TestLog::EndMessage;
651                         }
652                         else if (memoryHeap.size <= 4 * allocationSize)
653                         {
654                                 log << TestLog::Message << "Memory type's heap is too small." << TestLog::EndMessage;
655                         }
656                         else
657                         {
658                                 atLeastOneTestPerformed = true;
659                                 const Unique<VkDeviceMemory>    memory                          (allocMemory(vkd, device, allocationSize, memoryTypeIndex, image, buffer));
660                                 de::Random                                              rng                                     (config.seed);
661                                 vector<deUint8>                                 reference                       ((size_t)(allocationSize));
662                                 deUint8*                                                mapping                         = DE_NULL;
663
664                                 {
665                                         void* ptr;
666                                         VK_CHECK(vkd.mapMemory(device, *memory, mappingOffset, mappingSize, 0u, &ptr));
667                                         TCU_CHECK(ptr);
668
669                                         mapping = (deUint8*)ptr;
670                                 }
671
672                                 for (VkDeviceSize ndx = 0; ndx < mappingSize; ndx++)
673                                 {
674                                         const deUint8 val = rng.getUint8();
675
676                                         mapping[ndx]                                                                                            = val;
677                                         reference[(size_t)(mappingOffset + ndx)]        = val;
678                                 }
679
680                                 if (!config.flushMappings.empty())
681                                 {
682                                         vector<VkMappedMemoryRange> ranges;
683
684                                         for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
685                                         {
686                                                 const VkMappedMemoryRange range =
687                                                 {
688                                                         VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
689                                                         DE_NULL,
690
691                                                         *memory,
692                                                         config.flushMappings[ndx].offset * atomSize,
693                                                         config.flushMappings[ndx].size * atomSize
694                                                 };
695
696                                                 ranges.push_back(range);
697                                         }
698
699                                         VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
700                                 }
701
702                                 if (config.remap)
703                                 {
704                                         void* ptr;
705                                         vkd.unmapMemory(device, *memory);
706                                         VK_CHECK(vkd.mapMemory(device, *memory, mappingOffset, mappingSize, 0u, &ptr));
707                                         TCU_CHECK(ptr);
708
709                                         mapping = (deUint8*)ptr;
710                                 }
711
712                                 if (!config.invalidateMappings.empty())
713                                 {
714                                         vector<VkMappedMemoryRange> ranges;
715
716                                         for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
717                                         {
718                                                 const VkMappedMemoryRange range =
719                                                 {
720                                                         VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
721                                                         DE_NULL,
722
723                                                         *memory,
724                                                         config.invalidateMappings[ndx].offset * atomSize,
725                                                         config.invalidateMappings[ndx].size * atomSize
726                                                 };
727
728                                                 ranges.push_back(range);
729                                         }
730
731                                         VK_CHECK(vkd.invalidateMappedMemoryRanges(device, static_cast<deUint32>(ranges.size()), &ranges[0]));
732                                 }
733
734                                 if (!compareAndLogBuffer(log, static_cast<size_t>(mappingSize), mapping, &reference[static_cast<size_t>(mappingOffset)]))
735                                         result.fail("Unexpected values read from mapped memory.");
736
737                                 vkd.unmapMemory(device, *memory);
738                         }
739                 }
740                 catch (const tcu::TestError& error)
741                 {
742                         result.fail(error.getMessage());
743                 }
744         }
745
746         if (!atLeastOneTestPerformed)
747                 result.addResult(QP_TEST_RESULT_NOT_SUPPORTED, "No suitable memory kind found to perform test.");
748
749         return tcu::TestStatus(result.getResult(), result.getMessage());
750 }
751
752 class MemoryMapping
753 {
754 public:
755                                                 MemoryMapping   (const MemoryRange&     range,
756                                                                                  void*                          ptr,
757                                                                                  ReferenceMemory&       reference);
758
759         void                            randomRead              (de::Random& rng);
760         void                            randomWrite             (de::Random& rng);
761         void                            randomModify    (de::Random& rng);
762
763         const MemoryRange&      getRange                (void) const { return m_range; }
764
765 private:
766         MemoryRange                     m_range;
767         void*                           m_ptr;
768         ReferenceMemory&        m_reference;
769 };
770
771 MemoryMapping::MemoryMapping (const MemoryRange&        range,
772                                                           void*                                 ptr,
773                                                           ReferenceMemory&              reference)
774         : m_range               (range)
775         , m_ptr                 (ptr)
776         , m_reference   (reference)
777 {
778         DE_ASSERT(range.size > 0);
779 }
780
781 void MemoryMapping::randomRead (de::Random& rng)
782 {
783         const size_t count = (size_t)rng.getInt(0, 100);
784
785         for (size_t ndx = 0; ndx < count; ndx++)
786         {
787                 const size_t    pos     = (size_t)(rng.getUint64() % (deUint64)m_range.size);
788                 const deUint8   val     = ((deUint8*)m_ptr)[pos];
789
790                 TCU_CHECK(m_reference.read((size_t)(m_range.offset + pos), val));
791         }
792 }
793
794 void MemoryMapping::randomWrite (de::Random& rng)
795 {
796         const size_t count = (size_t)rng.getInt(0, 100);
797
798         for (size_t ndx = 0; ndx < count; ndx++)
799         {
800                 const size_t    pos     = (size_t)(rng.getUint64() % (deUint64)m_range.size);
801                 const deUint8   val     = rng.getUint8();
802
803                 ((deUint8*)m_ptr)[pos]  = val;
804                 m_reference.write((size_t)(m_range.offset + pos), val);
805         }
806 }
807
808 void MemoryMapping::randomModify (de::Random& rng)
809 {
810         const size_t count = (size_t)rng.getInt(0, 100);
811
812         for (size_t ndx = 0; ndx < count; ndx++)
813         {
814                 const size_t    pos             = (size_t)(rng.getUint64() % (deUint64)m_range.size);
815                 const deUint8   val             = ((deUint8*)m_ptr)[pos];
816                 const deUint8   mask    = rng.getUint8();
817
818                 ((deUint8*)m_ptr)[pos]  = val ^ mask;
819                 TCU_CHECK(m_reference.modifyXor((size_t)(m_range.offset + pos), val, mask));
820         }
821 }
822
823 VkDeviceSize randomSize (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxSize)
824 {
825         const VkDeviceSize maxSizeInAtoms = maxSize / atomSize;
826
827         DE_ASSERT(maxSizeInAtoms > 0);
828
829         return maxSizeInAtoms > 1
830                         ? atomSize * (1 + (VkDeviceSize)(rng.getUint64() % (deUint64)maxSizeInAtoms))
831                         : atomSize;
832 }
833
834 VkDeviceSize randomOffset (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxOffset)
835 {
836         const VkDeviceSize maxOffsetInAtoms = maxOffset / atomSize;
837
838         return maxOffsetInAtoms > 0
839                         ? atomSize * (VkDeviceSize)(rng.getUint64() % (deUint64)(maxOffsetInAtoms + 1))
840                         : 0;
841 }
842
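// Fills 'ranges' with 'count' random atom-aligned VkMappedMemoryRange entries that all fall inside
// [minOffset, minOffset + maxSize).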
843 void randomRanges (de::Random& rng, vector<VkMappedMemoryRange>& ranges, size_t count, VkDeviceMemory memory, VkDeviceSize minOffset, VkDeviceSize maxSize, VkDeviceSize atomSize)
844 {
845         ranges.resize(count);
846
847         for (size_t rangeNdx = 0; rangeNdx < count; rangeNdx++)
848         {
849                 const VkDeviceSize      size    = randomSize(rng, atomSize, maxSize);
850                 const VkDeviceSize      offset  = minOffset + randomOffset(rng, atomSize, maxSize - size);
851
852                 const VkMappedMemoryRange range =
853                 {
854                         VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
855                         DE_NULL,
856
857                         memory,
858                         offset,
859                         size
860                 };
861                 ranges[rangeNdx] = range;
862         }
863 }
864
865 class MemoryObject
866 {
867 public:
868                                                         MemoryObject                    (const DeviceInterface&         vkd,
869                                                                                                          VkDevice                                       device,
870                                                                                                          VkDeviceSize                           size,
871                                                                                                          deUint32                                       memoryTypeIndex,
872                                                                                                          VkDeviceSize                           atomSize,
873                                                                                                          VkDeviceSize                           memoryUsage,
874                                                                                                          VkDeviceSize                           referenceMemoryUsage);
875
876                                                         ~MemoryObject                   (void);
877
878         MemoryMapping*                  mapRandom                               (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
879         void                                    unmap                                   (void);
880
881         void                                    randomFlush                             (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
882         void                                    randomInvalidate                (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
883
884         VkDeviceSize                    getSize                                 (void) const { return m_size; }
885         MemoryMapping*                  getMapping                              (void) { return m_mapping; }
886
887         VkDeviceSize                    getMemoryUsage                  (void) const { return m_memoryUsage; }
888         VkDeviceSize                    getReferenceMemoryUsage (void) const { return m_referenceMemoryUsage; }
889 private:
890         const DeviceInterface&  m_vkd;
891         const VkDevice                  m_device;
892
893         const deUint32                  m_memoryTypeIndex;
894         const VkDeviceSize              m_size;
895         const VkDeviceSize              m_atomSize;
896         const VkDeviceSize              m_memoryUsage;
897         const VkDeviceSize              m_referenceMemoryUsage;
898
899         Move<VkDeviceMemory>    m_memory;
900
901         MemoryMapping*                  m_mapping;
902         ReferenceMemory                 m_referenceMemory;
903 };
904
905 MemoryObject::MemoryObject (const DeviceInterface&              vkd,
906                                                         VkDevice                                        device,
907                                                         VkDeviceSize                            size,
908                                                         deUint32                                        memoryTypeIndex,
909                                                         VkDeviceSize                            atomSize,
910                                                         VkDeviceSize                            memoryUsage,
911                                                         VkDeviceSize                            referenceMemoryUsage)
912         : m_vkd                                         (vkd)
913         , m_device                                      (device)
914         , m_memoryTypeIndex                     (memoryTypeIndex)
915         , m_size                                        (size)
916         , m_atomSize                            (atomSize)
917         , m_memoryUsage                         (memoryUsage)
918         , m_referenceMemoryUsage        (referenceMemoryUsage)
919         , m_mapping                                     (DE_NULL)
920         , m_referenceMemory                     ((size_t)size, (size_t)m_atomSize)
921 {
922         m_memory = allocMemory(m_vkd, m_device, m_size, m_memoryTypeIndex);
923 }
924
925 MemoryObject::~MemoryObject (void)
926 {
927         delete m_mapping;
928 }
929
930 MemoryMapping* MemoryObject::mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
931 {
932         const VkDeviceSize      size    = randomSize(rng, m_atomSize, m_size);
933         const VkDeviceSize      offset  = randomOffset(rng, m_atomSize, m_size - size);
934         void*                           ptr;
935
936         DE_ASSERT(!m_mapping);
937
938         VK_CHECK(vkd.mapMemory(device, *m_memory, offset, size, 0u, &ptr));
939         TCU_CHECK(ptr);
940         m_mapping = new MemoryMapping(MemoryRange(offset, size), ptr, m_referenceMemory);
941
942         return m_mapping;
943 }
944
945 void MemoryObject::unmap (void)
946 {
947         m_vkd.unmapMemory(m_device, *m_memory);
948
949         delete m_mapping;
950         m_mapping = DE_NULL;
951 }
952
953 void MemoryObject::randomFlush (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
954 {
955         const size_t                            rangeCount      = (size_t)rng.getInt(1, 10);
956         vector<VkMappedMemoryRange>     ranges          (rangeCount);
957
958         randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);
959
960         for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
961                 m_referenceMemory.flush((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);
962
963         VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
964 }
965
966 void MemoryObject::randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
967 {
968         const size_t                            rangeCount      = (size_t)rng.getInt(1, 10);
969         vector<VkMappedMemoryRange>     ranges          (rangeCount);
970
971         randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);
972
973         for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
974                 m_referenceMemory.invalidate((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);
975
976         VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
977 }
978
979 enum
980 {
981         MAX_MEMORY_USAGE_DIV = 2, // Use only 1/2 of each memory heap.
982         MAX_MEMORY_ALLOC_DIV = 2, // Do not alloc more than 1/2 of available space.
983 };
984
985 template<typename T>
986 void removeFirstEqual (vector<T>& vec, const T& val)
987 {
988         for (size_t ndx = 0; ndx < vec.size(); ndx++)
989         {
990                 if (vec[ndx] == val)
991                 {
992                         vec[ndx] = vec.back();
993                         vec.pop_back();
994                         return;
995                 }
996         }
997 }
998
999 enum MemoryClass
1000 {
1001         MEMORY_CLASS_SYSTEM = 0,
1002         MEMORY_CLASS_DEVICE,
1003
1004         MEMORY_CLASS_LAST
1005 };
1006
1007 // \todo [2016-04-20 pyry] Consider estimating memory fragmentation
1008 class TotalMemoryTracker
1009 {
1010 public:
1011                                         TotalMemoryTracker      (void)
1012         {
1013                 std::fill(DE_ARRAY_BEGIN(m_usage), DE_ARRAY_END(m_usage), 0);
1014         }
1015
1016         void                    allocate                        (MemoryClass memClass, VkDeviceSize size)
1017         {
1018                 m_usage[memClass] += size;
1019         }
1020
1021         void                    free                            (MemoryClass memClass, VkDeviceSize size)
1022         {
1023                 DE_ASSERT(size <= m_usage[memClass]);
1024                 m_usage[memClass] -= size;
1025         }
1026
1027         VkDeviceSize    getUsage                        (MemoryClass memClass) const
1028         {
1029                 return m_usage[memClass];
1030         }
1031
1032         VkDeviceSize    getTotalUsage           (void) const
1033         {
1034                 VkDeviceSize total = 0;
1035                 for (int ndx = 0; ndx < MEMORY_CLASS_LAST; ++ndx)
1036                         total += getUsage((MemoryClass)ndx);
1037                 return total;
1038         }
1039
1040 private:
1041         VkDeviceSize    m_usage[MEMORY_CLASS_LAST];
1042 };
1043
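// Host page size is assumed to be 4 KiB rather than queried from the platform; it is only used as
// a lower bound for the smallest allocation a system-memory heap is expected to satisfy.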
1044 VkDeviceSize getHostPageSize (void)
1045 {
1046         return 4096;
1047 }
1048
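// If any available memory type is host-coherent its mappings need no explicit flush or invalidate,
// so the effective minimum atom size is 1; otherwise the device's nonCoherentAtomSize applies.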
1049 VkDeviceSize getMinAtomSize (VkDeviceSize nonCoherentAtomSize, const vector<MemoryType>& memoryTypes)
1050 {
1051         for (size_t ndx = 0; ndx < memoryTypes.size(); ndx++)
1052         {
1053                 if ((memoryTypes[ndx].type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
1054                         return 1;
1055         }
1056
1057         return nonCoherentAtomSize;
1058 }
1059
1060 class MemoryHeap
1061 {
1062 public:
1063         MemoryHeap (const VkMemoryHeap&                 heap,
1064                                 const vector<MemoryType>&       memoryTypes,
1065                                 const PlatformMemoryLimits&     memoryLimits,
1066                                 const VkDeviceSize                      nonCoherentAtomSize,
1067                                 TotalMemoryTracker&                     totalMemTracker)
1068                 : m_heap                                (heap)
1069                 , m_memoryTypes                 (memoryTypes)
1070                 , m_limits                              (memoryLimits)
1071                 , m_nonCoherentAtomSize (nonCoherentAtomSize)
1072                 , m_minAtomSize                 (getMinAtomSize(nonCoherentAtomSize, memoryTypes))
1073                 , m_totalMemTracker             (totalMemTracker)
1074                 , m_usage                               (0)
1075         {
1076         }
1077
1078         ~MemoryHeap (void)
1079         {
1080                 for (vector<MemoryObject*>::iterator iter = m_objects.begin(); iter != m_objects.end(); ++iter)
1081                         delete *iter;
1082         }
1083
1084         bool                                                            full                    (void) const;
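        // Note: a heap that can never be allocated from (i.e. already full) is not considered empty even with zero usage.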
1085         bool                                                            empty                   (void) const
1086         {
1087                 return m_usage == 0 && !full();
1088         }
1089
1090         MemoryObject*                                           allocateRandom  (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
1091
1092         MemoryObject*                                           getRandomObject (de::Random& rng) const
1093         {
1094                 return rng.choose<MemoryObject*>(m_objects.begin(), m_objects.end());
1095         }
1096
1097         void                                                            free                    (MemoryObject* object)
1098         {
1099                 removeFirstEqual(m_objects, object);
1100                 m_usage -= object->getMemoryUsage();
1101                 m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, object->getReferenceMemoryUsage());
1102                 m_totalMemTracker.free(getMemoryClass(), object->getMemoryUsage());
1103                 delete object;
1104         }
1105
1106 private:
1107         MemoryClass                                                     getMemoryClass  (void) const
1108         {
1109                 if ((m_heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
1110                         return MEMORY_CLASS_DEVICE;
1111                 else
1112                         return MEMORY_CLASS_SYSTEM;
1113         }
1114
1115         const VkMemoryHeap                      m_heap;
1116         const vector<MemoryType>        m_memoryTypes;
1117         const PlatformMemoryLimits&     m_limits;
1118         const VkDeviceSize                      m_nonCoherentAtomSize;
1119         const VkDeviceSize                      m_minAtomSize;
1120         TotalMemoryTracker&                     m_totalMemTracker;
1121
1122         VkDeviceSize                            m_usage;
1123         vector<MemoryObject*>           m_objects;
1124 };
1125
1126 // A heap is full if there is not enough memory left to allocate a minimal memory object.
1127 bool MemoryHeap::full (void) const
1128 {
1129         DE_ASSERT(m_usage <= m_heap.size/MAX_MEMORY_USAGE_DIV);
1130
1131         const VkDeviceSize      availableInHeap         = m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
1132         const bool                      isUMA                           = m_limits.totalDeviceLocalMemory == 0;
1133         const MemoryClass       memClass                        = getMemoryClass();
1134         const VkDeviceSize      minAllocationSize       = de::max(m_minAtomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
1135         // Memory required for the reference: one byte plus one bit for each allocated byte, plus one bit per atom (m_minAtomSize).
1136         const VkDeviceSize      minReferenceSize        = minAllocationSize
1137                                                                                         + divRoundUp<VkDeviceSize>(minAllocationSize,  8)
1138                                                                                         + divRoundUp<VkDeviceSize>(minAllocationSize,  m_minAtomSize * 8);
1139
1140         if (isUMA)
1141         {
1142                 const VkDeviceSize      totalUsage      = m_totalMemTracker.getTotalUsage();
1143                 const VkDeviceSize      totalSysMem     = (VkDeviceSize)m_limits.totalSystemMemory;
1144
1145                 DE_ASSERT(totalUsage <= totalSysMem);
1146
1147                 return (minAllocationSize + minReferenceSize) > (totalSysMem - totalUsage)
1148                                 || minAllocationSize > availableInHeap;
1149         }
1150         else
1151         {
1152                 const VkDeviceSize      totalUsage              = m_totalMemTracker.getTotalUsage();
1153                 const VkDeviceSize      totalSysMem             = (VkDeviceSize)m_limits.totalSystemMemory;
1154
1155                 const VkDeviceSize      totalMemClass   = memClass == MEMORY_CLASS_SYSTEM
1156                                                                                         ? m_limits.totalSystemMemory
1157                                                                                         : m_limits.totalDeviceLocalMemory;
1158                 const VkDeviceSize      usedMemClass    = m_totalMemTracker.getUsage(memClass);
1159
1160                 DE_ASSERT(usedMemClass <= totalMemClass);
1161
1162                 return minAllocationSize > availableInHeap
1163                                 || minAllocationSize > (totalMemClass - usedMemClass)
1164                                 || minReferenceSize > (totalSysMem - totalUsage);
1165         }
1166 }
1167
1168 MemoryObject* MemoryHeap::allocateRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
1169 {
1170         pair<MemoryType, VkDeviceSize> memoryTypeMaxSizePair;
1171
1172         // Pick random memory type
1173         {
1174                 vector<pair<MemoryType, VkDeviceSize> > memoryTypes;
1175
1176                 const VkDeviceSize      availableInHeap         = m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
1177                 const bool                      isUMA                           = m_limits.totalDeviceLocalMemory == 0;
1178                 const MemoryClass       memClass                        = getMemoryClass();
1179
1180                 // Collect the memory types that can be allocated from, and the maximum allocation size for each.
1181                 // A memory type can only be allocated from if a minimal allocation still fits into the available memory.
1182                 for (size_t memoryTypeNdx = 0; memoryTypeNdx < m_memoryTypes.size(); memoryTypeNdx++)
1183                 {
1184                         const MemoryType        type                                            = m_memoryTypes[memoryTypeNdx];
1185                         const VkDeviceSize      atomSize                                        = (type.type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
1186                                                                                                                         ? 1
1187                                                                                                                         : m_nonCoherentAtomSize;
1188                         const VkDeviceSize      allocationSizeGranularity       = de::max(atomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
1189                         const VkDeviceSize      minAllocationSize                       = allocationSizeGranularity;
1190                         const VkDeviceSize      minReferenceSize                        = minAllocationSize
1191                                                                                                                         + divRoundUp<VkDeviceSize>(minAllocationSize,  8)
1192                                                                                                                         + divRoundUp<VkDeviceSize>(minAllocationSize,  atomSize * 8);
1193
1194                         if (isUMA)
1195                         {
1196                                 // The max allocation size calculation is a little tricky, since the reference memory needs additional bits for each allocated byte.
1197                                 const VkDeviceSize      totalUsage                              = m_totalMemTracker.getTotalUsage();
1198                                 const VkDeviceSize      totalSysMem                             = (VkDeviceSize)m_limits.totalSystemMemory;
1199                                 const VkDeviceSize      availableBits                   = (totalSysMem - totalUsage) * 8;
1200                                 // availableBits == maxAllocationSizeBits + maxAllocationReferenceSizeBits
1201                                 // maxAllocationReferenceSizeBits == maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
1202                                 // availableBits == maxAllocationSizeBits + maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
1203                                 // availableBits == 2 * maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
1204                                 // availableBits == (2 + 1/8 + 1/atomSizeBits) * maxAllocationSizeBits
1205                                 // 8 * availableBits == (16 + 1 + 8/atomSizeBits) * maxAllocationSizeBits
1206                                 // atomSizeBits * 8 * availableBits == (17 * atomSizeBits + 8) * maxAllocationSizeBits
1207                                 // maxAllocationSizeBits == atomSizeBits * 8 * availableBits / (17 * atomSizeBits + 8)
1208                                 // maxAllocationSizeBytes == maxAllocationSizeBits / 8
1209                                 // maxAllocationSizeBytes == atomSizeBits * availableBits / (17 * atomSizeBits + 8)
1210                                 // atomSizeBits = atomSize * 8
1211                                 // maxAllocationSizeBytes == atomSize * 8 * availableBits / (17 * atomSize * 8 + 8)
1212                                 // maxAllocationSizeBytes == atomSize * availableBits / (17 * atomSize + 1)
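                                // Example: with a host-coherent type (atomSize == 1) this reduces to availableBits / 18 bytes, i.e. 4/9 of the free system memory.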
1213                                 const VkDeviceSize      maxAllocationSize               = roundDownToMultiple(((atomSize * availableBits) / (17 * atomSize + 1)), allocationSizeGranularity);
1214
1215                                 DE_ASSERT(totalUsage <= totalSysMem);
1216                                 DE_ASSERT(maxAllocationSize <= totalSysMem);
1217
1218                                 if (minAllocationSize + minReferenceSize <= (totalSysMem - totalUsage) && minAllocationSize <= availableInHeap)
1219                                 {
1220                                         DE_ASSERT(maxAllocationSize >= minAllocationSize);
1221                                         memoryTypes.push_back(std::make_pair(type, maxAllocationSize));
1222                                 }
1223                         }
1224                         else
1225                         {
1226                                 // The max allocation size calculation is a little tricky, since the reference memory needs additional bits for each allocated byte.
1227                                 const VkDeviceSize      totalUsage                      = m_totalMemTracker.getTotalUsage();
1228                                 const VkDeviceSize      totalSysMem                     = (VkDeviceSize)m_limits.totalSystemMemory;
1229
1230                                 const VkDeviceSize      totalMemClass           = memClass == MEMORY_CLASS_SYSTEM
1231                                                                                                                 ? m_limits.totalSystemMemory
1232                                                                                                                 : m_limits.totalDeviceLocalMemory;
1233                                 const VkDeviceSize      usedMemClass            = m_totalMemTracker.getUsage(memClass);
1234                                 // availableRefBits = maxRefBits + maxRefBits/8 + maxRefBits/atomSizeBits
1235                                 // availableRefBits = maxRefBits * (1 + 1/8 + 1/atomSizeBits)
1236                                 // 8 * availableRefBits = maxRefBits * (8 + 1 + 8/atomSizeBits)
1237                                 // 8 * atomSizeBits * availableRefBits = maxRefBits * (9 * atomSizeBits + 8)
1238                                 // maxRefBits = 8 * atomSizeBits * availableRefBits / (9 * atomSizeBits + 8)
1239                                 // atomSizeBits = atomSize * 8
1240                                 // maxRefBits = 8 * atomSize * 8 * availableRefBits / (9 * atomSize * 8 + 8)
1241                                 // maxRefBits = atomSize * 8 * availableRefBits / (9 * atomSize + 1)
1242                                 // maxRefBytes = atomSize * availableRefBits / (9 * atomSize + 1)
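                                // Example: with a host-coherent type (atomSize == 1) the reference-limited bound is availableRefBits / 10 bytes, i.e. 8/10 of the free system memory.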
1243                                 const VkDeviceSize      maxAllocationSize       = roundDownToMultiple(de::min(totalMemClass - usedMemClass, (atomSize * 8 * (totalSysMem - totalUsage)) / (9 * atomSize + 1)), allocationSizeGranularity);
1244
1245                                 DE_ASSERT(usedMemClass <= totalMemClass);
1246
1247                                 if (minAllocationSize <= availableInHeap
1248                                                 && minAllocationSize <= (totalMemClass - usedMemClass)
1249                                                 && minReferenceSize <= (totalSysMem - totalUsage))
1250                                 {
1251                                         DE_ASSERT(maxAllocationSize >= minAllocationSize);
1252                                         memoryTypes.push_back(std::make_pair(type, maxAllocationSize));
1253                                 }
1254
1255                         }
1256                 }
1257
1258                 memoryTypeMaxSizePair = rng.choose<pair<MemoryType, VkDeviceSize> >(memoryTypes.begin(), memoryTypes.end());
1259         }
1260
1261         const MemoryType                type                                            = memoryTypeMaxSizePair.first;
1262         const VkDeviceSize              maxAllocationSize                       = memoryTypeMaxSizePair.second / MAX_MEMORY_ALLOC_DIV;
1263         const VkDeviceSize              atomSize                                        = (type.type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
1264                                                                                                                 ? 1
1265                                                                                                                 : m_nonCoherentAtomSize;
1266         const VkDeviceSize              allocationSizeGranularity       = de::max(atomSize, getMemoryClass() == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
1267         const VkDeviceSize              size                                            = randomSize(rng, atomSize, maxAllocationSize);
1268         const VkDeviceSize              memoryUsage                                     = roundUpToMultiple(size, allocationSizeGranularity);
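        // Host memory needed for the reference copy: one byte per byte, plus one bit per byte, plus one bit per atom.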
1269         const VkDeviceSize              referenceMemoryUsage            = size + divRoundUp<VkDeviceSize>(size, 8) + divRoundUp<VkDeviceSize>(size / atomSize, 8);
1270
1271         DE_ASSERT(size <= maxAllocationSize);
1272
1273         MemoryObject* const             object  = new MemoryObject(vkd, device, size, type.index, atomSize, memoryUsage, referenceMemoryUsage);
1274
1275         m_usage += memoryUsage;
1276         m_totalMemTracker.allocate(getMemoryClass(), memoryUsage);
1277         m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, referenceMemoryUsage);
1278         m_objects.push_back(object);
1279
1280         return object;
1281 }
1282
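// Estimated host memory required to track one device memory allocation (driver footprint plus test bookkeeping).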
1283 size_t getMemoryObjectSystemSize (Context& context)
1284 {
1285         return computeDeviceMemorySystemMemFootprint(context.getDeviceInterface(), context.getDevice())
1286                    + sizeof(MemoryObject)
1287                    + sizeof(de::SharedPtr<MemoryObject>);
1288 }
1289
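// Estimated host memory required to track one memory mapping.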
1290 size_t getMemoryMappingSystemSize (void)
1291 {
1292         return sizeof(MemoryMapping) + sizeof(de::SharedPtr<MemoryMapping>);
1293 }
1294
1295 class RandomMemoryMappingInstance : public TestInstance
1296 {
1297 public:
1298         RandomMemoryMappingInstance (Context& context, deUint32 seed)
1299                 : TestInstance                          (context)
1300                 , m_memoryObjectSysMemSize      (getMemoryObjectSystemSize(context))
1301                 , m_memoryMappingSysMemSize     (getMemoryMappingSystemSize())
1302                 , m_memoryLimits                        (getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
1303                 , m_rng                                         (seed)
1304                 , m_opNdx                                       (0)
1305         {
1306                 const VkPhysicalDevice                                  physicalDevice          = context.getPhysicalDevice();
1307                 const InstanceInterface&                                vki                                     = context.getInstanceInterface();
1308                 const VkPhysicalDeviceMemoryProperties  memoryProperties        = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
1309                 // \todo [2016-05-26 misojarvi] Remove the zero check once drivers correctly report 1 instead of 0
1310                 const VkDeviceSize                                              nonCoherentAtomSize     = context.getDeviceProperties().limits.nonCoherentAtomSize != 0
1311                                                                                                                                         ? context.getDeviceProperties().limits.nonCoherentAtomSize
1312                                                                                                                                         : 1;
1313
1314                 // Initialize heaps
1315                 {
1316                         vector<vector<MemoryType> >     memoryTypes     (memoryProperties.memoryHeapCount);
1317
1318                         for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < memoryProperties.memoryTypeCount; memoryTypeNdx++)
1319                         {
1320                                 if (memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
1321                                         memoryTypes[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].push_back(MemoryType(memoryTypeNdx, memoryProperties.memoryTypes[memoryTypeNdx]));
1322                         }
1323
1324                         for (deUint32 heapIndex = 0; heapIndex < memoryProperties.memoryHeapCount; heapIndex++)
1325                         {
1326                                 const VkMemoryHeap      heapInfo        = memoryProperties.memoryHeaps[heapIndex];
1327
1328                                 if (!memoryTypes[heapIndex].empty())
1329                                 {
1330                                         const de::SharedPtr<MemoryHeap> heap    (new MemoryHeap(heapInfo, memoryTypes[heapIndex], m_memoryLimits, nonCoherentAtomSize, m_totalMemTracker));
1331
1332                                         TCU_CHECK_INTERNAL(!heap->full());
1333
1334                                         m_memoryHeaps.push_back(heap);
1335                                 }
1336                         }
1337                 }
1338         }
1339
1340         ~RandomMemoryMappingInstance (void)
1341         {
1342         }
1343
1344         tcu::TestStatus iterate (void)
1345         {
1346                 const size_t                    opCount                                         = 100;
1347                 const float                             memoryOpProbability                     = 0.5f;         // 0.50
1348                 const float                             flushInvalidateProbability      = 0.4f;         // 0.20
1349                 const float                             mapProbability                          = 0.50f;        // 0.15
1350                 const float                             unmapProbability                        = 0.25f;        // 0.075
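                // The trailing comments give the approximate overall probability of hitting each branch per iteration, since the checks are chained (e.g. flush/invalidate: 0.5 * 0.4 = 0.20).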
1351
1352                 const float                             allocProbability                        = 0.75f; // Versus free
1353
1354                 const VkDevice                  device                                          = m_context.getDevice();
1355                 const DeviceInterface&  vkd                                                     = m_context.getDeviceInterface();
1356
1357                 const VkDeviceSize              sysMemUsage                                     = (m_memoryLimits.totalDeviceLocalMemory == 0)
1358                                                                                                                         ? m_totalMemTracker.getTotalUsage()
1359                                                                                                                         : m_totalMemTracker.getUsage(MEMORY_CLASS_SYSTEM);
1360
1361                 if (!m_memoryMappings.empty() && m_rng.getFloat() < memoryOpProbability)
1362                 {
1363                         // Perform operations on mapped memory
1364                         MemoryMapping* const    mapping = m_rng.choose<MemoryMapping*>(m_memoryMappings.begin(), m_memoryMappings.end());
1365
1366                         enum Op
1367                         {
1368                                 OP_READ = 0,
1369                                 OP_WRITE,
1370                                 OP_MODIFY,
1371                                 OP_LAST
1372                         };
1373
1374                         const Op op = (Op)(m_rng.getUint32() % OP_LAST);
1375
1376                         switch (op)
1377                         {
1378                                 case OP_READ:
1379                                         mapping->randomRead(m_rng);
1380                                         break;
1381
1382                                 case OP_WRITE:
1383                                         mapping->randomWrite(m_rng);
1384                                         break;
1385
1386                                 case OP_MODIFY:
1387                                         mapping->randomModify(m_rng);
1388                                         break;
1389
1390                                 default:
1391                                         DE_FATAL("Invalid operation");
1392                         }
1393                 }
1394                 else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < flushInvalidateProbability)
1395                 {
1396                         MemoryObject* const     object  = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());
1397
1398                         if (m_rng.getBool())
1399                                 object->randomFlush(vkd, device, m_rng);
1400                         else
1401                                 object->randomInvalidate(vkd, device, m_rng);
1402                 }
1403                 else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < unmapProbability)
1404                 {
1405                         // Unmap memory object
1406                         MemoryObject* const     object  = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());
1407
1408                         // Remove mapping
1409                         removeFirstEqual(m_memoryMappings, object->getMapping());
1410
1411                         object->unmap();
1412                         removeFirstEqual(m_mappedMemoryObjects, object);
1413                         m_nonMappedMemoryObjects.push_back(object);
1414
1415                         m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
1416                 }
1417                 else if (!m_nonMappedMemoryObjects.empty() &&
1418                                  (m_rng.getFloat() < mapProbability) &&
1419                                  (sysMemUsage+m_memoryMappingSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory))
1420                 {
1421                         // Map memory object
1422                         MemoryObject* const             object  = m_rng.choose<MemoryObject*>(m_nonMappedMemoryObjects.begin(), m_nonMappedMemoryObjects.end());
1423                         MemoryMapping*                  mapping = object->mapRandom(vkd, device, m_rng);
1424
1425                         m_memoryMappings.push_back(mapping);
1426                         m_mappedMemoryObjects.push_back(object);
1427                         removeFirstEqual(m_nonMappedMemoryObjects, object);
1428
1429                         m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
1430                 }
1431                 else
1432                 {
1433                         // Partition heaps into non-full (can allocate) and non-empty (can free)
1434                         vector<MemoryHeap*>             nonFullHeaps;
1435                         vector<MemoryHeap*>             nonEmptyHeaps;
1436
1437                         if (sysMemUsage+m_memoryObjectSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory)
1438                         {
1439                                 // For the duration of the partitioning, reserve MemoryObject space from system memory
1440                                 m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
1441
1442                                 for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
1443                                          heapIter != m_memoryHeaps.end();
1444                                          ++heapIter)
1445                                 {
1446                                         if (!(*heapIter)->full())
1447                                                 nonFullHeaps.push_back(heapIter->get());
1448
1449                                         if (!(*heapIter)->empty())
1450                                                 nonEmptyHeaps.push_back(heapIter->get());
1451                                 }
1452
1453                                 m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
1454                         }
1455                         else
1456                         {
1457                                 // Not possible to allocate even a MemoryObject from system memory; only collect non-empty heaps to free from
1458                                 for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
1459                                          heapIter != m_memoryHeaps.end();
1460                                          ++heapIter)
1461                                 {
1462                                         if (!(*heapIter)->empty())
1463                                                 nonEmptyHeaps.push_back(heapIter->get());
1464                                 }
1465                         }
1466
1467                         if (!nonFullHeaps.empty() && (nonEmptyHeaps.empty() || m_rng.getFloat() < allocProbability))
1468                         {
1469                                 // Reserve MemoryObject from sys mem first
1470                                 m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
1471
1472                                 // Allocate more memory objects
1473                                 MemoryHeap* const       heap    = m_rng.choose<MemoryHeap*>(nonFullHeaps.begin(), nonFullHeaps.end());
1474                                 MemoryObject* const     object  = heap->allocateRandom(vkd, device, m_rng);
1475
1476                                 m_nonMappedMemoryObjects.push_back(object);
1477                         }
1478                         else
1479                         {
1480                                 // Free memory objects
1481                                 MemoryHeap* const               heap    = m_rng.choose<MemoryHeap*>(nonEmptyHeaps.begin(), nonEmptyHeaps.end());
1482                                 MemoryObject* const             object  = heap->getRandomObject(m_rng);
1483
1484                                 // Remove mapping
1485                                 if (object->getMapping())
1486                                 {
1487                                         removeFirstEqual(m_memoryMappings, object->getMapping());
1488                                         m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, m_memoryMappingSysMemSize);
1489                                 }
1490
1491                                 removeFirstEqual(m_mappedMemoryObjects, object);
1492                                 removeFirstEqual(m_nonMappedMemoryObjects, object);
1493
1494                                 heap->free(object);
1495                                 m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
1496                         }
1497                 }
1498
1499                 m_opNdx += 1;
1500                 if (m_opNdx == opCount)
1501                         return tcu::TestStatus::pass("Pass");
1502                 else
1503                         return tcu::TestStatus::incomplete();
1504         }
1505
1506 private:
1507         const size_t                                            m_memoryObjectSysMemSize;
1508         const size_t                                            m_memoryMappingSysMemSize;
1509         const PlatformMemoryLimits                      m_memoryLimits;
1510
1511         de::Random                                                      m_rng;
1512         size_t                                                          m_opNdx;
1513
1514         TotalMemoryTracker                                      m_totalMemTracker;
1515         vector<de::SharedPtr<MemoryHeap> >      m_memoryHeaps;
1516
1517         vector<MemoryObject*>                           m_mappedMemoryObjects;
1518         vector<MemoryObject*>                           m_nonMappedMemoryObjects;
1519         vector<MemoryMapping*>                          m_memoryMappings;
1520 };
1521
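// Flush/invalidate/remap variants applied on top of a mapped allocation by the full- and sub-mapping cases below.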
1522 enum Op
1523 {
1524         OP_NONE = 0,
1525
1526         OP_FLUSH,
1527         OP_SUB_FLUSH,
1528         OP_SUB_FLUSH_SEPARATE,
1529         OP_SUB_FLUSH_OVERLAPPING,
1530
1531         OP_INVALIDATE,
1532         OP_SUB_INVALIDATE,
1533         OP_SUB_INVALIDATE_SEPARATE,
1534         OP_SUB_INVALIDATE_OVERLAPPING,
1535
1536         OP_REMAP,
1537
1538         OP_LAST
1539 };
1540
1541 TestConfig subMappedConfig (VkDeviceSize                                allocationSize,
1542                                                         const MemoryRange&                      mapping,
1543                                                         Op                                                      op,
1544                                                         deUint32                                        seed,
1545                                                         AllocationKind                          allocationKind)
1546 {
1547         TestConfig config;
1548
1549         config.allocationSize   = allocationSize;
1550         config.seed                             = seed;
1551         config.mapping                  = mapping;
1552         config.remap                    = false;
1553         config.allocationKind   = allocationKind;
1554
1555         switch (op)
1556         {
1557                 case OP_NONE:
1558                         return config;
1559
1560                 case OP_REMAP:
1561                         config.remap = true;
1562                         return config;
1563
1564                 case OP_FLUSH:
1565                         config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
1566                         return config;
1567
1568                 case OP_SUB_FLUSH:
1569                         DE_ASSERT(mapping.size / 4 > 0);
1570
1571                         config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
1572                         return config;
1573
1574                 case OP_SUB_FLUSH_SEPARATE:
1575                         DE_ASSERT(mapping.size / 2 > 0);
1576
1577                         config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size /  2, mapping.size - (mapping.size / 2)));
1578                         config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
1579
1580                         return config;
1581
1582                 case OP_SUB_FLUSH_OVERLAPPING:
1583                         DE_ASSERT((mapping.size / 3) > 0);
1584
1585                         config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size /  3, mapping.size - (mapping.size / 2)));
1586                         config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
1587
1588                         return config;
1589
1590                 case OP_INVALIDATE:
1591                         config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
1592                         config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
1593                         return config;
1594
1595                 case OP_SUB_INVALIDATE:
1596                         DE_ASSERT(mapping.size / 4 > 0);
1597
1598                         config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
1599                         config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
1600                         return config;
1601
1602                 case OP_SUB_INVALIDATE_SEPARATE:
1603                         DE_ASSERT(mapping.size / 2 > 0);
1604
1605                         config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size /  2, mapping.size - (mapping.size / 2)));
1606                         config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
1607
1608                         config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size /  2, mapping.size - (mapping.size / 2)));
1609                         config.invalidateMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
1610
1611                         return config;
1612
1613                 case OP_SUB_INVALIDATE_OVERLAPPING:
1614                         DE_ASSERT((mapping.size / 3) > 0);
1615
1616                         config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size /  3, mapping.size - (mapping.size / 2)));
1617                         config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
1618
1619                         config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size /  3, mapping.size - (mapping.size / 2)));
1620                         config.invalidateMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
1621
1622                         return config;
1623
1624                 default:
1625                         DE_FATAL("Unknown Op");
1626                         return TestConfig();
1627         }
1628 }
1629
1630 TestConfig fullMappedConfig (VkDeviceSize       allocationSize,
1631                                                          Op                             op,
1632                                                          deUint32               seed,
1633                                                          AllocationKind allocationKind)
1634 {
1635         return subMappedConfig(allocationSize, MemoryRange(0, allocationSize), op, seed, allocationKind);
1636 }
1637
1638 } // anonymous
1639
1640 tcu::TestCaseGroup* createMappingTests (tcu::TestContext& testCtx)
1641 {
1642         de::MovePtr<tcu::TestCaseGroup>         group                                                   (new tcu::TestCaseGroup(testCtx, "mapping", "Memory mapping tests."));
1643         de::MovePtr<tcu::TestCaseGroup>         dedicated                                               (new tcu::TestCaseGroup(testCtx, "dedicated_alloc", "Dedicated memory mapping tests."));
1644         de::MovePtr<tcu::TestCaseGroup>         sets[]                                                  =
1645         {
1646                 de::MovePtr<tcu::TestCaseGroup> (new tcu::TestCaseGroup(testCtx, "suballocation", "Suballocated memory mapping tests.")),
1647                 de::MovePtr<tcu::TestCaseGroup> (new tcu::TestCaseGroup(testCtx, "buffer", "Buffer dedicated memory mapping tests.")),
1648                 de::MovePtr<tcu::TestCaseGroup> (new tcu::TestCaseGroup(testCtx, "image", "Image dedicated memory mapping tests."))
1649         };
1650
1651         const VkDeviceSize allocationSizes[] =
1652         {
1653                 33, 257, 4087, 8095, 1*1024*1024 + 1
1654         };
1655
1656         const VkDeviceSize offsets[] =
1657         {
1658                 0, 17, 129, 255, 1025, 32*1024+1
1659         };
1660
1661         const VkDeviceSize sizes[] =
1662         {
1663                 31, 255, 1025, 4085, 1*1024*1024 - 1
1664         };
1665
1666         const struct
1667         {
1668                 const Op                        op;
1669                 const char* const       name;
1670         } ops[] =
1671         {
1672                 { OP_NONE,                                              "simple"                                        },
1673                 { OP_REMAP,                                             "remap"                                         },
1674                 { OP_FLUSH,                                             "flush"                                         },
1675                 { OP_SUB_FLUSH,                                 "subflush"                                      },
1676                 { OP_SUB_FLUSH_SEPARATE,                "subflush_separate"                     },
1677                 { OP_SUB_FLUSH_OVERLAPPING,             "subflush_overlapping"          },
1678
1679                 { OP_INVALIDATE,                                "invalidate"                            },
1680                 { OP_SUB_INVALIDATE,                    "subinvalidate"                         },
1681                 { OP_SUB_INVALIDATE_SEPARATE,   "subinvalidate_separate"        },
1682                 { OP_SUB_INVALIDATE_OVERLAPPING,        "subinvalidate_overlapping"     }
1683         };
1684
1685         // .full
1686         for (size_t allocationKindNdx = 0; allocationKindNdx < ALLOCATION_KIND_LAST; allocationKindNdx++)
1687         {
1688                 de::MovePtr<tcu::TestCaseGroup> fullGroup (new tcu::TestCaseGroup(testCtx, "full", "Map memory completely."));
1689
1690                 for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
1691                 {
1692                         const VkDeviceSize                              allocationSize          = allocationSizes[allocationSizeNdx];
1693                         de::MovePtr<tcu::TestCaseGroup> allocationSizeGroup     (new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
1694
1695                         for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
1696                         {
1697                                 const Op                        op              = ops[opNdx].op;
1698                                 const char* const       name    = ops[opNdx].name;
1699                                 const deUint32          seed    = (deUint32)(opNdx * allocationSizeNdx);
1700                                 const TestConfig        config  = fullMappedConfig(allocationSize, op, seed, static_cast<AllocationKind>(allocationKindNdx));
1701
1702                                 addFunctionCase(allocationSizeGroup.get(), name, name, testMemoryMapping, config);
1703                         }
1704
1705                         fullGroup->addChild(allocationSizeGroup.release());
1706                 }
1707
1708                 sets[allocationKindNdx]->addChild(fullGroup.release());
1709         }
1710
1711         // .sub
1712         for (size_t allocationKindNdx = 0; allocationKindNdx < ALLOCATION_KIND_LAST; allocationKindNdx++)
1713         {
1714                 de::MovePtr<tcu::TestCaseGroup> subGroup (new tcu::TestCaseGroup(testCtx, "sub", "Map part of the memory."));
1715
1716                 for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
1717                 {
1718                         const VkDeviceSize                              allocationSize          = allocationSizes[allocationSizeNdx];
1719                         de::MovePtr<tcu::TestCaseGroup> allocationSizeGroup     (new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
1720
1721                         for (size_t offsetNdx = 0; offsetNdx < DE_LENGTH_OF_ARRAY(offsets); offsetNdx++)
1722                         {
1723                                 const VkDeviceSize                              offset                  = offsets[offsetNdx];
1724
1725                                 if (offset >= allocationSize)
1726                                         continue;
1727
1728                                 de::MovePtr<tcu::TestCaseGroup> offsetGroup             (new tcu::TestCaseGroup(testCtx, ("offset_" + de::toString(offset)).c_str(), ""));
1729
1730                                 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
1731                                 {
1732                                         const VkDeviceSize                              size            = sizes[sizeNdx];
1733
1734                                         if (offset + size > allocationSize)
1735                                                 continue;
1736
1737                                         if (offset == 0 && size == allocationSize)
1738                                                 continue;
1739
1740                                         de::MovePtr<tcu::TestCaseGroup> sizeGroup       (new tcu::TestCaseGroup(testCtx, ("size_" + de::toString(size)).c_str(), ""));
1741
1742                                         for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
1743                                         {
1744                                                 const deUint32          seed    = (deUint32)(opNdx * allocationSizeNdx);
1745                                                 const Op                        op              = ops[opNdx].op;
1746                                                 const char* const       name    = ops[opNdx].name;
1747                                                 const TestConfig        config  = subMappedConfig(allocationSize, MemoryRange(offset, size), op, seed, static_cast<AllocationKind>(allocationKindNdx));
1748
1749                                                 addFunctionCase(sizeGroup.get(), name, name, testMemoryMapping, config);
1750                                         }
1751
1752                                         offsetGroup->addChild(sizeGroup.release());
1753                                 }
1754
1755                                 allocationSizeGroup->addChild(offsetGroup.release());
1756                         }
1757
1758                         subGroup->addChild(allocationSizeGroup.release());
1759                 }
1760
1761                 sets[allocationKindNdx]->addChild(subGroup.release());
1762         }
1763
1764         // .random
1765         {
1766                 de::MovePtr<tcu::TestCaseGroup> randomGroup     (new tcu::TestCaseGroup(testCtx, "random", "Random memory mapping tests."));
1767                 de::Random                                              rng                     (3927960301u);
1768
1769                 for (size_t ndx = 0; ndx < 100; ndx++)
1770                 {
1771                         const deUint32          seed    = rng.getUint32();
1772                         const std::string       name    = de::toString(ndx);
1773
1774                         randomGroup->addChild(new InstanceFactory1<RandomMemoryMappingInstance, deUint32>(testCtx, tcu::NODETYPE_SELF_VALIDATE, name, "Random case", seed));
1775                 }
1776
1777                 sets[static_cast<deUint32>(ALLOCATION_KIND_SUBALLOCATED)]->addChild(randomGroup.release());
1778         }
1779
1780         group->addChild(sets[0].release());
1781         dedicated->addChild(sets[1].release());
1782         dedicated->addChild(sets[2].release());
1783         group->addChild(dedicated.release());
1784
1785         return group.release();
1786 }
1787
1788 } // memory
1789 } // vkt