1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 Google Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Simple memory mapping tests.
22 *//*--------------------------------------------------------------------*/
24 #include "vktMemoryMappingTests.hpp"
26 #include "vktTestCaseUtil.hpp"
28 #include "tcuMaybe.hpp"
29 #include "tcuResultCollector.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuPlatform.hpp"
33 #include "vkDeviceUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkQueryUtil.hpp"
37 #include "vkRefUtil.hpp"
38 #include "vkStrUtil.hpp"
39 #include "vkAllocationCallbackUtil.hpp"
41 #include "deRandom.hpp"
42 #include "deSharedPtr.hpp"
43 #include "deStringUtil.hpp"
44 #include "deUniquePtr.hpp"
45 #include "deSTLUtil.hpp"
69 REFERENCE_BYTES_PER_BYTE = 2
// Estimates the host (system) memory the driver consumes per VkDeviceMemory
// allocation by recording allocation callbacks around a minimal allocation.
// Used to budget how many MemoryObjects the random tests may create.
size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
	AllocationCallbackRecorder	callbackRecorder	(getSystemAllocator());

		// 1 B allocation from memory type 0
		const VkMemoryAllocateInfo	allocInfo	=
			VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
		const Unique<VkDeviceMemory>		memory		(allocateMemory(vk, device, &allocInfo));
		AllocationCallbackValidationResults	validateRes;

		validateAllocationCallbacks(callbackRecorder, &validateRes);

		// Any callback-contract violation invalidates the measurement.
		TCU_CHECK(validateRes.violations.empty());

		// Live bytes plus a per-allocation pointer-sized bookkeeping estimate.
		return getLiveSystemAllocationTotal(validateRes)
			   + sizeof(void*)*validateRes.liveAllocations.size(); // allocation overhead
// Thin convenience wrapper: builds a VkMemoryAllocateInfo from the given size
// and memory type index and allocates device memory owned by the returned Move<>.
Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex)
	const VkMemoryAllocateInfo pAllocInfo =
		VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
		pAllocInfo_allocationSize,
		pAllocInfo_memoryTypeIndex,
	return allocateMemory(vk, device, &pAllocInfo);
	// MemoryRange: an (offset, size) pair; ~0 marks "undefined / not set".
	MemoryRange (VkDeviceSize offset_ = ~(VkDeviceSize)0, VkDeviceSize size_ = ~(VkDeviceSize)0)
		// TestConfig defaults allocationSize to the "undefined" marker.
		: allocationSize	(~(VkDeviceSize)0)
	// Sizes/offsets below are expressed in atoms; multiplied by the memory
	// type's atom size (nonCoherentAtomSize or 1) at test execution time.
	VkDeviceSize		allocationSize;
	// Ranges handed to vkFlushMappedMemoryRanges / vkInvalidateMappedMemoryRanges.
	vector<MemoryRange>	flushMappings;
	vector<MemoryRange>	invalidateMappings;
137 bool compareAndLogBuffer (TestLog& log, size_t size, const deUint8* result, const deUint8* reference)
139 size_t failedBytes = 0;
140 size_t firstFailed = (size_t)-1;
142 for (size_t ndx = 0; ndx < size; ndx++)
144 if (result[ndx] != reference[ndx])
148 if (firstFailed == (size_t)-1)
155 log << TestLog::Message << "Comparison failed. Failed bytes " << failedBytes << ". First failed at offset " << firstFailed << "." << TestLog::EndMessage;
157 std::ostringstream expectedValues;
158 std::ostringstream resultValues;
160 for (size_t ndx = firstFailed; ndx < firstFailed + 10 && ndx < size; ndx++)
162 if (ndx != firstFailed)
164 expectedValues << ", ";
165 resultValues << ", ";
168 expectedValues << reference[ndx];
169 resultValues << result[ndx];
172 if (firstFailed + 10 < size)
174 expectedValues << "...";
175 resultValues << "...";
178 log << TestLog::Message << "Expected values at offset: " << firstFailed << ", " << expectedValues.str() << TestLog::EndMessage;
179 log << TestLog::Message << "Result values at offset: " << firstFailed << ", " << resultValues.str() << TestLog::EndMessage;
187 tcu::TestStatus testMemoryMapping (Context& context, const TestConfig config)
189 TestLog& log = context.getTestContext().getLog();
190 tcu::ResultCollector result (log);
191 const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
192 const VkDevice device = context.getDevice();
193 const InstanceInterface& vki = context.getInstanceInterface();
194 const DeviceInterface& vkd = context.getDeviceInterface();
195 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
196 // \todo [2016-05-27 misojarvi] Remove once drivers start reporting correctly nonCoherentAtomSize that is at least 1.
197 const VkDeviceSize nonCoherentAtomSize = context.getDeviceProperties().limits.nonCoherentAtomSize != 0
198 ? context.getDeviceProperties().limits.nonCoherentAtomSize
202 const tcu::ScopedLogSection section (log, "TestCaseInfo", "TestCaseInfo");
204 log << TestLog::Message << "Seed: " << config.seed << TestLog::EndMessage;
205 log << TestLog::Message << "Allocation size: " << config.allocationSize << " * atom" << TestLog::EndMessage;
206 log << TestLog::Message << "Mapping, offset: " << config.mapping.offset << " * atom, size: " << config.mapping.size << " * atom" << TestLog::EndMessage;
208 if (!config.flushMappings.empty())
210 log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;
212 for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
213 log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset << " * atom, Size: " << config.flushMappings[ndx].size << " * atom" << TestLog::EndMessage;
217 log << TestLog::Message << "Remapping memory between flush and invalidation." << TestLog::EndMessage;
219 if (!config.invalidateMappings.empty())
221 log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;
223 for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
224 log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset << " * atom, Size: " << config.invalidateMappings[ndx].size << " * atom" << TestLog::EndMessage;
228 for (deUint32 memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
232 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(memoryTypeIndex), "MemoryType" + de::toString(memoryTypeIndex));
233 const VkMemoryType& memoryType = memoryProperties.memoryTypes[memoryTypeIndex];
234 const VkMemoryHeap& memoryHeap = memoryProperties.memoryHeaps[memoryType.heapIndex];
235 const VkDeviceSize atomSize = (memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
237 : nonCoherentAtomSize;
239 log << TestLog::Message << "MemoryType: " << memoryType << TestLog::EndMessage;
240 log << TestLog::Message << "MemoryHeap: " << memoryHeap << TestLog::EndMessage;
241 log << TestLog::Message << "AtomSize: " << atomSize << TestLog::EndMessage;
242 log << TestLog::Message << "AllocationSize: " << config.allocationSize * atomSize << TestLog::EndMessage;
243 log << TestLog::Message << "Mapping, offset: " << config.mapping.offset * atomSize << ", size: " << config.mapping.size * atomSize << TestLog::EndMessage;
245 if (!config.flushMappings.empty())
247 log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;
249 for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
250 log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset * atomSize << ", Size: " << config.flushMappings[ndx].size * atomSize << TestLog::EndMessage;
253 if (!config.invalidateMappings.empty())
255 log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;
257 for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
258 log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset * atomSize << ", Size: " << config.invalidateMappings[ndx].size * atomSize << TestLog::EndMessage;
261 if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
263 log << TestLog::Message << "Memory type doesn't support mapping." << TestLog::EndMessage;
265 else if (memoryHeap.size <= 4 * atomSize * config.allocationSize)
267 log << TestLog::Message << "Memory types heap is too small." << TestLog::EndMessage;
271 const Unique<VkDeviceMemory> memory (allocMemory(vkd, device, config.allocationSize * atomSize, memoryTypeIndex));
272 de::Random rng (config.seed);
273 vector<deUint8> reference ((size_t)(config.allocationSize * atomSize));
274 deUint8* mapping = DE_NULL;
278 VK_CHECK(vkd.mapMemory(device, *memory, config.mapping.offset * atomSize, config.mapping.size * atomSize, 0u, &ptr));
281 mapping = (deUint8*)ptr;
284 for (VkDeviceSize ndx = 0; ndx < config.mapping.size * atomSize; ndx++)
286 const deUint8 val = rng.getUint8();
289 reference[(size_t)(config.mapping.offset * atomSize + ndx)] = val;
292 if (!config.flushMappings.empty())
294 vector<VkMappedMemoryRange> ranges;
296 for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
298 const VkMappedMemoryRange range =
300 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
304 config.flushMappings[ndx].offset * atomSize,
305 config.flushMappings[ndx].size * atomSize
308 ranges.push_back(range);
311 VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
317 vkd.unmapMemory(device, *memory);
318 VK_CHECK(vkd.mapMemory(device, *memory, config.mapping.offset * atomSize, config.mapping.size * atomSize, 0u, &ptr));
321 mapping = (deUint8*)ptr;
324 if (!config.invalidateMappings.empty())
326 vector<VkMappedMemoryRange> ranges;
328 for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
330 const VkMappedMemoryRange range =
332 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
336 config.invalidateMappings[ndx].offset * atomSize,
337 config.invalidateMappings[ndx].size * atomSize
340 ranges.push_back(range);
343 VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
346 if (!compareAndLogBuffer(log, (size_t)(config.mapping.size * atomSize), mapping, &reference[(size_t)(config.mapping.offset * atomSize)]))
347 result.fail("Unexpected values read from mapped memory.");
349 vkd.unmapMemory(device, *memory);
352 catch (const tcu::TestError& error)
354 result.fail(error.getMessage());
358 return tcu::TestStatus(result.getResult(), result.getMessage());
	// Wraps one active vkMapMemory mapping: the mapped range, the host pointer
	// and a pointer into the 16-bit reference buffer (values >= 256 = undefined).
	MemoryMapping (const MemoryRange& range,
	// Random read / write / read-modify-write operations on the mapped bytes,
	// each cross-checked against or recorded into the reference buffer.
	void randomRead (de::Random& rng);
	void randomWrite (de::Random& rng);
	void randomModify (de::Random& rng);
	const MemoryRange& getRange (void) const { return m_range; }
MemoryMapping::MemoryMapping (const MemoryRange& range,
	// An empty mapping is never valid.
	DE_ASSERT(range.size > 0);
390 void MemoryMapping::randomRead (de::Random& rng)
392 const size_t count = (size_t)rng.getInt(0, 100);
394 for (size_t ndx = 0; ndx < count; ndx++)
396 const size_t pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
397 const deUint8 val = ((deUint8*) m_ptr)[pos];
399 if (m_refPtr[pos] < 256)
400 TCU_CHECK((deUint16)val == m_refPtr[pos]);
402 m_refPtr[pos] = (deUint16)val;
406 void MemoryMapping::randomWrite (de::Random& rng)
408 const size_t count = (size_t)rng.getInt(0, 100);
410 for (size_t ndx = 0; ndx < count; ndx++)
412 const size_t pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
413 const deUint8 val = rng.getUint8();
415 ((deUint8*)m_ptr)[pos] = val;
416 m_refPtr[pos] = (deUint16)val;
420 void MemoryMapping::randomModify (de::Random& rng)
422 const size_t count = (size_t)rng.getInt(0, 100);
424 for (size_t ndx = 0; ndx < count; ndx++)
426 const size_t pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
427 const deUint8 val = ((deUint8*)m_ptr)[pos];
428 const deUint8 mask = rng.getUint8();
430 if (m_refPtr[pos] < 256)
431 TCU_CHECK((deUint16)val == m_refPtr[pos]);
433 ((deUint8*)m_ptr)[pos] = val ^ mask;
434 m_refPtr[pos] = (deUint16)(val ^ mask);
// Fills 'ranges' with 'count' random VkMappedMemoryRanges that all lie inside
// [minOffset, minOffset + maxSize) of 'memory'. Sizes are 1..maxSize-1 bytes
// (or exactly 1 when maxSize <= 1); offsets are chosen so offset+size fits.
void randomRanges (de::Random& rng, vector<VkMappedMemoryRange>& ranges, size_t count, VkDeviceMemory memory, VkDeviceSize minOffset, VkDeviceSize maxSize)
	ranges.resize(count);

	for (size_t rangeNdx = 0; rangeNdx < count; rangeNdx++)
		const VkDeviceSize	size	= (maxSize > 1 ? (VkDeviceSize)(1 + (rng.getUint64() % (deUint64)(maxSize - 1))) : 1);
		// maxSize - size + 1 possible start positions keep the range in bounds.
		const VkDeviceSize	offset	= minOffset + (VkDeviceSize)(rng.getUint64() % (deUint64)(maxSize - size + 1));

		const VkMappedMemoryRange range =
			VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
		ranges[rangeNdx] = range;
	// One device-memory allocation plus its 16-bit-per-byte reference buffer
	// and at most one active mapping at a time.
	MemoryObject (const DeviceInterface& vkd,
			deUint32 memoryTypeIndex);

	~MemoryObject (void);

	// Maps a random sub-range; asserts no mapping is currently active.
	MemoryMapping* mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng);

	// Flush / invalidate 1..10 random sub-ranges of the current mapping.
	void randomFlush (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
	void randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng);

	VkDeviceSize getSize (void) const { return m_size; }
	MemoryMapping* getMapping (void) { return m_mapping; }
	const DeviceInterface& m_vkd;
	deUint32 m_memoryTypeIndex;
	Move<VkDeviceMemory> m_memory;
	// Owned mapping (DE_NULL when not mapped) and per-byte reference values;
	// 0xFFFF marks a byte whose content is still undefined.
	MemoryMapping* m_mapping;
	vector<deUint16> m_reference;
// Allocates m_size bytes from the given memory type and initializes the
// reference buffer to "undefined" (0xFFFF) for every byte.
MemoryObject::MemoryObject (const DeviceInterface& vkd,
		deUint32 memoryTypeIndex)
	, m_memoryTypeIndex (memoryTypeIndex)
	, m_mapping (DE_NULL)
	m_memory = allocMemory(m_vkd, m_device, m_size, m_memoryTypeIndex);
	m_reference.resize((size_t)m_size, 0xFFFFu);

MemoryObject::~MemoryObject (void)
// Maps a random sub-range (1..m_size bytes) of the allocation and records it
// as the current mapping. Only one mapping may be active per object.
MemoryMapping* MemoryObject::mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
	const VkDeviceSize	size	= (m_size > 1 ? (VkDeviceSize)(1 + (rng.getUint64() % (deUint64)(m_size - 1))) : 1);
	const VkDeviceSize	offset	= (VkDeviceSize)(rng.getUint64() % (deUint64)(m_size - size + 1));

	DE_ASSERT(!m_mapping);

	VK_CHECK(vkd.mapMemory(device, *m_memory, offset, size, 0u, &ptr));

	// Reference pointer starts at the mapped offset so mapping/reference indices line up.
	m_mapping = new MemoryMapping(MemoryRange(offset, size), ptr, &(m_reference[(size_t)offset]));

void MemoryObject::unmap (void)
	m_vkd.unmapMemory(m_device, *m_memory);
// Flushes 1..10 random sub-ranges of the currently mapped range.
// Requires an active mapping (m_mapping dereferenced unchecked).
void MemoryObject::randomFlush (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
	const size_t				rangeCount	= (size_t)rng.getInt(1, 10);
	vector<VkMappedMemoryRange>	ranges		(rangeCount);

	randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size);

	VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));

// Invalidates 1..10 random sub-ranges of the currently mapped range.
void MemoryObject::randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
	const size_t				rangeCount	= (size_t)rng.getInt(1, 10);
	vector<VkMappedMemoryRange>	ranges		(rangeCount);

	randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size);

	VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
	// Use only 1/2 of each memory heap.
	MAX_MEMORY_USAGE_DIV = 2

// Removes the first element equal to 'val' by swapping it with the last
// element (order is not preserved) — O(n) scan, O(1) removal.
void removeFirstEqual (vector<T>& vec, const T& val)
	for (size_t ndx = 0; ndx < vec.size(); ndx++)
			vec[ndx] = vec.back();

	// Memory classes tracked by TotalMemoryTracker.
	MEMORY_CLASS_SYSTEM = 0,
582 // \todo [2016-04-20 pyry] Consider estimating memory fragmentation
583 class TotalMemoryTracker
586 TotalMemoryTracker (void)
588 std::fill(DE_ARRAY_BEGIN(m_usage), DE_ARRAY_END(m_usage), 0);
591 void allocate (MemoryClass memClass, VkDeviceSize size)
593 m_usage[memClass] += size;
596 void free (MemoryClass memClass, VkDeviceSize size)
598 DE_ASSERT(size <= m_usage[memClass]);
599 m_usage[memClass] -= size;
602 VkDeviceSize getUsage (MemoryClass memClass) const
604 return m_usage[memClass];
607 VkDeviceSize getTotalUsage (void) const
609 VkDeviceSize total = 0;
610 for (int ndx = 0; ndx < MEMORY_CLASS_LAST; ++ndx)
611 total += getUsage((MemoryClass)ndx);
616 VkDeviceSize m_usage[MEMORY_CLASS_LAST];
	// Models one VkMemoryHeap: owns the MemoryObjects allocated from it and
	// enforces both the per-heap cap (heap size / MAX_MEMORY_USAGE_DIV) and
	// the platform-wide system/device memory limits.
	MemoryHeap (const VkMemoryHeap& heap,
			const vector<deUint32>& memoryTypes,
			const PlatformMemoryLimits& memoryLimits,
			TotalMemoryTracker& totalMemTracker)
		, m_memoryTypes (memoryTypes)
		, m_limits (memoryLimits)
		, m_totalMemTracker (totalMemTracker)
		for (vector<MemoryObject*>::iterator iter = m_objects.begin(); iter != m_objects.end(); ++iter)

	bool full (void) const { return getAvailableMem() == 0; }
	bool empty (void) const { return m_usage == 0; }

	// Allocates a new MemoryObject of random size (1..available) from a random
	// host-visible type of this heap; caller must ensure the heap is not full.
	MemoryObject* allocateRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
		const VkDeviceSize	availableMem	= getAvailableMem();

		DE_ASSERT(availableMem > 0);

		const VkDeviceSize	size	= 1ull + (rng.getUint64() % availableMem);
		const deUint32		type	= rng.choose<deUint32>(m_memoryTypes.begin(), m_memoryTypes.end());

		DE_ASSERT(size <= availableMem);

		MemoryObject* const object = new MemoryObject(vkd, device, size, type);

		// Account for both the device allocation and its host-side reference buffer.
		m_totalMemTracker.allocate(getMemoryClass(), size);
		m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, size * REFERENCE_BYTES_PER_BYTE);
		m_objects.push_back(object);

	MemoryObject* getRandomObject (de::Random& rng) const
		return rng.choose<MemoryObject*>(m_objects.begin(), m_objects.end());

	// Removes and accounts for one object; does not delete it here (caller
	// observes ownership transfer — note: verify against elided lines).
	void free (MemoryObject* object)
		removeFirstEqual(m_objects, object);
		m_usage -= object->getSize();
		m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, object->getSize() * REFERENCE_BYTES_PER_BYTE);
		m_totalMemTracker.free(getMemoryClass(), object->getSize());

	MemoryClass getMemoryClass (void) const
		if ((m_heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
			return MEMORY_CLASS_DEVICE;
			return MEMORY_CLASS_SYSTEM;

	// Remaining budget: min of the per-heap cap and the platform limits,
	// reserving REFERENCE_BYTES_PER_BYTE of host memory per allocated byte.
	VkDeviceSize getAvailableMem (void) const
		DE_ASSERT(m_usage <= m_heap.size/MAX_MEMORY_USAGE_DIV);

		const VkDeviceSize	availableInHeap	= m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
		// UMA: no dedicated device-local memory; everything comes out of system memory.
		const bool			isUMA			= m_limits.totalDeviceLocalMemory == 0;
			const VkDeviceSize	totalUsage	= m_totalMemTracker.getTotalUsage();
			const VkDeviceSize	totalSysMem	= (VkDeviceSize)m_limits.totalSystemMemory;

			DE_ASSERT(totalUsage <= totalSysMem);

			// Each allocated byte costs 1 byte of memory plus reference-buffer bytes.
			return de::min(availableInHeap, (totalSysMem-totalUsage) / (1 + REFERENCE_BYTES_PER_BYTE));

			const VkDeviceSize	totalUsage		= m_totalMemTracker.getTotalUsage();
			const VkDeviceSize	totalSysMem		= (VkDeviceSize)m_limits.totalSystemMemory;

			const MemoryClass	memClass		= getMemoryClass();
			const VkDeviceSize	totalMemClass	= memClass == MEMORY_CLASS_SYSTEM
												? (VkDeviceSize)(m_limits.totalSystemMemory / (1 + REFERENCE_BYTES_PER_BYTE))
												: m_limits.totalDeviceLocalMemory;
			const VkDeviceSize	usedMemClass	= m_totalMemTracker.getUsage(memClass);

			DE_ASSERT(usedMemClass <= totalMemClass);

			return de::min(de::min(availableInHeap, totalMemClass-usedMemClass), (totalSysMem - totalUsage) / REFERENCE_BYTES_PER_BYTE);

	const VkMemoryHeap			m_heap;
	const vector<deUint32>		m_memoryTypes;
	const PlatformMemoryLimits&	m_limits;
	TotalMemoryTracker&			m_totalMemTracker;
	VkDeviceSize				m_usage;
	vector<MemoryObject*>		m_objects;
// Host-memory cost of tracking one MemoryObject: measured driver footprint of
// a device allocation plus our own bookkeeping object and its shared pointer.
size_t getMemoryObjectSystemSize (Context& context)
	return computeDeviceMemorySystemMemFootprint(context.getDeviceInterface(), context.getDevice())
		   + sizeof(MemoryObject)
		   + sizeof(de::SharedPtr<MemoryObject>);
736 size_t getMemoryMappingSystemSize (void)
738 return sizeof(MemoryMapping) + sizeof(de::SharedPtr<MemoryMapping>);
// Stress test: performs a random sequence of allocate / map / read / write /
// modify / flush / invalidate / unmap / free operations across all
// host-visible memory heaps, bounded by the platform memory limits.
class RandomMemoryMappingInstance : public TestInstance
	RandomMemoryMappingInstance (Context& context, deUint32 seed)
		: TestInstance				(context)
		, m_memoryObjectSysMemSize	(getMemoryObjectSystemSize(context))
		, m_memoryMappingSysMemSize	(getMemoryMappingSystemSize())
		, m_memoryLimits			(getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
		const VkPhysicalDevice					physicalDevice		= context.getPhysicalDevice();
		const InstanceInterface&				vki					= context.getInstanceInterface();
		const VkPhysicalDeviceMemoryProperties	memoryProperties	= getPhysicalDeviceMemoryProperties(vki, physicalDevice);

		// Bucket host-visible memory types by the heap they belong to.
		vector<vector<deUint32> > memoryTypes (memoryProperties.memoryHeapCount);

		for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < memoryProperties.memoryTypeCount; memoryTypeNdx++)
			if (memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
				memoryTypes[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].push_back(memoryTypeNdx);

		for (deUint32 heapIndex = 0; heapIndex < memoryProperties.memoryHeapCount; heapIndex++)
			const VkMemoryHeap heapInfo = memoryProperties.memoryHeaps[heapIndex];

			// Only heaps with at least one host-visible type are usable for mapping.
			if (!memoryTypes[heapIndex].empty())
				const de::SharedPtr<MemoryHeap> heap (new MemoryHeap(heapInfo, memoryTypes[heapIndex], m_memoryLimits, m_totalMemTracker));

				TCU_CHECK_INTERNAL(!heap->full());

				m_memoryHeaps.push_back(heap);

	~RandomMemoryMappingInstance (void)

	tcu::TestStatus iterate (void)
		const size_t	opCount						= 100;
		// Effective probabilities after chaining differ from the literal
		// values; shown in the trailing comments.
		const float		memoryOpProbability			= 0.5f;		// 0.50
		const float		flushInvalidateProbability	= 0.4f;		// 0.20
		const float		mapProbability				= 0.50f;	// 0.15
		const float		unmapProbability			= 0.25f;	// 0.075

		const float		allocProbability			= 0.75f; // Versus free

		const VkDevice			device	= m_context.getDevice();
		const DeviceInterface&	vkd		= m_context.getDeviceInterface();

		// On UMA platforms everything counts against system memory.
		const VkDeviceSize		sysMemUsage	= (m_memoryLimits.totalDeviceLocalMemory == 0)
											? m_totalMemTracker.getTotalUsage()
											: m_totalMemTracker.getUsage(MEMORY_CLASS_SYSTEM);

		if (!m_memoryMappings.empty() && m_rng.getFloat() < memoryOpProbability)
			// Perform operations on mapped memory
			MemoryMapping* const mapping = m_rng.choose<MemoryMapping*>(m_memoryMappings.begin(), m_memoryMappings.end());

			const Op op = (Op)(m_rng.getUint32() % OP_LAST);

					mapping->randomRead(m_rng);

					mapping->randomWrite(m_rng);

					mapping->randomModify(m_rng);

					DE_FATAL("Invalid operation");

		else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < flushInvalidateProbability)
			MemoryObject* const object = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

				object->randomFlush(vkd, device, m_rng);

				object->randomInvalidate(vkd, device, m_rng);

		else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < unmapProbability)
			// Unmap memory object
			MemoryObject* const object = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

			removeFirstEqual(m_memoryMappings, object->getMapping());

			removeFirstEqual(m_mappedMemoryObjects, object);
			m_nonMappedMemoryObjects.push_back(object);

			// Release the host bookkeeping reserved for the mapping.
			m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);

		else if (!m_nonMappedMemoryObjects.empty() &&
				 (m_rng.getFloat() < mapProbability) &&
				 (sysMemUsage+m_memoryMappingSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory))
			MemoryObject* const	object	= m_rng.choose<MemoryObject*>(m_nonMappedMemoryObjects.begin(), m_nonMappedMemoryObjects.end());
			MemoryMapping*		mapping	= object->mapRandom(vkd, device, m_rng);

			m_memoryMappings.push_back(mapping);
			m_mappedMemoryObjects.push_back(object);
			removeFirstEqual(m_nonMappedMemoryObjects, object);

			m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);

			// Sort heaps based on capacity (full or not)
			vector<MemoryHeap*>	nonFullHeaps;
			vector<MemoryHeap*>	nonEmptyHeaps;

			if (sysMemUsage+m_memoryObjectSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory)
				// For the duration of sorting reserve MemoryObject space from system memory
				m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

				for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
					 heapIter != m_memoryHeaps.end();
					if (!(*heapIter)->full())
						nonFullHeaps.push_back(heapIter->get());

					if (!(*heapIter)->empty())
						nonEmptyHeaps.push_back(heapIter->get());

				m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

				// Not possible to even allocate MemoryObject from system memory, look for non-empty heaps
				for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
					 heapIter != m_memoryHeaps.end();
					if (!(*heapIter)->empty())
						nonEmptyHeaps.push_back(heapIter->get());

			if (!nonFullHeaps.empty() && (nonEmptyHeaps.empty() || m_rng.getFloat() < allocProbability))
				// Reserve MemoryObject from sys mem first
				m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

				// Allocate more memory objects
				MemoryHeap* const	heap	= m_rng.choose<MemoryHeap*>(nonFullHeaps.begin(), nonFullHeaps.end());
				MemoryObject* const	object	= heap->allocateRandom(vkd, device, m_rng);

				m_nonMappedMemoryObjects.push_back(object);

				// Free memory objects
				MemoryHeap* const	heap	= m_rng.choose<MemoryHeap*>(nonEmptyHeaps.begin(), nonEmptyHeaps.end());
				MemoryObject* const	object	= heap->getRandomObject(m_rng);

				// If the object was mapped, also drop the mapping bookkeeping.
				if (object->getMapping())
					removeFirstEqual(m_memoryMappings, object->getMapping());
					m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, m_memoryMappingSysMemSize);

				removeFirstEqual(m_mappedMemoryObjects, object);
				removeFirstEqual(m_nonMappedMemoryObjects, object);

				m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

		if (m_opNdx == opCount)
			return tcu::TestStatus::pass("Pass");
			return tcu::TestStatus::incomplete();

	// Host-side cost estimates used to stay under the platform memory limits.
	const size_t				m_memoryObjectSysMemSize;
	const size_t				m_memoryMappingSysMemSize;
	const PlatformMemoryLimits	m_memoryLimits;

	TotalMemoryTracker					m_totalMemTracker;
	vector<de::SharedPtr<MemoryHeap> >	m_memoryHeaps;

	// Objects partitioned by mapped state; mappings mirror m_mappedMemoryObjects.
	vector<MemoryObject*>	m_mappedMemoryObjects;
	vector<MemoryObject*>	m_nonMappedMemoryObjects;
	vector<MemoryMapping*>	m_memoryMappings;
	// Flush/invalidate two disjoint halves vs. two overlapping ranges.
	OP_SUB_FLUSH_SEPARATE,
	OP_SUB_FLUSH_OVERLAPPING,
	OP_SUB_INVALIDATE_SEPARATE,
	OP_SUB_INVALIDATE_OVERLAPPING,
// Builds a TestConfig that maps the given sub-range of an allocation and
// applies the flush/invalidate/remap pattern selected by 'op'. All offsets
// and sizes are expressed in atoms (scaled by atomSize at execution time).
TestConfig subMappedConfig (VkDeviceSize allocationSize,
			const MemoryRange& mapping,
	config.allocationSize	= allocationSize;
	config.mapping			= mapping;
	config.remap			= false;

			config.remap = true;

			// Flush the whole mapped range.
			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));

			DE_ASSERT(mapping.size / 4 > 0);

			// Flush the middle half of the mapped range.
			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));

		case OP_SUB_FLUSH_SEPARATE:
			DE_ASSERT(mapping.size / 2 > 0);

			// Two disjoint ranges covering the whole mapping.
			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
			config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));

		case OP_SUB_FLUSH_OVERLAPPING:
			DE_ASSERT((mapping.size / 3) > 0);

			// Two ranges that overlap in the middle third.
			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
			config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));

			// Invalidate the whole mapped range.
			config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));

		case OP_SUB_INVALIDATE:
			DE_ASSERT(mapping.size / 4 > 0);

			// Invalidate the middle half of the mapped range.
			config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));

		case OP_SUB_INVALIDATE_SEPARATE:
			DE_ASSERT(mapping.size / 2 > 0);

			config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
			config.invalidateMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));

		case OP_SUB_INVALIDATE_OVERLAPPING:
			DE_ASSERT((mapping.size / 3) > 0);

			config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
			config.invalidateMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));

			DE_FATAL("Unknown Op");
			return TestConfig();
// Convenience wrapper: a sub-mapped config whose mapping covers the whole allocation.
TestConfig fullMappedConfig (VkDeviceSize allocationSize,
	return subMappedConfig(allocationSize, MemoryRange(0, allocationSize), op, seed);
1071 tcu::TestCaseGroup* createMappingTests (tcu::TestContext& testCtx)
1073 de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "mapping", "Memory mapping tests."));
1075 const VkDeviceSize allocationSizes[] =
1077 33, 257, 4087, 8095, 1*1024*1024 + 1
1080 const VkDeviceSize offsets[] =
1082 0, 17, 129, 255, 1025, 32*1024+1
1085 const VkDeviceSize sizes[] =
1087 31, 255, 1025, 4085, 1*1024*1024 - 1
1093 const char* const name;
1096 { OP_NONE, "simple" },
1097 { OP_REMAP, "remap" },
1098 { OP_FLUSH, "flush" },
1099 { OP_SUB_FLUSH, "subflush" },
1100 { OP_SUB_FLUSH_SEPARATE, "subflush_separate" },
1101 { OP_SUB_FLUSH_SEPARATE, "subflush_overlapping" },
1103 { OP_INVALIDATE, "invalidate" },
1104 { OP_SUB_INVALIDATE, "subinvalidate" },
1105 { OP_SUB_INVALIDATE_SEPARATE, "subinvalidate_separate" },
1106 { OP_SUB_INVALIDATE_SEPARATE, "subinvalidate_overlapping" }
1111 de::MovePtr<tcu::TestCaseGroup> fullGroup (new tcu::TestCaseGroup(testCtx, "full", "Map memory completely."));
1113 for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
1115 const VkDeviceSize allocationSize = allocationSizes[allocationSizeNdx];
1116 de::MovePtr<tcu::TestCaseGroup> allocationSizeGroup (new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
1118 for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
1120 const Op op = ops[opNdx].op;
1121 const char* const name = ops[opNdx].name;
1122 const deUint32 seed = (deUint32)(opNdx * allocationSizeNdx);
1123 const TestConfig config = fullMappedConfig(allocationSize, op, seed);
1125 addFunctionCase(allocationSizeGroup.get(), name, name, testMemoryMapping, config);
1128 fullGroup->addChild(allocationSizeGroup.release());
1131 group->addChild(fullGroup.release());
1136 de::MovePtr<tcu::TestCaseGroup> subGroup (new tcu::TestCaseGroup(testCtx, "sub", "Map part of the memory."));
1138 for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
1140 const VkDeviceSize allocationSize = allocationSizes[allocationSizeNdx];
1141 de::MovePtr<tcu::TestCaseGroup> allocationSizeGroup (new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
1143 for (size_t offsetNdx = 0; offsetNdx < DE_LENGTH_OF_ARRAY(offsets); offsetNdx++)
1145 const VkDeviceSize offset = offsets[offsetNdx];
1147 if (offset >= allocationSize)
1150 de::MovePtr<tcu::TestCaseGroup> offsetGroup (new tcu::TestCaseGroup(testCtx, ("offset_" + de::toString(offset)).c_str(), ""));
1152 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
1154 const VkDeviceSize size = sizes[sizeNdx];
1156 if (offset + size > allocationSize)
1159 if (offset == 0 && size == allocationSize)
1162 de::MovePtr<tcu::TestCaseGroup> sizeGroup (new tcu::TestCaseGroup(testCtx, ("size_" + de::toString(size)).c_str(), ""));
1164 for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
1166 const deUint32 seed = (deUint32)(opNdx * allocationSizeNdx);
1167 const Op op = ops[opNdx].op;
1168 const char* const name = ops[opNdx].name;
1169 const TestConfig config = subMappedConfig(allocationSize, MemoryRange(offset, size), op, seed);
1171 addFunctionCase(sizeGroup.get(), name, name, testMemoryMapping, config);
1174 offsetGroup->addChild(sizeGroup.release());
1177 allocationSizeGroup->addChild(offsetGroup.release());
1180 subGroup->addChild(allocationSizeGroup.release());
1183 group->addChild(subGroup.release());
1188 de::MovePtr<tcu::TestCaseGroup> randomGroup (new tcu::TestCaseGroup(testCtx, "random", "Random memory mapping tests."));
1189 de::Random rng (3927960301u);
1191 for (size_t ndx = 0; ndx < 100; ndx++)
1193 const deUint32 seed = rng.getUint32();
1194 const std::string name = de::toString(ndx);
1196 randomGroup->addChild(new InstanceFactory1<RandomMemoryMappingInstance, deUint32>(testCtx, tcu::NODETYPE_SELF_VALIDATE, de::toString(ndx), "Random case", seed));
1199 group->addChild(randomGroup.release());
1202 return group.release();