1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 Google Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Simple memory mapping tests.
22 *//*--------------------------------------------------------------------*/
24 #include "vktMemoryMappingTests.hpp"
26 #include "vktTestCaseUtil.hpp"
28 #include "tcuMaybe.hpp"
29 #include "tcuResultCollector.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuPlatform.hpp"
33 #include "vkDeviceUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkQueryUtil.hpp"
37 #include "vkRefUtil.hpp"
38 #include "vkStrUtil.hpp"
39 #include "vkAllocationCallbackUtil.hpp"
41 #include "deRandom.hpp"
42 #include "deSharedPtr.hpp"
43 #include "deStringUtil.hpp"
44 #include "deUniquePtr.hpp"
45 #include "deSTLUtil.hpp"
// Integer ceiling division: smallest q such that q * b >= a (for a, b > 0).
template<typename T>
T divRoundUp (const T& a, const T& b)
{
	const T wholeUnits = a / b;
	const T remainder  = a % b;

	// One extra unit is needed whenever the division is not exact.
	return (remainder != 0) ? wholeUnits + T(1) : wholeUnits;
}
// Round 'a' down to the nearest multiple of 'b' (truncating division).
// NOTE(review): the function body was missing from this extraction; restored
// to the canonical form expected by callers (setRange, vectorAnd, full()).
template<typename T>
T roundDownToMultiple (const T& a, const T& b)
{
	return b * (a / b);
}
// Round 'a' up to the nearest multiple of 'b'.
template<typename T>
T roundUpToMultiple (const T& a, const T& b)
{
	const T wholeUnits = a / b;

	// Already aligned values are returned as-is; otherwise pad with one unit.
	if (a % b != 0)
		return b * (wholeUnits + T(1));
	else
		return b * wholeUnits;
}
// \note Bit vector that guarantees that each value takes only one bit.
// std::vector<bool> is often optimized to only take one bit for each bool, but
// that is implementation detail and in this case we really need to known how much
// NOTE(review): the class header and braces are not visible in this extraction;
// only the visible statements are documented below, byte-for-byte.

// Bits are packed into deUint32 backing blocks, BLOCK_BIT_SIZE bits each.
BLOCK_BIT_SIZE = 8 * sizeof(deUint32)

// Construct a vector of 'size' bits, all set to 'value'. Filling whole blocks
// with ~0x0u sets every bit at once; extra bits in the last block are unused.
BitVector (size_t size, bool value = false)
	: m_data(divRoundUp<size_t>(size, (size_t)BLOCK_BIT_SIZE), value ? ~0x0u : 0x0u)

// Return the bit at index 'ndx'.
bool get (size_t ndx) const
	return (m_data[ndx / BLOCK_BIT_SIZE] & (0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE))) != 0;

// Set (|=) or clear (&= ~) the bit at index 'ndx'.
void set (size_t ndx, bool value)
	m_data[ndx / BLOCK_BIT_SIZE] |= 0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE);
	m_data[ndx / BLOCK_BIT_SIZE] &= ~(0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE));

// Set 'count' bits starting at 'offset' to 'value'.
// Strategy: unaligned head bits one at a time, then whole blocks via memset,
// then the unaligned tail bits.
void setRange (size_t offset, size_t count, bool value)
	// Head: advance bit-by-bit until block-aligned (or the range ends first).
	for (; (ndx < offset + count) && ((ndx % BLOCK_BIT_SIZE) != 0); ndx++)
		DE_ASSERT(ndx >= offset);
		DE_ASSERT(ndx < offset + count);

	// Middle: full blocks can be filled byte-wise (8 bits per byte).
	const size_t endOfFullBlockNdx = roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE);

	if (ndx < endOfFullBlockNdx)
		deMemset(&m_data[ndx / BLOCK_BIT_SIZE], (value ? 0xFF : 0x0), (endOfFullBlockNdx - ndx) / 8);
		ndx = endOfFullBlockNdx;

	// Tail: remaining bits after the last full block.
	for (; ndx < offset + count; ndx++)
		DE_ASSERT(ndx >= offset);
		DE_ASSERT(ndx < offset + count);

// In-place AND with 'other' over [offset, offset+count): unaligned head and
// tail bits are combined individually, aligned middle blocks a word at a time.
void vectorAnd (const BitVector& other, size_t offset, size_t count)
	for (; ndx < offset + count && (ndx % BLOCK_BIT_SIZE) != 0; ndx++)
		DE_ASSERT(ndx >= offset);
		DE_ASSERT(ndx < offset + count);
		set(ndx, other.get(ndx) && get(ndx));

	// Aligned middle: whole 32-bit blocks ANDed directly.
	for (; ndx < roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE); ndx += BLOCK_BIT_SIZE)
		DE_ASSERT(ndx >= offset);
		DE_ASSERT(ndx < offset + count);
		DE_ASSERT(ndx % BLOCK_BIT_SIZE == 0);
		DE_ASSERT(ndx + BLOCK_BIT_SIZE <= offset + count);
		m_data[ndx / BLOCK_BIT_SIZE] &= other.m_data[ndx / BLOCK_BIT_SIZE];

	for (; ndx < offset + count; ndx++)
		DE_ASSERT(ndx >= offset);
		DE_ASSERT(ndx < offset + count);
		set(ndx, other.get(ndx) && get(ndx));

// Backing storage: one bit per element, packed into 32-bit blocks.
vector<deUint32> m_data;
// Host-side model of a device memory allocation used to validate mapped reads
// and writes. Tracks, per byte, whether its content is defined, and per atom
// (nonCoherentAtomSize granularity), whether it has been flushed.
class ReferenceMemory
	// 'size' bytes of reference data; 'atomSize' is the flush/invalidate
	// granularity (one flushed-bit per atom). Requires size % atomSize == 0.
	ReferenceMemory (size_t size, size_t atomSize)
		: m_atomSize (atomSize)
		, m_bytes (size, 0xDEu)
		, m_defined (size, false)
		, m_flushed (size / atomSize, false)
		DE_ASSERT(size % m_atomSize == 0);

	// Record a host write: the byte becomes defined and its atom dirty
	// (unflushed) until the next flush().
	void write (size_t pos, deUint8 value)
		m_bytes[pos] = value;
		m_defined.set(pos, true);
		m_flushed.set(pos / m_atomSize, false);

	// Validate a value read through the mapping. A byte whose content is
	// undefined accepts any value; the observed value then becomes the
	// defined reference content. Returns false on mismatch.
	bool read (size_t pos, deUint8 value)
		const bool isOk = !m_defined.get(pos)
			|| m_bytes[pos] == value;

		m_bytes[pos] = value;
		m_defined.set(pos, true);

	// Read-modify-write with XOR: validates the read value like read(),
	// then stores value ^ mask and marks the atom dirty like write().
	bool modifyXor (size_t pos, deUint8 value, deUint8 mask)
		const bool isOk = !m_defined.get(pos)
			|| m_bytes[pos] == value;

		m_bytes[pos] = value ^ mask;
		m_defined.set(pos, true);
		m_flushed.set(pos / m_atomSize, false);

	// Mark atoms in [offset, offset+size) as flushed (visible to the device).
	// Both arguments must be atom-aligned.
	void flush (size_t offset, size_t size)
		DE_ASSERT((offset % m_atomSize) == 0);
		DE_ASSERT((size % m_atomSize) == 0);

		m_flushed.setRange(offset / m_atomSize, size / m_atomSize, true);

	// Invalidate [offset, offset+size): bytes in atoms that were never flushed
	// lose their defined state (their content is unspecified after reload).
	void invalidate (size_t offset, size_t size)
		DE_ASSERT((offset % m_atomSize) == 0);
		DE_ASSERT((size % m_atomSize) == 0);

		// Fast path: when atom size is one, defined &= flushed bitwise.
		m_defined.vectorAnd(m_flushed, offset, size);

		// General path: clear defined-bits atom by atom for unflushed atoms.
		for (size_t ndx = 0; ndx < size / m_atomSize; ndx++)
			if (!m_flushed.get((offset / m_atomSize) + ndx))
				m_defined.setRange(offset + ndx * m_atomSize, m_atomSize, false);

	const size_t m_atomSize;   // flush/invalidate granularity in bytes
	vector<deUint8> m_bytes;   // reference byte values (0xDE = initial garbage)
// Pairs a VkMemoryType with its index in VkPhysicalDeviceMemoryProperties.
MemoryType (deUint32 index_, const VkMemoryType& type_)
// Estimate how much host (system) memory the driver spends per VkDeviceMemory
// allocation: record allocation callbacks around a minimal allocation, then
// sum the live allocation total plus a per-allocation bookkeeping estimate.
size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
	AllocationCallbackRecorder callbackRecorder (getSystemAllocator());

	// 1 B allocation from memory type 0
	const VkMemoryAllocateInfo allocInfo =
		VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
	const Unique<VkDeviceMemory> memory (allocateMemory(vk, device, &allocInfo));
	AllocationCallbackValidationResults validateRes;

	validateAllocationCallbacks(callbackRecorder, &validateRes);

	// The recorded callbacks must not show any allocation-contract violations.
	TCU_CHECK(validateRes.violations.empty());

	return getLiveSystemAllocationTotal(validateRes)
		+ sizeof(void*)*validateRes.liveAllocations.size(); // allocation overhead
// Thin wrapper over vkAllocateMemory that builds the VkMemoryAllocateInfo
// in place. Returned Move<> frees the memory automatically.
Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex)
	const VkMemoryAllocateInfo pAllocInfo =
		VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
		pAllocInfo_allocationSize,
		pAllocInfo_memoryTypeIndex,
	return allocateMemory(vk, device, &pAllocInfo);

// Range within a memory object. The all-ones defaults mark "not set".
MemoryRange (VkDeviceSize offset_ = ~(VkDeviceSize)0, VkDeviceSize size_ = ~(VkDeviceSize)0)

// Test configuration. All sizes/offsets below are in multiples of the
// memory type's atom size, scaled at run time in testMemoryMapping().
	: allocationSize (~(VkDeviceSize)0)
VkDeviceSize allocationSize;
vector<MemoryRange> flushMappings;      // ranges for vkFlushMappedMemoryRanges
vector<MemoryRange> invalidateMappings; // ranges for vkInvalidateMappedMemoryRanges
336 bool compareAndLogBuffer (TestLog& log, size_t size, const deUint8* result, const deUint8* reference)
338 size_t failedBytes = 0;
339 size_t firstFailed = (size_t)-1;
341 for (size_t ndx = 0; ndx < size; ndx++)
343 if (result[ndx] != reference[ndx])
347 if (firstFailed == (size_t)-1)
354 log << TestLog::Message << "Comparison failed. Failed bytes " << failedBytes << ". First failed at offset " << firstFailed << "." << TestLog::EndMessage;
356 std::ostringstream expectedValues;
357 std::ostringstream resultValues;
359 for (size_t ndx = firstFailed; ndx < firstFailed + 10 && ndx < size; ndx++)
361 if (ndx != firstFailed)
363 expectedValues << ", ";
364 resultValues << ", ";
367 expectedValues << reference[ndx];
368 resultValues << result[ndx];
371 if (firstFailed + 10 < size)
373 expectedValues << "...";
374 resultValues << "...";
377 log << TestLog::Message << "Expected values at offset: " << firstFailed << ", " << expectedValues.str() << TestLog::EndMessage;
378 log << TestLog::Message << "Result values at offset: " << firstFailed << ", " << resultValues.str() << TestLog::EndMessage;
386 tcu::TestStatus testMemoryMapping (Context& context, const TestConfig config)
388 TestLog& log = context.getTestContext().getLog();
389 tcu::ResultCollector result (log);
390 const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
391 const VkDevice device = context.getDevice();
392 const InstanceInterface& vki = context.getInstanceInterface();
393 const DeviceInterface& vkd = context.getDeviceInterface();
394 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
395 // \todo [2016-05-27 misojarvi] Remove once drivers start reporting correctly nonCoherentAtomSize that is at least 1.
396 const VkDeviceSize nonCoherentAtomSize = context.getDeviceProperties().limits.nonCoherentAtomSize != 0
397 ? context.getDeviceProperties().limits.nonCoherentAtomSize
401 const tcu::ScopedLogSection section (log, "TestCaseInfo", "TestCaseInfo");
403 log << TestLog::Message << "Seed: " << config.seed << TestLog::EndMessage;
404 log << TestLog::Message << "Allocation size: " << config.allocationSize << " * atom" << TestLog::EndMessage;
405 log << TestLog::Message << "Mapping, offset: " << config.mapping.offset << " * atom, size: " << config.mapping.size << " * atom" << TestLog::EndMessage;
407 if (!config.flushMappings.empty())
409 log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;
411 for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
412 log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset << " * atom, Size: " << config.flushMappings[ndx].size << " * atom" << TestLog::EndMessage;
416 log << TestLog::Message << "Remapping memory between flush and invalidation." << TestLog::EndMessage;
418 if (!config.invalidateMappings.empty())
420 log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;
422 for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
423 log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset << " * atom, Size: " << config.invalidateMappings[ndx].size << " * atom" << TestLog::EndMessage;
427 for (deUint32 memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
431 const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(memoryTypeIndex), "MemoryType" + de::toString(memoryTypeIndex));
432 const VkMemoryType& memoryType = memoryProperties.memoryTypes[memoryTypeIndex];
433 const VkMemoryHeap& memoryHeap = memoryProperties.memoryHeaps[memoryType.heapIndex];
434 const VkDeviceSize atomSize = (memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
436 : nonCoherentAtomSize;
438 log << TestLog::Message << "MemoryType: " << memoryType << TestLog::EndMessage;
439 log << TestLog::Message << "MemoryHeap: " << memoryHeap << TestLog::EndMessage;
440 log << TestLog::Message << "AtomSize: " << atomSize << TestLog::EndMessage;
441 log << TestLog::Message << "AllocationSize: " << config.allocationSize * atomSize << TestLog::EndMessage;
442 log << TestLog::Message << "Mapping, offset: " << config.mapping.offset * atomSize << ", size: " << config.mapping.size * atomSize << TestLog::EndMessage;
444 if (!config.flushMappings.empty())
446 log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;
448 for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
449 log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset * atomSize << ", Size: " << config.flushMappings[ndx].size * atomSize << TestLog::EndMessage;
452 if (!config.invalidateMappings.empty())
454 log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;
456 for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
457 log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset * atomSize << ", Size: " << config.invalidateMappings[ndx].size * atomSize << TestLog::EndMessage;
460 if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
462 log << TestLog::Message << "Memory type doesn't support mapping." << TestLog::EndMessage;
464 else if (memoryHeap.size <= 4 * atomSize * config.allocationSize)
466 log << TestLog::Message << "Memory types heap is too small." << TestLog::EndMessage;
470 const Unique<VkDeviceMemory> memory (allocMemory(vkd, device, config.allocationSize * atomSize, memoryTypeIndex));
471 de::Random rng (config.seed);
472 vector<deUint8> reference ((size_t)(config.allocationSize * atomSize));
473 deUint8* mapping = DE_NULL;
477 VK_CHECK(vkd.mapMemory(device, *memory, config.mapping.offset * atomSize, config.mapping.size * atomSize, 0u, &ptr));
480 mapping = (deUint8*)ptr;
483 for (VkDeviceSize ndx = 0; ndx < config.mapping.size * atomSize; ndx++)
485 const deUint8 val = rng.getUint8();
488 reference[(size_t)(config.mapping.offset * atomSize + ndx)] = val;
491 if (!config.flushMappings.empty())
493 vector<VkMappedMemoryRange> ranges;
495 for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
497 const VkMappedMemoryRange range =
499 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
503 config.flushMappings[ndx].offset * atomSize,
504 config.flushMappings[ndx].size * atomSize
507 ranges.push_back(range);
510 VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
516 vkd.unmapMemory(device, *memory);
517 VK_CHECK(vkd.mapMemory(device, *memory, config.mapping.offset * atomSize, config.mapping.size * atomSize, 0u, &ptr));
520 mapping = (deUint8*)ptr;
523 if (!config.invalidateMappings.empty())
525 vector<VkMappedMemoryRange> ranges;
527 for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
529 const VkMappedMemoryRange range =
531 VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
535 config.invalidateMappings[ndx].offset * atomSize,
536 config.invalidateMappings[ndx].size * atomSize
539 ranges.push_back(range);
542 VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
545 if (!compareAndLogBuffer(log, (size_t)(config.mapping.size * atomSize), mapping, &reference[(size_t)(config.mapping.offset * atomSize)]))
546 result.fail("Unexpected values read from mapped memory.");
548 vkd.unmapMemory(device, *memory);
551 catch (const tcu::TestError& error)
553 result.fail(error.getMessage());
557 return tcu::TestStatus(result.getResult(), result.getMessage());
// Wraps one mapped range of a VkDeviceMemory together with the shared
// ReferenceMemory used to validate its contents.
MemoryMapping (const MemoryRange& range,
	ReferenceMemory& reference);

// Each performs up to 100 random single-byte operations through the mapping,
// keeping the reference model in sync.
void randomRead (de::Random& rng);
void randomWrite (de::Random& rng);
void randomModify (de::Random& rng);

const MemoryRange& getRange (void) const { return m_range; }

ReferenceMemory& m_reference;

MemoryMapping::MemoryMapping (const MemoryRange& range,
	ReferenceMemory& reference)
	, m_reference (reference)
	// Zero-sized mappings are not allowed.
	DE_ASSERT(range.size > 0);

// Read random bytes through the mapping and validate against the reference.
void MemoryMapping::randomRead (de::Random& rng)
	const size_t count = (size_t)rng.getInt(0, 100);

	for (size_t ndx = 0; ndx < count; ndx++)
		const size_t pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
		const deUint8 val = ((deUint8*)m_ptr)[pos];

		// Reference positions are absolute within the memory object.
		TCU_CHECK(m_reference.read((size_t)(m_range.offset + pos), val));

// Write random bytes through the mapping, mirroring them into the reference.
void MemoryMapping::randomWrite (de::Random& rng)
	const size_t count = (size_t)rng.getInt(0, 100);

	for (size_t ndx = 0; ndx < count; ndx++)
		const size_t pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
		const deUint8 val = rng.getUint8();

		((deUint8*)m_ptr)[pos] = val;
		m_reference.write((size_t)(m_range.offset + pos), val);

// XOR random bytes in place; the value read back is validated first.
void MemoryMapping::randomModify (de::Random& rng)
	const size_t count = (size_t)rng.getInt(0, 100);

	for (size_t ndx = 0; ndx < count; ndx++)
		const size_t pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
		const deUint8 val = ((deUint8*)m_ptr)[pos];
		const deUint8 mask = rng.getUint8();

		((deUint8*)m_ptr)[pos] = val ^ mask;
		TCU_CHECK(m_reference.modifyXor((size_t)(m_range.offset + pos), val, mask));
// Pick a random size in [atomSize, maxSize] that is a multiple of atomSize.
VkDeviceSize randomSize (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxSize)
	const VkDeviceSize maxSizeInAtoms = maxSize / atomSize;

	DE_ASSERT(maxSizeInAtoms > 0);

	// NOTE(review): the single-atom fallback branch of this ternary is not
	// visible in this extraction (presumably `: atomSize` — confirm).
	return maxSizeInAtoms > 1
		? atomSize * (1 + (VkDeviceSize)(rng.getUint64() % (deUint64)maxSizeInAtoms))

// Pick a random atom-aligned offset in [0, maxOffset].
VkDeviceSize randomOffset (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxOffset)
	const VkDeviceSize maxOffsetInAtoms = maxOffset / atomSize;

	// NOTE(review): the zero-offset fallback branch is not visible in this
	// extraction (presumably `: 0` — confirm).
	return maxOffsetInAtoms > 0
		? atomSize * (VkDeviceSize)(rng.getUint64() % (deUint64)(maxOffsetInAtoms + 1))

// Fill 'ranges' with 'count' random VkMappedMemoryRanges of 'memory', each
// atom-aligned and contained in [minOffset, minOffset + maxSize).
void randomRanges (de::Random& rng, vector<VkMappedMemoryRange>& ranges, size_t count, VkDeviceMemory memory, VkDeviceSize minOffset, VkDeviceSize maxSize, VkDeviceSize atomSize)
	ranges.resize(count);

	for (size_t rangeNdx = 0; rangeNdx < count; rangeNdx++)
		const VkDeviceSize size = randomSize(rng, atomSize, maxSize);
		const VkDeviceSize offset = minOffset + randomOffset(rng, atomSize, maxSize - size);

		const VkMappedMemoryRange range =
			VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
		ranges[rangeNdx] = range;
// One VkDeviceMemory allocation plus its validation state. Tracks both the
// device-side footprint ('memoryUsage') and the host-side bookkeeping cost
// ('referenceMemoryUsage') so the heap simulator can stay within limits.
MemoryObject (const DeviceInterface& vkd,
	deUint32 memoryTypeIndex,
	VkDeviceSize atomSize,
	VkDeviceSize memoryUsage,
	VkDeviceSize referenceMemoryUsage);

~MemoryObject (void);

// Map a random atom-aligned subrange; at most one mapping exists at a time.
MemoryMapping* mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng);

// Flush/invalidate 1-10 random subranges of the current mapping.
void randomFlush (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
void randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng);

VkDeviceSize getSize (void) const { return m_size; }
MemoryMapping* getMapping (void) { return m_mapping; }

VkDeviceSize getMemoryUsage (void) const { return m_memoryUsage; }
VkDeviceSize getReferenceMemoryUsage (void) const { return m_referenceMemoryUsage; }

const DeviceInterface& m_vkd;
const VkDevice m_device;

const deUint32 m_memoryTypeIndex;
const VkDeviceSize m_size;
const VkDeviceSize m_atomSize;
const VkDeviceSize m_memoryUsage;
const VkDeviceSize m_referenceMemoryUsage;

Move<VkDeviceMemory> m_memory;

MemoryMapping* m_mapping;           // currently active mapping, if any
ReferenceMemory m_referenceMemory;  // host-side model of the contents

MemoryObject::MemoryObject (const DeviceInterface& vkd,
	deUint32 memoryTypeIndex,
	VkDeviceSize atomSize,
	VkDeviceSize memoryUsage,
	VkDeviceSize referenceMemoryUsage)
	, m_memoryTypeIndex (memoryTypeIndex)
	, m_atomSize (atomSize)
	, m_memoryUsage (memoryUsage)
	, m_referenceMemoryUsage (referenceMemoryUsage)
	, m_mapping (DE_NULL)
	, m_referenceMemory ((size_t)size, (size_t)m_atomSize)
	// Allocate the device memory up front; Move<> frees it on destruction.
	m_memory = allocMemory(m_vkd, m_device, m_size, m_memoryTypeIndex);

MemoryObject::~MemoryObject (void)

// Map a random range [offset, offset+size) and create its tracking object.
MemoryMapping* MemoryObject::mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
	const VkDeviceSize size = randomSize(rng, m_atomSize, m_size);
	const VkDeviceSize offset = randomOffset(rng, m_atomSize, m_size - size);

	// Vulkan allows only one simultaneous mapping per memory object.
	DE_ASSERT(!m_mapping);

	VK_CHECK(vkd.mapMemory(device, *m_memory, offset, size, 0u, &ptr));

	m_mapping = new MemoryMapping(MemoryRange(offset, size), ptr, m_referenceMemory);

void MemoryObject::unmap (void)
	m_vkd.unmapMemory(m_device, *m_memory);

// Flush 1-10 random subranges of the current mapping, mirroring the flush
// into the reference model before calling the driver.
void MemoryObject::randomFlush (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
	const size_t rangeCount = (size_t)rng.getInt(1, 10);
	vector<VkMappedMemoryRange> ranges (rangeCount);

	randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);

	for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
		m_referenceMemory.flush((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);

	VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));

// Invalidate 1-10 random subranges; unflushed atoms become undefined in the
// reference model.
void MemoryObject::randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
	const size_t rangeCount = (size_t)rng.getInt(1, 10);
	vector<VkMappedMemoryRange> ranges (rangeCount);

	randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);

	for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
		m_referenceMemory.invalidate((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);

	VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
// Global budget divisors for the random allocation stress test.
MAX_MEMORY_USAGE_DIV = 2, // Use only 1/2 of each memory heap.
MAX_MEMORY_ALLOC_DIV = 2, // Do not alloc more than 1/2 of available space.

// Remove the first element equal to 'val' by overwriting it with the last
// element (order is not preserved).
void removeFirstEqual (vector<T>& vec, const T& val)
	for (size_t ndx = 0; ndx < vec.size(); ndx++)
		// NOTE(review): the equality test and the pop_back/return that follow
		// this swap-with-back are not visible in this extraction.
		vec[ndx] = vec.back();

// Memory classes tracked separately by TotalMemoryTracker.
MEMORY_CLASS_SYSTEM = 0,
// \todo [2016-04-20 pyry] Consider estimating memory fragmentation
// Tracks total bytes allocated per memory class (system/device) across all
// heaps so the test stays within platform memory limits.
class TotalMemoryTracker
	TotalMemoryTracker (void)
		// Start with zero usage for every class.
		std::fill(DE_ARRAY_BEGIN(m_usage), DE_ARRAY_END(m_usage), 0);

	// Account 'size' bytes allocated from 'memClass'.
	void allocate (MemoryClass memClass, VkDeviceSize size)
		m_usage[memClass] += size;

	// Return 'size' bytes to 'memClass'; must not free more than allocated.
	void free (MemoryClass memClass, VkDeviceSize size)
		DE_ASSERT(size <= m_usage[memClass]);
		m_usage[memClass] -= size;

	VkDeviceSize getUsage (MemoryClass memClass) const
		return m_usage[memClass];

	// Sum of usage over all memory classes.
	VkDeviceSize getTotalUsage (void) const
		VkDeviceSize total = 0;
		for (int ndx = 0; ndx < MEMORY_CLASS_LAST; ++ndx)
			total += getUsage((MemoryClass)ndx);

	VkDeviceSize m_usage[MEMORY_CLASS_LAST];
// Host page size used as allocation granularity for system-memory heaps.
// NOTE(review): the body is not visible in this extraction.
VkDeviceSize getHostPageSize (void)

// Smallest atom size among the given memory types: 1 if any type is
// host-coherent (the early return inside the if is not visible here),
// otherwise nonCoherentAtomSize.
VkDeviceSize getMinAtomSize (VkDeviceSize nonCoherentAtomSize, const vector<MemoryType>& memoryTypes)
	for (size_t ndx = 0; ndx < memoryTypes.size(); ndx++)
		if ((memoryTypes[ndx].type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)

	return nonCoherentAtomSize;
// Simulates allocations from one VkMemoryHeap while respecting platform
// memory limits and the shared TotalMemoryTracker.
MemoryHeap (const VkMemoryHeap& heap,
	const vector<MemoryType>& memoryTypes,
	const PlatformMemoryLimits& memoryLimits,
	const VkDeviceSize nonCoherentAtomSize,
	TotalMemoryTracker& totalMemTracker)
	, m_memoryTypes (memoryTypes)
	, m_limits (memoryLimits)
	, m_nonCoherentAtomSize (nonCoherentAtomSize)
	, m_minAtomSize (getMinAtomSize(nonCoherentAtomSize, memoryTypes))
	, m_totalMemTracker (totalMemTracker)

// Destructor: release every remaining MemoryObject.
for (vector<MemoryObject*>::iterator iter = m_objects.begin(); iter != m_objects.end(); ++iter)

bool full (void) const;
// A heap is "empty" only if it has no usage AND could still allocate; a heap
// can be simultaneously unused and unable to satisfy a minimal allocation.
bool empty (void) const
	return m_usage == 0 && !full();

MemoryObject* allocateRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng);

MemoryObject* getRandomObject (de::Random& rng) const
	return rng.choose<MemoryObject*>(m_objects.begin(), m_objects.end());

// Free 'object', returning its device and reference usage to the trackers.
void free (MemoryObject* object)
	removeFirstEqual(m_objects, object);
	m_usage -= object->getMemoryUsage();
	m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, object->getReferenceMemoryUsage());
	m_totalMemTracker.free(getMemoryClass(), object->getMemoryUsage());

// Device-local heaps are tracked as device memory, all others as system.
MemoryClass getMemoryClass (void) const
	if ((m_heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
		return MEMORY_CLASS_DEVICE;

	return MEMORY_CLASS_SYSTEM;

const VkMemoryHeap m_heap;
const vector<MemoryType> m_memoryTypes;
const PlatformMemoryLimits& m_limits;
const VkDeviceSize m_nonCoherentAtomSize;
const VkDeviceSize m_minAtomSize;
TotalMemoryTracker& m_totalMemTracker;

VkDeviceSize m_usage;             // bytes currently allocated from this heap
vector<MemoryObject*> m_objects;  // live allocations (owned)
// Heap is full if there is not enough memory to allocate minimal memory object.
bool MemoryHeap::full (void) const
	DE_ASSERT(m_usage <= m_heap.size/MAX_MEMORY_USAGE_DIV);

	// Only a fraction of the heap may be used (MAX_MEMORY_USAGE_DIV).
	const VkDeviceSize availableInHeap = m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
	const bool isUMA = m_limits.totalDeviceLocalMemory == 0;
	const MemoryClass memClass = getMemoryClass();
	const VkDeviceSize minAllocationSize = de::max(m_minAtomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
	// Memory required for reference. One byte and one bit for each byte and one bit per each m_atomSize.
	const VkDeviceSize minReferenceSize = minAllocationSize
		+ divRoundUp<VkDeviceSize>(minAllocationSize, 8)
		+ divRoundUp<VkDeviceSize>(minAllocationSize, m_minAtomSize * 8);

	// UMA branch: device and reference memory both come from system memory.
	const VkDeviceSize totalUsage = m_totalMemTracker.getTotalUsage();
	const VkDeviceSize totalSysMem = (VkDeviceSize)m_limits.totalSystemMemory;

	DE_ASSERT(totalUsage <= totalSysMem);

	return (minAllocationSize + minReferenceSize) > (totalSysMem - totalUsage)
		|| minAllocationSize > availableInHeap;

	// Non-UMA branch: device allocations count against the heap's memory
	// class budget, reference bookkeeping against system memory.
	const VkDeviceSize totalUsage = m_totalMemTracker.getTotalUsage();
	const VkDeviceSize totalSysMem = (VkDeviceSize)m_limits.totalSystemMemory;

	const VkDeviceSize totalMemClass = memClass == MEMORY_CLASS_SYSTEM
		? m_limits.totalSystemMemory
		: m_limits.totalDeviceLocalMemory;
	const VkDeviceSize usedMemClass = m_totalMemTracker.getUsage(memClass);

	DE_ASSERT(usedMemClass <= totalMemClass);

	return minAllocationSize > availableInHeap
		|| minAllocationSize > (totalMemClass - usedMemClass)
		|| minReferenceSize > (totalSysMem - totalUsage);
// Allocate a new MemoryObject of random size from a random memory type of
// this heap, staying within heap, memory-class and system-memory budgets.
MemoryObject* MemoryHeap::allocateRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
	pair<MemoryType, VkDeviceSize> memoryTypeMaxSizePair;

	// Pick random memory type
	vector<pair<MemoryType, VkDeviceSize> > memoryTypes;

	const VkDeviceSize availableInHeap = m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
	const bool isUMA = m_limits.totalDeviceLocalMemory == 0;
	const MemoryClass memClass = getMemoryClass();

	// Collect memory types that can be allocated and the maximum size of allocation.
	// Memory type can be only allocated if minimal memory allocation is less than available memory.
	for (size_t memoryTypeNdx = 0; memoryTypeNdx < m_memoryTypes.size(); memoryTypeNdx++)
		const MemoryType type = m_memoryTypes[memoryTypeNdx];
		// Coherent types use atom size 1 (true-branch literal not visible in
		// this extraction).
		const VkDeviceSize atomSize = (type.type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
			: m_nonCoherentAtomSize;
		const VkDeviceSize allocationSizeGranularity = de::max(atomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
		const VkDeviceSize minAllocationSize = allocationSizeGranularity;
		// Reference cost: one byte, one defined-bit per byte, one flushed-bit
		// per atom.
		const VkDeviceSize minReferenceSize = minAllocationSize
			+ divRoundUp<VkDeviceSize>(minAllocationSize, 8)
			+ divRoundUp<VkDeviceSize>(minAllocationSize, atomSize * 8);

		// UMA case: allocation and its reference bookkeeping share one budget.
		// Max memory size calculation is little tricky since reference memory requires 1/n bits per byte.
		const VkDeviceSize totalUsage = m_totalMemTracker.getTotalUsage();
		const VkDeviceSize totalSysMem = (VkDeviceSize)m_limits.totalSystemMemory;
		const VkDeviceSize availableBits = (totalSysMem - totalUsage) * 8;
		// availableBits == maxAllocationSizeBits + maxAllocationReferenceSizeBits
		// maxAllocationReferenceSizeBits == maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
		// availableBits == maxAllocationSizeBits + maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
		// availableBits == 2 * maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
		// availableBits == (2 + 1/8 + 1/atomSizeBits) * maxAllocationSizeBits
		// 8 * availableBits == (16 + 1 + 8/atomSizeBits) * maxAllocationSizeBits
		// atomSizeBits * 8 * availableBits == (17 * atomSizeBits + 8) * maxAllocationSizeBits
		// maxAllocationSizeBits == atomSizeBits * 8 * availableBits / (17 * atomSizeBits + 8)
		// maxAllocationSizeBytes == maxAllocationSizeBits / 8
		// maxAllocationSizeBytes == atomSizeBits * availableBits / (17 * atomSizeBits + 8)
		// atomSizeBits = atomSize * 8
		// maxAllocationSizeBytes == atomSize * 8 * availableBits / (17 * atomSize * 8 + 8)
		// maxAllocationSizeBytes == atomSize * availableBits / (17 * atomSize + 1)
		const VkDeviceSize maxAllocationSize = roundDownToMultiple(((atomSize * availableBits) / (17 * atomSize + 1)), allocationSizeGranularity);

		DE_ASSERT(totalUsage <= totalSysMem);
		DE_ASSERT(maxAllocationSize <= totalSysMem);

		if (minAllocationSize + minReferenceSize <= (totalSysMem - totalUsage) && minAllocationSize <= availableInHeap)
			DE_ASSERT(maxAllocationSize >= minAllocationSize);
			memoryTypes.push_back(std::make_pair(type, maxAllocationSize));

		// Non-UMA case: reference bookkeeping lives in system memory only.
		// Max memory size calculation is little tricky since reference memory requires 1/n bits per byte.
		const VkDeviceSize totalUsage = m_totalMemTracker.getTotalUsage();
		const VkDeviceSize totalSysMem = (VkDeviceSize)m_limits.totalSystemMemory;

		const VkDeviceSize totalMemClass = memClass == MEMORY_CLASS_SYSTEM
			? m_limits.totalSystemMemory
			: m_limits.totalDeviceLocalMemory;
		const VkDeviceSize usedMemClass = m_totalMemTracker.getUsage(memClass);
		// availableRefBits = maxRefBits + maxRefBits/8 + maxRefBits/atomSizeBits
		// availableRefBits = maxRefBits * (1 + 1/8 + 1/atomSizeBits)
		// 8 * availableRefBits = maxRefBits * (8 + 1 + 8/atomSizeBits)
		// 8 * atomSizeBits * availableRefBits = maxRefBits * (9 * atomSizeBits + 8)
		// maxRefBits = 8 * atomSizeBits * availableRefBits / (9 * atomSizeBits + 8)
		// atomSizeBits = atomSize * 8
		// maxRefBits = 8 * atomSize * 8 * availableRefBits / (9 * atomSize * 8 + 8)
		// maxRefBits = atomSize * 8 * availableRefBits / (9 * atomSize + 1)
		// maxRefBytes = atomSize * availableRefBits / (9 * atomSize + 1)
		const VkDeviceSize maxAllocationSize = roundDownToMultiple(de::min(totalMemClass - usedMemClass, (atomSize * 8 * (totalSysMem - totalUsage)) / (9 * atomSize + 1)), allocationSizeGranularity);

		DE_ASSERT(usedMemClass <= totalMemClass);

		if (minAllocationSize <= availableInHeap
			&& minAllocationSize <= (totalMemClass - usedMemClass)
			&& minReferenceSize <= (totalSysMem - totalUsage))
			DE_ASSERT(maxAllocationSize >= minAllocationSize);
			memoryTypes.push_back(std::make_pair(type, maxAllocationSize));

	// Choose uniformly among the eligible (type, max size) candidates.
	memoryTypeMaxSizePair = rng.choose<pair<MemoryType, VkDeviceSize> >(memoryTypes.begin(), memoryTypes.end());

	const MemoryType type = memoryTypeMaxSizePair.first;
	// Cap individual allocations to a fraction of the remaining budget.
	const VkDeviceSize maxAllocationSize = memoryTypeMaxSizePair.second / MAX_MEMORY_ALLOC_DIV;
	const VkDeviceSize atomSize = (type.type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
		: m_nonCoherentAtomSize;
	const VkDeviceSize allocationSizeGranularity = de::max(atomSize, getMemoryClass() == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
	const VkDeviceSize size = randomSize(rng, atomSize, maxAllocationSize);
	const VkDeviceSize memoryUsage = roundUpToMultiple(size, allocationSizeGranularity);
	const VkDeviceSize referenceMemoryUsage = size + divRoundUp<VkDeviceSize>(size, 8) + divRoundUp<VkDeviceSize>(size / atomSize, 8);

	DE_ASSERT(size <= maxAllocationSize);

	MemoryObject* const object = new MemoryObject(vkd, device, size, type.index, atomSize, memoryUsage, referenceMemoryUsage);

	// Record the allocation in both the heap and the global trackers.
	m_usage += memoryUsage;
	m_totalMemTracker.allocate(getMemoryClass(), memoryUsage);
	m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, referenceMemoryUsage);
	m_objects.push_back(object);
// Host memory cost of one MemoryObject: driver footprint of a VkDeviceMemory
// plus the C++ object and its shared-pointer control overhead.
size_t getMemoryObjectSystemSize (Context& context)
	return computeDeviceMemorySystemMemFootprint(context.getDeviceInterface(), context.getDevice())
		+ sizeof(MemoryObject)
		+ sizeof(de::SharedPtr<MemoryObject>);

// Host memory cost of one MemoryMapping object (no driver component).
size_t getMemoryMappingSystemSize (void)
	return sizeof(MemoryMapping) + sizeof(de::SharedPtr<MemoryMapping>);
1103 class RandomMemoryMappingInstance : public TestInstance
// Stress test: performs a pseudo-random sequence of allocate / map / read /
// write / modify / flush / invalidate / unmap / free operations on
// host-visible memory, keeping device- and host-memory usage within the
// platform limits reported by getMemoryLimits().
// \note This view is gap-elided (original line numbers are not contiguous);
// braces, some case labels and some members (e.g. m_rng, m_opNdx) fall in
// the gaps.
1106 RandomMemoryMappingInstance (Context& context, deUint32 seed)
1107 : TestInstance (context)
1108 , m_memoryObjectSysMemSize (getMemoryObjectSystemSize(context))
1109 , m_memoryMappingSysMemSize (getMemoryMappingSystemSize())
1110 , m_memoryLimits (getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
1114 const VkPhysicalDevice physicalDevice = context.getPhysicalDevice();
1115 const InstanceInterface& vki = context.getInstanceInterface();
1116 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
1117 // \todo [2016-05-26 misojarvi] Remove zero check once drivers report correctly 1 instead of 0
1118 const VkDeviceSize nonCoherentAtomSize = context.getDeviceProperties().limits.nonCoherentAtomSize != 0
1119 ? context.getDeviceProperties().limits.nonCoherentAtomSize
// Group host-visible memory types by their heap; non-host-visible types
// cannot be mapped and are ignored.
1124 vector<vector<MemoryType> > memoryTypes (memoryProperties.memoryHeapCount);
1126 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < memoryProperties.memoryTypeCount; memoryTypeNdx++)
1128 if (memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
1129 memoryTypes[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].push_back(MemoryType(memoryTypeNdx, memoryProperties.memoryTypes[memoryTypeNdx]));
// Create a MemoryHeap wrapper for every heap that has at least one mappable type.
1132 for (deUint32 heapIndex = 0; heapIndex < memoryProperties.memoryHeapCount; heapIndex++)
1134 const VkMemoryHeap heapInfo = memoryProperties.memoryHeaps[heapIndex];
1136 if (!memoryTypes[heapIndex].empty())
1138 const de::SharedPtr<MemoryHeap> heap (new MemoryHeap(heapInfo, memoryTypes[heapIndex], m_memoryLimits, nonCoherentAtomSize, m_totalMemTracker));
// A freshly created, empty heap must have room for at least one allocation.
1140 TCU_CHECK_INTERNAL(!heap->full());
1142 m_memoryHeaps.push_back(heap);
1148 ~RandomMemoryMappingInstance (void)
// Runs one random operation per call; reports pass once opCount operations
// have been performed (incomplete until then).
1152 tcu::TestStatus iterate (void)
1154 const size_t opCount = 100;
// The trailing comments give the approximate overall probability of each
// branch once the probabilities of the earlier branches are factored in.
1155 const float memoryOpProbability = 0.5f; // 0.50
1156 const float flushInvalidateProbability = 0.4f; // 0.20
1157 const float mapProbability = 0.50f; // 0.15
1158 const float unmapProbability = 0.25f; // 0.075
1160 const float allocProbability = 0.75f; // Versus free
1162 const VkDevice device = m_context.getDevice();
1163 const DeviceInterface& vkd = m_context.getDeviceInterface();
// On systems with no dedicated device-local memory everything lives in
// system memory, so count total usage against the system-memory budget.
1165 const VkDeviceSize sysMemUsage = (m_memoryLimits.totalDeviceLocalMemory == 0)
1166 ? m_totalMemTracker.getTotalUsage()
1167 : m_totalMemTracker.getUsage(MEMORY_CLASS_SYSTEM);
1169 if (!m_memoryMappings.empty() && m_rng.getFloat() < memoryOpProbability)
1171 // Perform operations on mapped memory
1172 MemoryMapping* const mapping = m_rng.choose<MemoryMapping*>(m_memoryMappings.begin(), m_memoryMappings.end());
// Pick a random read/write/modify operation (switch body is gap-elided).
1182 const Op op = (Op)(m_rng.getUint32() % OP_LAST);
1187 mapping->randomRead(m_rng);
1191 mapping->randomWrite(m_rng);
1195 mapping->randomModify(m_rng);
1199 DE_FATAL("Invalid operation");
// Flush or invalidate random ranges of a randomly chosen mapped object.
1202 else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < flushInvalidateProbability)
1204 MemoryObject* const object = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());
1206 if (m_rng.getBool())
1207 object->randomFlush(vkd, device, m_rng);
1209 object->randomInvalidate(vkd, device, m_rng);
1211 else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < unmapProbability)
1213 // Unmap memory object
1214 MemoryObject* const object = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());
// Drop the object's mapping from the tracked lists and release the
// host-memory bookkeeping that was reserved when it was mapped.
1217 removeFirstEqual(m_memoryMappings, object->getMapping());
1220 removeFirstEqual(m_mappedMemoryObjects, object);
1221 m_nonMappedMemoryObjects.push_back(object);
1223 m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
// Map only if the MemoryMapping bookkeeping still fits in the system-memory budget.
1225 else if (!m_nonMappedMemoryObjects.empty() &&
1226 (m_rng.getFloat() < mapProbability) &&
1227 (sysMemUsage+m_memoryMappingSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory))
1229 // Map memory object
1230 MemoryObject* const object = m_rng.choose<MemoryObject*>(m_nonMappedMemoryObjects.begin(), m_nonMappedMemoryObjects.end());
1231 MemoryMapping* mapping = object->mapRandom(vkd, device, m_rng);
1233 m_memoryMappings.push_back(mapping);
1234 m_mappedMemoryObjects.push_back(object);
1235 removeFirstEqual(m_nonMappedMemoryObjects, object);
1237 m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
1241 // Sort heaps based on capacity (full or not)
1242 vector<MemoryHeap*> nonFullHeaps;
1243 vector<MemoryHeap*> nonEmptyHeaps;
1245 if (sysMemUsage+m_memoryObjectSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory)
1247 // For the duration of sorting reserve MemoryObject space from system memory
1248 m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
1250 for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
1251 heapIter != m_memoryHeaps.end();
1254 if (!(*heapIter)->full())
1255 nonFullHeaps.push_back(heapIter->get());
1257 if (!(*heapIter)->empty())
1258 nonEmptyHeaps.push_back(heapIter->get());
// Release the temporary reservation made above.
1261 m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
1265 // Not possible to even allocate MemoryObject from system memory, look for non-empty heaps
1266 for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
1267 heapIter != m_memoryHeaps.end();
1270 if (!(*heapIter)->empty())
1271 nonEmptyHeaps.push_back(heapIter->get());
// Prefer allocating while capacity remains; otherwise free a random object.
1275 if (!nonFullHeaps.empty() && (nonEmptyHeaps.empty() || m_rng.getFloat() < allocProbability))
1277 // Reserve MemoryObject from sys mem first
1278 m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
1280 // Allocate more memory objects
1281 MemoryHeap* const heap = m_rng.choose<MemoryHeap*>(nonFullHeaps.begin(), nonFullHeaps.end());
1282 MemoryObject* const object = heap->allocateRandom(vkd, device, m_rng);
1284 m_nonMappedMemoryObjects.push_back(object);
1288 // Free memory objects
1289 MemoryHeap* const heap = m_rng.choose<MemoryHeap*>(nonEmptyHeaps.begin(), nonEmptyHeaps.end());
1290 MemoryObject* const object = heap->getRandomObject(m_rng);
// If the object is currently mapped, also release its mapping bookkeeping.
1293 if (object->getMapping())
1295 removeFirstEqual(m_memoryMappings, object->getMapping());
1296 m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, m_memoryMappingSysMemSize);
1299 removeFirstEqual(m_mappedMemoryObjects, object);
1300 removeFirstEqual(m_nonMappedMemoryObjects, object);
1303 m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
// m_opNdx is presumably incremented in a gap-elided line above -- TODO confirm.
1308 if (m_opNdx == opCount)
1309 return tcu::TestStatus::pass("Pass");
1311 return tcu::TestStatus::incomplete();
// Host-side bookkeeping sizes, computed once at construction.
1315 const size_t m_memoryObjectSysMemSize;
1316 const size_t m_memoryMappingSysMemSize;
1317 const PlatformMemoryLimits m_memoryLimits;
// Tracks total usage per memory class (device vs. system).
1322 TotalMemoryTracker m_totalMemTracker;
1323 vector<de::SharedPtr<MemoryHeap> > m_memoryHeaps;
// Live objects partitioned by mapping state; active mappings tracked separately.
1325 vector<MemoryObject*> m_mappedMemoryObjects;
1326 vector<MemoryObject*> m_nonMappedMemoryObjects;
1327 vector<MemoryMapping*> m_memoryMappings;
// Enumerators of the Op selector consumed by subMappedConfig() and the test
// table in createMappingTests() (the enum's declaration line is gap-elided).
1336 OP_SUB_FLUSH_SEPARATE,
1337 OP_SUB_FLUSH_OVERLAPPING,
1341 OP_SUB_INVALIDATE_SEPARATE,
1342 OP_SUB_INVALIDATE_OVERLAPPING,
// Builds a TestConfig that maps [mapping.offset, mapping.offset+mapping.size)
// of an allocationSize-byte allocation and applies the flush/invalidate
// pattern selected by 'op'.
// \note Several case labels fall in gap-elided lines; the comments below
// describe only the visible range arithmetic.
1349 TestConfig subMappedConfig (VkDeviceSize allocationSize,
1350 const MemoryRange& mapping,
1356 config.allocationSize = allocationSize;
1358 config.mapping = mapping;
1359 config.remap = false;
1367 config.remap = true;
// Flush the entire mapped range.
1371 config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
// Sub-flush: the middle half of the mapped range (requires size >= 4).
1375 DE_ASSERT(mapping.size / 4 > 0);
1377 config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
1380 case OP_SUB_FLUSH_SEPARATE:
// Two non-overlapping flushes covering the upper then the lower half.
1381 DE_ASSERT(mapping.size / 2 > 0);
1383 config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
1384 config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
1388 case OP_SUB_FLUSH_OVERLAPPING:
// Two flushes that overlap in (roughly) the middle third of the range.
1389 DE_ASSERT((mapping.size / 3) > 0);
1391 config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
1392 config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
// Flush then invalidate the whole mapped range (presumably the OP_INVALIDATE
// case -- its label is gap-elided).
1397 config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
1398 config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
1401 case OP_SUB_INVALIDATE:
// Flush then invalidate the same middle half of the range.
1402 DE_ASSERT(mapping.size / 4 > 0);
1404 config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
1405 config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
1408 case OP_SUB_INVALIDATE_SEPARATE:
// Non-overlapping flushes followed by matching non-overlapping invalidates.
1409 DE_ASSERT(mapping.size / 2 > 0);
1411 config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
1412 config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
1414 config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
1415 config.invalidateMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
1419 case OP_SUB_INVALIDATE_OVERLAPPING:
// Overlapping flushes followed by matching overlapping invalidates.
1420 DE_ASSERT((mapping.size / 3) > 0);
1422 config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
1423 config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
1425 config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
1426 config.invalidateMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
// Unreachable for valid ops; keeps the compiler satisfied about a return value.
1431 DE_FATAL("Unknown Op");
1432 return TestConfig();
// Convenience wrapper: same as subMappedConfig() but maps the whole allocation
// ([0, allocationSize)).
1436 TestConfig fullMappedConfig (VkDeviceSize allocationSize,
1440 return subMappedConfig(allocationSize, MemoryRange(0, allocationSize), op, seed);
// Builds the "mapping" test group hierarchy: "full" (map entire allocation),
// "sub" (map a sub-range), and "random" (randomized stress instances).
1445 tcu::TestCaseGroup* createMappingTests (tcu::TestContext& testCtx)
1447 de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, "mapping", "Memory mapping tests."));
// Allocation sizes, mapping offsets and mapping sizes are deliberately odd
// values so they do not line up with typical page/atom granularities.
1449 const VkDeviceSize allocationSizes[] =
1451 33, 257, 4087, 8095, 1*1024*1024 + 1
1454 const VkDeviceSize offsets[] =
1456 0, 17, 129, 255, 1025, 32*1024+1
1459 const VkDeviceSize sizes[] =
1461 31, 255, 1025, 4085, 1*1024*1024 - 1
// Field of the gap-elided { Op op; const char* name; } struct declared for
// the op/name test table that follows.
1467 const char* const name;
// Op/name table driving the "full" and "sub" groups.
// NOTE(review): the "*_overlapping" entries previously reused the *_SEPARATE
// ops, so OP_SUB_FLUSH_OVERLAPPING and OP_SUB_INVALIDATE_OVERLAPPING --
// declared in the Op enum and handled in subMappedConfig() -- were never
// exercised, and each "overlapping" case silently duplicated its "separate"
// sibling. Fixed to use the matching ops.
1470 { OP_NONE, "simple" },
1471 { OP_REMAP, "remap" },
1472 { OP_FLUSH, "flush" },
1473 { OP_SUB_FLUSH, "subflush" },
1474 { OP_SUB_FLUSH_SEPARATE, "subflush_separate" },
1475 { OP_SUB_FLUSH_OVERLAPPING, "subflush_overlapping" },
1477 { OP_INVALIDATE, "invalidate" },
1478 { OP_SUB_INVALIDATE, "subinvalidate" },
1479 { OP_SUB_INVALIDATE_SEPARATE, "subinvalidate_separate" },
1480 { OP_SUB_INVALIDATE_OVERLAPPING, "subinvalidate_overlapping" }
// "full" group: map each allocation completely, run every op against it.
1485 de::MovePtr<tcu::TestCaseGroup> fullGroup (new tcu::TestCaseGroup(testCtx, "full", "Map memory completely."));
1487 for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
1489 const VkDeviceSize allocationSize = allocationSizes[allocationSizeNdx];
1490 de::MovePtr<tcu::TestCaseGroup> allocationSizeGroup (new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
1492 for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
1494 const Op op = ops[opNdx].op;
1495 const char* const name = ops[opNdx].name;
// NOTE(review): opNdx * allocationSizeNdx yields seed 0 for the entire first
// allocation size and duplicate seeds elsewhere -- consider a better mix.
1496 const deUint32 seed = (deUint32)(opNdx * allocationSizeNdx);
1497 const TestConfig config = fullMappedConfig(allocationSize, op, seed);
1499 addFunctionCase(allocationSizeGroup.get(), name, name, testMemoryMapping, config);
1502 fullGroup->addChild(allocationSizeGroup.release());
1505 group->addChild(fullGroup.release());
// "sub" group: map only [offset, offset+size) of the allocation.
1510 de::MovePtr<tcu::TestCaseGroup> subGroup (new tcu::TestCaseGroup(testCtx, "sub", "Map part of the memory."));
1512 for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
1514 const VkDeviceSize allocationSize = allocationSizes[allocationSizeNdx];
1515 de::MovePtr<tcu::TestCaseGroup> allocationSizeGroup (new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));
1517 for (size_t offsetNdx = 0; offsetNdx < DE_LENGTH_OF_ARRAY(offsets); offsetNdx++)
1519 const VkDeviceSize offset = offsets[offsetNdx];
// Skip combinations that do not fit in the allocation (the loop-control
// statement itself is presumably in a gap-elided line).
1521 if (offset >= allocationSize)
1524 de::MovePtr<tcu::TestCaseGroup> offsetGroup (new tcu::TestCaseGroup(testCtx, ("offset_" + de::toString(offset)).c_str(), ""));
1526 for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
1528 const VkDeviceSize size = sizes[sizeNdx];
1530 if (offset + size > allocationSize)
// A full mapping is already covered by the "full" group above.
1533 if (offset == 0 && size == allocationSize)
1536 de::MovePtr<tcu::TestCaseGroup> sizeGroup (new tcu::TestCaseGroup(testCtx, ("size_" + de::toString(size)).c_str(), ""));
1538 for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
// NOTE(review): same weak seed formula as in the "full" group.
1540 const deUint32 seed = (deUint32)(opNdx * allocationSizeNdx);
1541 const Op op = ops[opNdx].op;
1542 const char* const name = ops[opNdx].name;
1543 const TestConfig config = subMappedConfig(allocationSize, MemoryRange(offset, size), op, seed);
1545 addFunctionCase(sizeGroup.get(), name, name, testMemoryMapping, config);
1548 offsetGroup->addChild(sizeGroup.release());
1551 allocationSizeGroup->addChild(offsetGroup.release());
1554 subGroup->addChild(allocationSizeGroup.release());
1557 group->addChild(subGroup.release());
// "random" group: 100 randomized stress instances; the RNG seed is fixed so
// the generated case list is stable across runs.
1562 de::MovePtr<tcu::TestCaseGroup> randomGroup (new tcu::TestCaseGroup(testCtx, "random", "Random memory mapping tests."));
1563 de::Random rng (3927960301u);
1565 for (size_t ndx = 0; ndx < 100; ndx++)
1567 const deUint32 seed = rng.getUint32();
// NOTE(review): 'name' is computed but de::toString(ndx) is passed directly below.
1568 const std::string name = de::toString(ndx);
1570 randomGroup->addChild(new InstanceFactory1<RandomMemoryMappingInstance, deUint32>(testCtx, tcu::NODETYPE_SELF_VALIDATE, de::toString(ndx), "Random case", seed));
1573 group->addChild(randomGroup.release());
1576 return group.release();