Code fixes from @jbolz for the memory tests:

 * Create images with VK_IMAGE_CREATE_ALIAS_BIT in the aliased binding
   tests and transition the second alias before writing through the
   first one.
 * Align mapped-range offsets down and sizes up to nonCoherentAtomSize,
   and fall back to VK_WHOLE_SIZE for ranges that would extend past the
   mapping.
 * Reject 1D images in the sparse residency check and cube-compatible
   images in the multi-plane support check.

Components: Vulkan
Affects: dEQP-VK.memory.*
Change-Id: I24bb18c90358465a5e02dce92885d26d9807780e
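
For reference, every flush/invalidate range in the mapping tests now follows
the same alignment rule. The helper below is a sketch for illustration only
(alignRange is not part of this change; the types come from vkDefs.hpp and
deDefs.h):

    // Offsets are rounded down and sizes rounded up to nonCoherentAtomSize;
    // a range that would run past the mapped region falls back to VK_WHOLE_SIZE.
    VkMappedMemoryRange alignRange (VkDeviceMemory memory,
                                    VkDeviceSize   offset,
                                    VkDeviceSize   size,
                                    VkDeviceSize   atomSize,
                                    VkDeviceSize   mappingSize)
    {
        VkMappedMemoryRange range;
        range.sType  = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
        range.pNext  = DE_NULL;
        range.memory = memory;
        range.offset = (offset % atomSize == 0) ? offset : offset - (offset % atomSize);
        range.size   = (size   % atomSize == 0) ? size   : size + (atomSize - (size % atomSize));
        if (offset + size > mappingSize)
            range.size = VK_WHOLE_SIZE;
        return range;
    }
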
VkDeviceSize bufferSize;
VkExtent3D imageSize;
deUint32 targetsCount;
+ VkImageCreateFlags imageCreateFlags;
};
BindingCaseParameters makeBindingCaseParameters (deUint32 targetsCount,
deUint32 width,
- deUint32 height)
+ deUint32 height,
+ VkImageCreateFlags imageCreateFlags)
{
BindingCaseParameters params;
deMemset(&params, 0, sizeof(BindingCaseParameters));
params.bufferSize = params.imageSize.width * params.imageSize.height * params.imageSize.depth * sizeof(deUint32);
params.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
params.targetsCount = targetsCount;
+ params.imageCreateFlags = imageCreateFlags;
return params;
}
BindingCaseParameters makeBindingCaseParameters (deUint32 targetsCount,
VkBufferUsageFlags usage,
VkSharingMode sharing,
- VkDeviceSize bufferSize)
+ VkDeviceSize bufferSize,
+ VkImageCreateFlags imageCreateFlags)
{
BindingCaseParameters params =
{
sharing, // VkSharingMode sharing;
bufferSize, // VkDeviceSize bufferSize;
{0u, 0u, 0u}, // VkExtent3D imageSize;
- targetsCount // deUint32 targetsCount;
+ targetsCount, // deUint32 targetsCount;
+ imageCreateFlags, // VkImageCreateFlags imageCreateFlags;
};
return params;
}
{
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
DE_NULL, // const void* pNext;
- 0u, // VkImageCreateFlags flags;
+ params.imageCreateFlags, // VkImageCreateFlags flags;
VK_IMAGE_TYPE_2D, // VkImageType imageType;
VK_FORMAT_R8G8B8A8_UINT, // VkFormat format;
params.imageSize, // VkExtent3D extent;
submitCommandsAndWait(vk, vkDevice, queue, *cmdBuffer);
}
+
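+// Transition an image to TRANSFER_DST_OPTIMAL before it is used through an alias; buffers have no image layout, so the VkBuffer specialization is a no-op.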
+template <typename TTarget>
+void layoutTransitionResource (Move<TTarget>& target,
+ Context& ctx);
+
+template <>
+void layoutTransitionResource (Move<VkBuffer>& target,
+ Context& ctx)
+{
+ DE_UNREF(target);
+ DE_UNREF(ctx);
+}
+
+template <>
+void layoutTransitionResource<VkImage> (Move<VkImage>& target,
+ Context& ctx)
+{
+ const DeviceInterface& vk = ctx.getDeviceInterface();
+ const VkDevice vkDevice = ctx.getDevice();
+ const VkQueue queue = ctx.getUniversalQueue();
+
+ const VkImageMemoryBarrier preImageBarrier = makeMemoryBarrierInfo(*target, 0u, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
+
+ Move<VkCommandPool> commandPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, 0);
+ Move<VkCommandBuffer> cmdBuffer = createCommandBuffer(vk, vkDevice, *commandPool);
+
+ beginCommandBuffer(vk, *cmdBuffer);
+ vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
+ endCommandBuffer(vk, *cmdBuffer);
+
+ submitCommandsAndWait(vk, vkDevice, queue, *cmdBuffer);
+}
+
void createBuffer (Move<VkBuffer>& buffer,
Move<VkDeviceMemory>& memory,
Context& ctx,
deBool passed = DE_TRUE;
for (deUint32 i = 0; passed && i < m_params.targetsCount; ++i)
{
+ // Do a layout transition on alias 1 before we transition and write to alias 0
+ layoutTransitionResource(*(targets[1][i]), m_context);
fillUpResource(srcBuffer, *(targets[0][i]), m_context, m_params);
readUpResource(*(targets[1][i]), dstBuffer, m_context, m_params);
passed = checkData(*dstMemory, 2, m_context, m_params);
for (deUint32 sizeNdx = 0u; sizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); ++sizeNdx )
{
const VkDeviceSize bufferSize = allocationSizes[sizeNdx];
- const BindingCaseParameters params = makeBindingCaseParameters(10, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_SHARING_MODE_EXCLUSIVE, bufferSize);
+ const BindingCaseParameters params = makeBindingCaseParameters(10, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_SHARING_MODE_EXCLUSIVE, bufferSize, 0u);
+ const BindingCaseParameters aliasparams = makeBindingCaseParameters(10, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, VK_SHARING_MODE_EXCLUSIVE, bufferSize, VK_IMAGE_CREATE_ALIAS_BIT);
std::ostringstream testName;
testName << "buffer_" << bufferSize;
regular_suballocated->addChild(new MemoryBindingTest<MemoryBindingInstance<VkBuffer, DE_FALSE> >(testCtx, testName.str(), " ", params));
regular_dedicated->addChild(new MemoryBindingTest<MemoryBindingInstance<VkBuffer, DE_TRUE> >(testCtx, testName.str(), " ", params));
- aliasing_suballocated->addChild(new MemoryBindingTest<AliasedMemoryBindingInstance<VkBuffer, DE_FALSE> >(testCtx, testName.str(), " ", params));
+ aliasing_suballocated->addChild(new MemoryBindingTest<AliasedMemoryBindingInstance<VkBuffer, DE_FALSE> >(testCtx, testName.str(), " ", aliasparams));
}
const deUint32 imageSizes[] = { 8, 33, 257 };
{
const deUint32 width = imageSizes[widthNdx];
const deUint32 height = imageSizes[heightNdx];
- const BindingCaseParameters regularparams = makeBindingCaseParameters(10, width, height);
+ const BindingCaseParameters regularparams = makeBindingCaseParameters(10, width, height, 0u);
+ const BindingCaseParameters aliasparams = makeBindingCaseParameters(10, width, height, VK_IMAGE_CREATE_ALIAS_BIT);
std::ostringstream testName;
testName << "image_" << width << '_' << height;
regular_suballocated->addChild(new MemoryBindingTest<MemoryBindingInstance<VkImage, DE_FALSE> >(testCtx, testName.str(), " ", regularparams));
regular_dedicated->addChild(new MemoryBindingTest<MemoryBindingInstance<VkImage, DE_TRUE> >(testCtx, testName.str(), "", regularparams));
- aliasing_suballocated->addChild(new MemoryBindingTest<AliasedMemoryBindingInstance<VkImage, DE_FALSE> >(testCtx, testName.str(), " ", regularparams));
+ aliasing_suballocated->addChild(new MemoryBindingTest<AliasedMemoryBindingInstance<VkImage, DE_FALSE> >(testCtx, testName.str(), " ", aliasparams));
}
regular->addChild(regular_suballocated.release());
const tcu::ScopedLogSection section (log, "MemoryType" + de::toString(memoryTypeIndex), "MemoryType" + de::toString(memoryTypeIndex));
const vk::VkMemoryType& memoryType = memoryProperties.memoryTypes[memoryTypeIndex];
const VkMemoryHeap& memoryHeap = memoryProperties.memoryHeaps[memoryType.heapIndex];
- const VkDeviceSize atomSize = (memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
- ? 1
- : nonCoherentAtomSize;
+ const VkDeviceSize atomSize = nonCoherentAtomSize;
VkDeviceSize allocationSize = (config.allocationSize % atomSize == 0) ? config.allocationSize : config.allocationSize + (atomSize - (config.allocationSize % atomSize));
vk::VkMemoryRequirements req =
}
allocationSize = req.size;
VkDeviceSize mappingSize = (config.mapping.size % atomSize == 0) ? config.mapping.size : config.mapping.size + (atomSize - (config.mapping.size % atomSize));
- VkDeviceSize mappingOffset = (config.mapping.offset % atomSize == 0) ? config.mapping.offset : config.mapping.offset + (atomSize - (config.mapping.offset % atomSize));
+ VkDeviceSize mappingOffset = (config.mapping.offset % atomSize == 0) ? config.mapping.offset : config.mapping.offset - (config.mapping.offset % atomSize);
if (config.mapping.size == config.allocationSize && config.mapping.offset == 0u)
{
mappingSize = allocationSize;
for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
{
- const VkDeviceSize offset = (config.flushMappings[ndx].offset % atomSize == 0) ? config.flushMappings[ndx].offset : config.flushMappings[ndx].offset + (atomSize - (config.flushMappings[ndx].offset % atomSize));
+ const VkDeviceSize offset = (config.flushMappings[ndx].offset % atomSize == 0) ? config.flushMappings[ndx].offset : config.flushMappings[ndx].offset - (config.flushMappings[ndx].offset % atomSize);
const VkDeviceSize size = (config.flushMappings[ndx].size % atomSize == 0) ? config.flushMappings[ndx].size : config.flushMappings[ndx].size + (atomSize - (config.flushMappings[ndx].size % atomSize));
log << TestLog::Message << "\tOffset: " << offset << ", Size: " << size << TestLog::EndMessage;
}
for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
{
- const VkDeviceSize offset = (config.invalidateMappings[ndx].offset % atomSize == 0) ? config.invalidateMappings[ndx].offset : config.invalidateMappings[ndx].offset + (atomSize - (config.invalidateMappings[ndx].offset % atomSize));
+ const VkDeviceSize offset = (config.invalidateMappings[ndx].offset % atomSize == 0) ? config.invalidateMappings[ndx].offset : config.invalidateMappings[ndx].offset - (config.invalidateMappings[ndx].offset % atomSize);
const VkDeviceSize size = (config.invalidateMappings[ndx].size % atomSize == 0) ? config.invalidateMappings[ndx].size : config.invalidateMappings[ndx].size + (atomSize - (config.invalidateMappings[ndx].size % atomSize));
log << TestLog::Message << "\tOffset: " << offset << ", Size: " << size << TestLog::EndMessage;
}
DE_NULL,
*memory,
- (config.flushMappings[ndx].offset % atomSize == 0) ? config.flushMappings[ndx].offset : config.flushMappings[ndx].offset + (atomSize - (config.flushMappings[ndx].offset % atomSize)),
+ (config.flushMappings[ndx].offset % atomSize == 0) ? config.flushMappings[ndx].offset : config.flushMappings[ndx].offset - (config.flushMappings[ndx].offset % atomSize),
(config.flushMappings[ndx].size % atomSize == 0) ? config.flushMappings[ndx].size : config.flushMappings[ndx].size + (atomSize - (config.flushMappings[ndx].size % atomSize)),
};
DE_NULL,
*memory,
- (config.invalidateMappings[ndx].offset % atomSize == 0) ? config.invalidateMappings[ndx].offset : config.invalidateMappings[ndx].offset + (atomSize - (config.invalidateMappings[ndx].offset % atomSize)),
+ (config.invalidateMappings[ndx].offset % atomSize == 0) ? config.invalidateMappings[ndx].offset : config.invalidateMappings[ndx].offset - (config.invalidateMappings[ndx].offset % atomSize),
(config.invalidateMappings[ndx].size % atomSize == 0) ? config.invalidateMappings[ndx].size : config.invalidateMappings[ndx].size + (atomSize - (config.invalidateMappings[ndx].size % atomSize)),
};
return 4096;
}
-VkDeviceSize getMinAtomSize (VkDeviceSize nonCoherentAtomSize, const vector<MemoryType>& memoryTypes)
-{
- for (size_t ndx = 0; ndx < memoryTypes.size(); ndx++)
- {
- if ((memoryTypes[ndx].type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0)
- return 1;
- }
-
- return nonCoherentAtomSize;
-}
-
class MemoryHeap
{
public:
, m_memoryTypes (memoryTypes)
, m_limits (memoryLimits)
, m_nonCoherentAtomSize (nonCoherentAtomSize)
- , m_minAtomSize (getMinAtomSize(nonCoherentAtomSize, memoryTypes))
+ , m_minAtomSize (nonCoherentAtomSize)
, m_totalMemTracker (totalMemTracker)
, m_usage (0)
{
for (size_t memoryTypeNdx = 0; memoryTypeNdx < m_memoryTypes.size(); memoryTypeNdx++)
{
const MemoryType type = m_memoryTypes[memoryTypeNdx];
- const VkDeviceSize atomSize = (type.type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
- ? 1
- : m_nonCoherentAtomSize;
+ const VkDeviceSize atomSize = m_nonCoherentAtomSize;
const VkDeviceSize allocationSizeGranularity = de::max(atomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
const VkDeviceSize minAllocationSize = allocationSizeGranularity;
const VkDeviceSize minReferenceSize = minAllocationSize
const MemoryType type = memoryTypeMaxSizePair.first;
const VkDeviceSize maxAllocationSize = memoryTypeMaxSizePair.second / MAX_MEMORY_ALLOC_DIV;
- const VkDeviceSize atomSize = (type.type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) != 0
- ? 1
- : m_nonCoherentAtomSize;
+ const VkDeviceSize atomSize = m_nonCoherentAtomSize;
const VkDeviceSize allocationSizeGranularity = de::max(atomSize, getMemoryClass() == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
const VkDeviceSize size = randomSize(rng, atomSize, maxAllocationSize);
const VkDeviceSize memoryUsage = roundUpToMultiple(size, allocationSizeGranularity);
switch (op)
{
case OP_NONE:
- return config;
+ break;
case OP_REMAP:
config.remap = true;
- return config;
+ break;
case OP_FLUSH:
config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
- return config;
+ break;
case OP_SUB_FLUSH:
DE_ASSERT(mapping.size / 4 > 0);
config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
- return config;
+ break;
case OP_SUB_FLUSH_SEPARATE:
DE_ASSERT(mapping.size / 2 > 0);
config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
- return config;
+ break;
case OP_SUB_FLUSH_OVERLAPPING:
DE_ASSERT((mapping.size / 3) > 0);
config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
- return config;
+ break;
case OP_INVALIDATE:
config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
- return config;
+ break;
case OP_SUB_INVALIDATE:
DE_ASSERT(mapping.size / 4 > 0);
config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
- return config;
+ break;
case OP_SUB_INVALIDATE_SEPARATE:
DE_ASSERT(mapping.size / 2 > 0);
config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
config.invalidateMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));
- return config;
+ break;
case OP_SUB_INVALIDATE_OVERLAPPING:
DE_ASSERT((mapping.size / 3) > 0);
config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
config.invalidateMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));
- return config;
+ break;
default:
DE_FATAL("Unknown Op");
return TestConfig();
}
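+ // Flush/invalidate ranges that would extend past the mapped region use VK_WHOLE_SIZE (to the end of the mapping) instead.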
+ for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
+ {
+ if (config.flushMappings[ndx].offset + config.flushMappings[ndx].size > mapping.size)
+ {
+ config.flushMappings[ndx].size = VK_WHOLE_SIZE;
+ }
+ }
+ for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
+ {
+ if (config.invalidateMappings[ndx].offset + config.invalidateMappings[ndx].size > mapping.size)
+ {
+ config.invalidateMappings[ndx].size = VK_WHOLE_SIZE;
+ }
+ }
+ return config;
}
TestConfig fullMappedConfig (VkDeviceSize allocationSize,
{
DE_ASSERT(info.tiling == VK_IMAGE_TILING_OPTIMAL);
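+ // Sparse residency is not supported for 1D images (there is no sparseResidencyImage1D feature).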
+ if (info.imageType == VK_IMAGE_TYPE_1D)
+ return false;
+
if (info.imageType == VK_IMAGE_TYPE_2D && !features.sparseResidencyImage2D)
return false;
if (info.imageType == VK_IMAGE_TYPE_3D && !features.sparseResidencyImage3D)
const VkPhysicalDevice physicalDevice,
const VkImageCreateInfo& info)
{
- if ((info.flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) && info.imageType != VK_IMAGE_TYPE_2D)
+ // Cube-compatible images require arrayLayers >= 6, which multi-planar formats do not support.
+ if (info.flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
return false;
if ((info.usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT) &&