dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.std430_comp_access
dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.std430_instance_array
dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.std430_instance_array_comp_access
+dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.relaxed_block
+dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.relaxed_block_comp_access
+dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.relaxed_block_instance_array
+dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.relaxed_block_instance_array_comp_access
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std140
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std140_comp_access
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std140_instance_array
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std430_comp_access
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std430_instance_array
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std430_instance_array_comp_access
+dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.relaxed_block
+dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.relaxed_block_comp_access
+dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.relaxed_block_instance_array
+dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.relaxed_block_instance_array_comp_access
dEQP-VK.ssbo.layout.multi_nested_struct.per_block_buffer.std140
dEQP-VK.ssbo.layout.multi_nested_struct.per_block_buffer.std140_comp_access
dEQP-VK.ssbo.layout.multi_nested_struct.per_block_buffer.std140_instance_array
dEQP-VK.ssbo.layout.random.all_shared_buffer.47
dEQP-VK.ssbo.layout.random.all_shared_buffer.48
dEQP-VK.ssbo.layout.random.all_shared_buffer.49
+dEQP-VK.ssbo.layout.random.relaxed.0
+dEQP-VK.ssbo.layout.random.relaxed.1
+dEQP-VK.ssbo.layout.random.relaxed.2
+dEQP-VK.ssbo.layout.random.relaxed.3
+dEQP-VK.ssbo.layout.random.relaxed.4
+dEQP-VK.ssbo.layout.random.relaxed.5
+dEQP-VK.ssbo.layout.random.relaxed.6
+dEQP-VK.ssbo.layout.random.relaxed.7
+dEQP-VK.ssbo.layout.random.relaxed.8
+dEQP-VK.ssbo.layout.random.relaxed.9
+dEQP-VK.ssbo.layout.random.relaxed.10
+dEQP-VK.ssbo.layout.random.relaxed.11
+dEQP-VK.ssbo.layout.random.relaxed.12
+dEQP-VK.ssbo.layout.random.relaxed.13
+dEQP-VK.ssbo.layout.random.relaxed.14
+dEQP-VK.ssbo.layout.random.relaxed.15
+dEQP-VK.ssbo.layout.random.relaxed.16
+dEQP-VK.ssbo.layout.random.relaxed.17
+dEQP-VK.ssbo.layout.random.relaxed.18
+dEQP-VK.ssbo.layout.random.relaxed.19
+dEQP-VK.ssbo.layout.random.relaxed.20
+dEQP-VK.ssbo.layout.random.relaxed.21
+dEQP-VK.ssbo.layout.random.relaxed.22
+dEQP-VK.ssbo.layout.random.relaxed.23
+dEQP-VK.ssbo.layout.random.relaxed.24
+dEQP-VK.ssbo.layout.random.relaxed.25
+dEQP-VK.ssbo.layout.random.relaxed.26
+dEQP-VK.ssbo.layout.random.relaxed.27
+dEQP-VK.ssbo.layout.random.relaxed.28
+dEQP-VK.ssbo.layout.random.relaxed.29
+dEQP-VK.ssbo.layout.random.relaxed.30
+dEQP-VK.ssbo.layout.random.relaxed.31
+dEQP-VK.ssbo.layout.random.relaxed.32
+dEQP-VK.ssbo.layout.random.relaxed.33
+dEQP-VK.ssbo.layout.random.relaxed.34
+dEQP-VK.ssbo.layout.random.relaxed.35
+dEQP-VK.ssbo.layout.random.relaxed.36
+dEQP-VK.ssbo.layout.random.relaxed.37
+dEQP-VK.ssbo.layout.random.relaxed.38
+dEQP-VK.ssbo.layout.random.relaxed.39
+dEQP-VK.ssbo.layout.random.relaxed.40
+dEQP-VK.ssbo.layout.random.relaxed.41
+dEQP-VK.ssbo.layout.random.relaxed.42
+dEQP-VK.ssbo.layout.random.relaxed.43
+dEQP-VK.ssbo.layout.random.relaxed.44
+dEQP-VK.ssbo.layout.random.relaxed.45
+dEQP-VK.ssbo.layout.random.relaxed.46
+dEQP-VK.ssbo.layout.random.relaxed.47
+dEQP-VK.ssbo.layout.random.relaxed.48
+dEQP-VK.ssbo.layout.random.relaxed.49
+dEQP-VK.ssbo.layout.random.relaxed.50
+dEQP-VK.ssbo.layout.random.relaxed.51
+dEQP-VK.ssbo.layout.random.relaxed.52
+dEQP-VK.ssbo.layout.random.relaxed.53
+dEQP-VK.ssbo.layout.random.relaxed.54
+dEQP-VK.ssbo.layout.random.relaxed.55
+dEQP-VK.ssbo.layout.random.relaxed.56
+dEQP-VK.ssbo.layout.random.relaxed.57
+dEQP-VK.ssbo.layout.random.relaxed.58
+dEQP-VK.ssbo.layout.random.relaxed.59
+dEQP-VK.ssbo.layout.random.relaxed.60
+dEQP-VK.ssbo.layout.random.relaxed.61
+dEQP-VK.ssbo.layout.random.relaxed.62
+dEQP-VK.ssbo.layout.random.relaxed.63
+dEQP-VK.ssbo.layout.random.relaxed.64
+dEQP-VK.ssbo.layout.random.relaxed.65
+dEQP-VK.ssbo.layout.random.relaxed.66
+dEQP-VK.ssbo.layout.random.relaxed.67
+dEQP-VK.ssbo.layout.random.relaxed.68
+dEQP-VK.ssbo.layout.random.relaxed.69
+dEQP-VK.ssbo.layout.random.relaxed.70
+dEQP-VK.ssbo.layout.random.relaxed.71
+dEQP-VK.ssbo.layout.random.relaxed.72
+dEQP-VK.ssbo.layout.random.relaxed.73
+dEQP-VK.ssbo.layout.random.relaxed.74
+dEQP-VK.ssbo.layout.random.relaxed.75
+dEQP-VK.ssbo.layout.random.relaxed.76
+dEQP-VK.ssbo.layout.random.relaxed.77
+dEQP-VK.ssbo.layout.random.relaxed.78
+dEQP-VK.ssbo.layout.random.relaxed.79
+dEQP-VK.ssbo.layout.random.relaxed.80
+dEQP-VK.ssbo.layout.random.relaxed.81
+dEQP-VK.ssbo.layout.random.relaxed.82
+dEQP-VK.ssbo.layout.random.relaxed.83
+dEQP-VK.ssbo.layout.random.relaxed.84
+dEQP-VK.ssbo.layout.random.relaxed.85
+dEQP-VK.ssbo.layout.random.relaxed.86
+dEQP-VK.ssbo.layout.random.relaxed.87
+dEQP-VK.ssbo.layout.random.relaxed.88
+dEQP-VK.ssbo.layout.random.relaxed.89
+dEQP-VK.ssbo.layout.random.relaxed.90
+dEQP-VK.ssbo.layout.random.relaxed.91
+dEQP-VK.ssbo.layout.random.relaxed.92
+dEQP-VK.ssbo.layout.random.relaxed.93
+dEQP-VK.ssbo.layout.random.relaxed.94
+dEQP-VK.ssbo.layout.random.relaxed.95
+dEQP-VK.ssbo.layout.random.relaxed.96
+dEQP-VK.ssbo.layout.random.relaxed.97
+dEQP-VK.ssbo.layout.random.relaxed.98
+dEQP-VK.ssbo.layout.random.relaxed.99
dEQP-VK.query_pool.occlusion_query.basic_conservative
dEQP-VK.query_pool.occlusion_query.basic_precise
dEQP-VK.query_pool.occlusion_query.get_results_conservative_size_32_wait_queue_without_availability_draw_points
switch (phase)
{
- case 0: outPtr[0] |= (deUint8)(decodedBits<<2); break;
- case 1: outPtr[0] |= (deUint8)(decodedBits>>4); outPtr[1] |= (deUint8)((decodedBits&0xF)<<4); break;
- case 2: outPtr[1] |= (deUint8)(decodedBits>>2); outPtr[2] |= (deUint8)((decodedBits&0x3)<<6); break;
- case 3: outPtr[2] |= decodedBits; break;
+ case 0: outPtr[0] |= (deUint8)(decodedBits<<2); break;
+ case 1: outPtr[0] = (deUint8)(outPtr[0] | (deUint8)(decodedBits>>4)); outPtr[1] = (deUint8)(outPtr[1] | (deUint8)((decodedBits&0xF)<<4)); break;
+ case 2: outPtr[1] = (deUint8)(outPtr[1] | (deUint8)(decodedBits>>2)); outPtr[2] = (deUint8)(outPtr[2] | (deUint8)((decodedBits&0x3)<<6)); break;
+ case 3: outPtr[2] |= decodedBits; break;
default:
DE_ASSERT(false);
}
{
enum Flags
{
- FLAG_USE_STORAGE_BUFFER_STORAGE_CLASS = (1u<<0)
+ FLAG_USE_STORAGE_BUFFER_STORAGE_CLASS = (1u<<0),
+ FLAG_ALLOW_RELAXED_OFFSETS = (1u<<1) // allow block offsets to follow VK_KHR_relaxed_block_layout
};
SpirvVersion targetVersion;
return program.sources[shaderType][0];
}
+EShMessages getCompileFlags (const GlslBuildOptions& buildOpts)
+{
+ EShMessages flags = (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules);
+
+ if ((buildOpts.flags & GlslBuildOptions::FLAG_ALLOW_RELAXED_OFFSETS) != 0)
+ flags = (EShMessages)(flags | EShMsgHlslOffsets);
+
+ return flags;
+}
+
} // anonymous
bool compileGlslToSpirV (const GlslSource& program, std::vector<deUint32>* dst, glu::ShaderProgramInfo* buildInfo)
{
TBuiltInResource builtinRes;
+ const EShMessages compileFlags = getCompileFlags(program.buildOptions);
if (program.buildOptions.targetVersion != SPIRV_VERSION_1_0)
TCU_THROW(InternalError, "Unsupported SPIR-V target version");
{
const deUint64 compileStartTime = deGetMicroseconds();
- const int compileRes = shader.parse(&builtinRes, 110, false, (EShMessages)(EShMsgSpvRules | EShMsgVulkanRules));
+ const int compileRes = shader.parse(&builtinRes, 110, false, compileFlags);
glu::ShaderInfo shaderBuildInfo;
shaderBuildInfo.type = (glu::ShaderType)shaderType;
if (buildInfo->shaders[0].compileOk)
{
const deUint64 linkStartTime = deGetMicroseconds();
- const int linkRes = program.link((EShMessages)(EShMsgSpvRules | EShMsgVulkanRules));
+ const int linkRes = program.link(compileFlags);
buildInfo->program.infoLog = program.getInfoLog(); // \todo [2015-11-05 scygan] Include debug log?
buildInfo->program.linkOk = (linkRes != 0);
if (!tcu::floatThresholdCompare(m_context.getTestContext().getLog(), "Compare", "Result comparison", m_expectedTextureLevel->getAccess(), result, fThreshold, tcu::COMPARE_LOG_RESULT))
return tcu::TestStatus::fail("CopiesAndBlitting test");
}
+ else if (isSnormFormat(mapTextureFormat(result.getFormat())))
+ {
+ // There may be an ambiguity between two possible binary representations of 1.0.
+ // Get rid of that by expanding the data to floats and re-normalizing again.
+
+ tcu::TextureLevel resultSnorm (result.getFormat(), result.getWidth(), result.getHeight(), result.getDepth());
+ {
+ tcu::TextureLevel resultFloat (tcu::TextureFormat(resultSnorm.getFormat().order, tcu::TextureFormat::FLOAT), resultSnorm.getWidth(), resultSnorm.getHeight(), resultSnorm.getDepth());
+
+ tcu::copy(resultFloat.getAccess(), result);
+ tcu::copy(resultSnorm, resultFloat.getAccess());
+ }
+
+ tcu::TextureLevel expectedSnorm (m_expectedTextureLevel->getFormat(), m_expectedTextureLevel->getWidth(), m_expectedTextureLevel->getHeight(), m_expectedTextureLevel->getDepth());
+
+ {
+ tcu::TextureLevel expectedFloat (tcu::TextureFormat(expectedSnorm.getFormat().order, tcu::TextureFormat::FLOAT), expectedSnorm.getWidth(), expectedSnorm.getHeight(), expectedSnorm.getDepth());
+
+ tcu::copy(expectedFloat.getAccess(), m_expectedTextureLevel->getAccess());
+ tcu::copy(expectedSnorm, expectedFloat.getAccess());
+ }
+
+ if (!tcu::intThresholdCompare(m_context.getTestContext().getLog(), "Compare", "Result comparison", expectedSnorm.getAccess(), resultSnorm.getAccess(), uThreshold, tcu::COMPARE_LOG_RESULT))
+ return tcu::TestStatus::fail("CopiesAndBlitting test");
+ }
else
{
if (!tcu::intThresholdCompare(m_context.getTestContext().getLog(), "Compare", "Result comparison", m_expectedTextureLevel->getAccess(), result, uThreshold, tcu::COMPARE_LOG_RESULT))
VK_CHECK(vk.beginCommandBuffer(*m_cmdBuffer, &cmdBufferBeginInfo));
vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &srcImageBarrier);
+ vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &dstImageBarrier);
vk.cmdBlitImage(*m_cmdBuffer, m_source.get(), m_params.src.image.operationLayout, m_destination.get(), m_params.dst.image.operationLayout, (deUint32)m_params.regions.size(), ®ions[0], m_params.filter);
- vk.cmdPipelineBarrier(*m_cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &dstImageBarrier);
VK_CHECK(vk.endCommandBuffer(*m_cmdBuffer));
submitCommandsAndWait(vk, vkDevice, queue, *m_cmdBuffer);
const tcu::TextureFormat dstTcuFormat = mapVkFormat(m_params.dst.image.format);
// upload the destination image
- m_destinationTextureLevel = de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(dstTcuFormat,
- (int)m_params.dst.image.extent.width,
- (int)m_params.dst.image.extent.height,
- (int)m_params.dst.image.extent.depth));
- generateBuffer(m_destinationTextureLevel->getAccess(), m_params.dst.image.extent.width, m_params.dst.image.extent.height, m_params.dst.image.extent.depth);
- uploadImage(m_destinationTextureLevel->getAccess(), m_destination.get(), m_params.dst.image);
+ m_destinationTextureLevel = de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(dstTcuFormat,
+ (int)m_params.dst.image.extent.width,
+ (int)m_params.dst.image.extent.height,
+ (int)m_params.dst.image.extent.depth));
+ generateBuffer(m_destinationTextureLevel->getAccess(), m_params.dst.image.extent.width, m_params.dst.image.extent.height, m_params.dst.image.extent.depth);
+ uploadImage(m_destinationTextureLevel->getAccess(), m_destination.get(), m_params.dst.image);
- m_sourceTextureLevel = de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(srcTcuFormat,
- (int)m_params.src.image.extent.width,
- (int)m_params.src.image.extent.height,
- (int)m_params.dst.image.extent.depth));
+ m_sourceTextureLevel = de::MovePtr<tcu::TextureLevel>(new tcu::TextureLevel(srcTcuFormat,
+ (int)m_params.src.image.extent.width,
+ (int)m_params.src.image.extent.height,
+ (int)m_params.dst.image.extent.depth));
- generateBuffer(m_sourceTextureLevel->getAccess(), m_params.src.image.extent.width, m_params.src.image.extent.height, m_params.dst.image.extent.depth, FILL_MODE_MULTISAMPLE);
- generateExpectedResult();
+ generateBuffer(m_sourceTextureLevel->getAccess(), m_params.src.image.extent.width, m_params.src.image.extent.height, m_params.dst.image.extent.depth, FILL_MODE_MULTISAMPLE);
+ generateExpectedResult();
switch (m_options)
{
const VkImageMemoryBarrier imageBarriers[] =
{
- //// source image
+ // source image
{
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
DE_NULL, // const void* pNext;
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
- const vk::VkExportMemoryWin32HandleInfoKHR win32Info =
+ deUint32 exportedMemoryTypeIndex = ~0U;
+ const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
+ const vk::VkExportMemoryWin32HandleInfoKHR win32Info =
{
vk::VK_STRUCTURE_TYPE_EXPORT_MEMORY_WIN32_HANDLE_INFO_KHR,
DE_NULL,
&win32Info,
(vk::VkExternalMemoryHandleTypeFlagsKHR)config.externalType
};
+
+ exportedMemoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits & compatibleMemTypes);
const vk::VkMemoryAllocateInfo info =
{
vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
&exportInfo,
requirements.size,
- chooseMemoryType(requirements.memoryTypeBits & compatibleMemTypes)
+ exportedMemoryTypeIndex
};
const vk::Unique<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, *device, &info));
NativeHandle handleA;
getMemoryNative(vkd, *device, *memory, config.externalType, handleA);
{
- const vk::Unique<vk::VkDeviceMemory> memoryA (importMemory(vkd, *device, requirements, config.externalType, handleA));
+ const vk::Unique<vk::VkDeviceMemory> memoryA (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handleA));
if (config.hostVisible)
{
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
- const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
+ const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0, exportedMemoryTypeIndex));
NativeHandle handleA;
if (config.hostVisible)
{
NativeHandle handleB (handleA);
- const vk::Unique<vk::VkDeviceMemory> memoryA (importMemory(vkd, *device, requirements, config.externalType, handleA));
- const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, handleB));
+ const vk::Unique<vk::VkDeviceMemory> memoryA (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handleA));
+ const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handleB));
if (config.hostVisible)
{
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
- const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
+ const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0, exportedMemoryTypeIndex));
NativeHandle handleA;
getMemoryNative(vkd, *device, *memory, config.externalType, handleA);
for (size_t ndx = 0; ndx < count; ndx++)
{
NativeHandle handleB (handleA);
- const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, handleB));
+ const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handleB));
}
return tcu::TestStatus::pass("Pass");
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
- const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
+ const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0, exportedMemoryTypeIndex));
for (size_t ndx = 0; ndx < count; ndx++)
{
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
- const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
+ const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0, exportedMemoryTypeIndex));
if (config.hostVisible)
writeHostMemory(vkd, *device, *memory, testData.size(), &testData[0]);
TCU_CHECK_MSG(newFd.getFd() >= 0, "Failed to call dup() for memorys fd");
{
- const vk::Unique<vk::VkDeviceMemory> newMemory (importMemory(vkd, *device, requirements, config.externalType, newFd));
+ const vk::Unique<vk::VkDeviceMemory> newMemory (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, newFd));
if (config.hostVisible)
{
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
- const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
+ const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0, exportedMemoryTypeIndex));
if (config.hostVisible)
writeHostMemory(vkd, *device, *memory, testData.size(), &testData[0]);
TCU_CHECK_MSG(newFd >= 0, "Failed to call dup2() for memorys fd");
{
- const vk::Unique<vk::VkDeviceMemory> newMemory (importMemory(vkd, *device, requirements, config.externalType, secondFd));
+ const vk::Unique<vk::VkDeviceMemory> newMemory (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, secondFd));
if (config.hostVisible)
{
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
- const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
+ const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0, exportedMemoryTypeIndex));
if (config.hostVisible)
writeHostMemory(vkd, *device, *memory, testData.size(), &testData[0]);
TCU_CHECK_MSG(newFd >= 0, "Failed to call dup3() for memorys fd");
{
- const vk::Unique<vk::VkDeviceMemory> newMemory (importMemory(vkd, *device, requirements, config.externalType, secondFd));
+ const vk::Unique<vk::VkDeviceMemory> newMemory (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, secondFd));
if (config.hostVisible)
{
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
- const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> buffer (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *buffer));
+ const vk::Unique<vk::VkDeviceMemory> memory (allocateExportableMemory(vki, physicalDevice, vkd, *device, requirements, config.externalType, config.hostVisible, config.dedicated ? *buffer : (vk::VkBuffer)0, exportedMemoryTypeIndex));
if (config.hostVisible)
writeHostMemory(vkd, *device, *memory, testData.size(), &testData[0]);
TCU_CHECK_MSG(newFd.getFd() >= 0, "Didn't receive valid fd from socket");
{
- const vk::Unique<vk::VkDeviceMemory> newMemory (importMemory(vkd, *device, requirements, config.externalType, newFd));
+ const vk::Unique<vk::VkDeviceMemory> newMemory (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, newFd));
if (config.hostVisible)
{
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> bufferA (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *bufferA));
- const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *bufferA : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> bufferA (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *bufferA));
+ const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *bufferA : (vk::VkBuffer)0, exportedMemoryTypeIndex));
NativeHandle handle;
VK_CHECK(vkd.bindBufferMemory(*device, *bufferA, *memoryA, 0u));
getMemoryNative(vkd, *device, *memoryA, config.externalType, handle);
{
- const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, handle));
+ const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handle));
const vk::Unique<vk::VkBuffer> bufferB (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
VK_CHECK(vkd.bindBufferMemory(*device, *bufferB, *memoryB, 0u));
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> bufferA (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *bufferA));
- const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *bufferA : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> bufferA (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *bufferA));
+ const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *bufferA : (vk::VkBuffer)0, exportedMemoryTypeIndex));
NativeHandle handle;
getMemoryNative(vkd, *device, *memoryA, config.externalType, handle);
VK_CHECK(vkd.bindBufferMemory(*device, *bufferA, *memoryA, 0u));
{
- const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, handle));
+ const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handle));
const vk::Unique<vk::VkBuffer> bufferB (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
VK_CHECK(vkd.bindBufferMemory(*device, *bufferB, *memoryB, 0u));
checkBufferSupport(vki, physicalDevice, config.externalType, 0u, usage, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Buffer is only allocated to get memory requirements
- const vk::Unique<vk::VkBuffer> bufferA (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
- const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *bufferA));
- const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *bufferA : (vk::VkBuffer)0));
+ const vk::Unique<vk::VkBuffer> bufferA (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getBufferMemoryRequirements(vkd, *device, *bufferA));
+ const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *bufferA : (vk::VkBuffer)0, exportedMemoryTypeIndex));
NativeHandle handle;
getMemoryNative(vkd, *device, *memoryA, config.externalType, handle);
{
- const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, handle));
+ const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handle));
const vk::Unique<vk::VkBuffer> bufferB (createExternalBuffer(vkd, *device, queueFamilyIndex, config.externalType, bufferSize, 0u, usage));
VK_CHECK(vkd.bindBufferMemory(*device, *bufferA, *memoryA, 0u));
checkImageSupport(vki, physicalDevice, config.externalType, 0u, usage, format, tiling, config.dedicated);
- const vk::Unique<vk::VkImage> imageA (createExternalImage(vkd, *device, queueFamilyIndex, config.externalType, format, width, height, tiling, 0u, usage));
- const vk::VkMemoryRequirements requirements (getImageMemoryRequirements(vkd, *device, *imageA));
- const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *imageA : (vk::VkImage)0));
+ deUint32 exportedMemoryTypeIndex = ~0U;
+ const vk::Unique<vk::VkImage> imageA (createExternalImage(vkd, *device, queueFamilyIndex, config.externalType, format, width, height, tiling, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getImageMemoryRequirements(vkd, *device, *imageA));
+ const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *imageA : (vk::VkImage)0, exportedMemoryTypeIndex));
NativeHandle handle;
VK_CHECK(vkd.bindImageMemory(*device, *imageA, *memoryA, 0u));
getMemoryNative(vkd, *device, *memoryA, config.externalType, handle);
{
- const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, handle));
+ const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handle));
const vk::Unique<vk::VkImage> imageB (createExternalImage(vkd, *device, queueFamilyIndex, config.externalType, format, width, height, tiling, 0u, usage));
VK_CHECK(vkd.bindImageMemory(*device, *imageB, *memoryB, 0u));
checkImageSupport(vki, physicalDevice, config.externalType, 0u, usage, format, tiling, config.dedicated);
- const vk::Unique<vk::VkImage> imageA (createExternalImage(vkd, *device, queueFamilyIndex, config.externalType, format, width, height, tiling, 0u, usage));
- const vk::VkMemoryRequirements requirements (getImageMemoryRequirements(vkd, *device, *imageA));
- const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *imageA : (vk::VkImage)0));
+ deUint32 exportedMemoryTypeIndex = ~0U;
+ const vk::Unique<vk::VkImage> imageA (createExternalImage(vkd, *device, queueFamilyIndex, config.externalType, format, width, height, tiling, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getImageMemoryRequirements(vkd, *device, *imageA));
+ const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *imageA : (vk::VkImage)0, exportedMemoryTypeIndex));
NativeHandle handle;
getMemoryNative(vkd, *device, *memoryA, config.externalType, handle);
VK_CHECK(vkd.bindImageMemory(*device, *imageA, *memoryA, 0u));
{
- const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, handle));
+ const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handle));
const vk::Unique<vk::VkImage> imageB (createExternalImage(vkd, *device, queueFamilyIndex, config.externalType, format, width, height, tiling, 0u, usage));
VK_CHECK(vkd.bindImageMemory(*device, *imageB, *memoryB, 0u));
checkImageSupport(vki, physicalDevice, config.externalType, 0u, usage, format, tiling, config.dedicated);
+ deUint32 exportedMemoryTypeIndex = ~0U;
// \note Image is only allocated to get memory requirements
- const vk::Unique<vk::VkImage> imageA (createExternalImage(vkd, *device, queueFamilyIndex, config.externalType, format, width, height, tiling, 0u, usage));
- const vk::VkMemoryRequirements requirements (getImageMemoryRequirements(vkd, *device, *imageA));
- const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *imageA : (vk::VkImage)0));
+ const vk::Unique<vk::VkImage> imageA (createExternalImage(vkd, *device, queueFamilyIndex, config.externalType, format, width, height, tiling, 0u, usage));
+ const vk::VkMemoryRequirements requirements (getImageMemoryRequirements(vkd, *device, *imageA));
+ const vk::Unique<vk::VkDeviceMemory> memoryA (allocateExportableMemory(vkd, *device, requirements, config.externalType, config.dedicated ? *imageA : (vk::VkImage)0, exportedMemoryTypeIndex));
NativeHandle handle;
getMemoryNative(vkd, *device, *memoryA, config.externalType, handle);
{
- const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, handle));
+ const vk::Unique<vk::VkDeviceMemory> memoryB (importMemory(vkd, *device, requirements, config.externalType, exportedMemoryTypeIndex, handle));
const vk::Unique<vk::VkImage> imageB (createExternalImage(vkd, *device, queueFamilyIndex, config.externalType, format, width, height, tiling, 0u, usage));
VK_CHECK(vkd.bindImageMemory(*device, *imageA, *memoryA, 0u));
"VK_KHR_win32_keyed_mutex",
"VK_KHR_dedicated_allocation",
"VK_KHR_variable_pointers",
+ "VK_KHR_relaxed_block_layout",
};
checkKhrExtensions(results, extensions, DE_LENGTH_OF_ARRAY(s_allowedDeviceKhrExtensions), s_allowedDeviceKhrExtensions);
MAX_CONCURRENT_INSTANCES = 32,
MAX_CONCURRENT_DEVICES = 32,
MAX_CONCURRENT_SYNC_PRIMITIVES = 100,
+ MAX_CONCURRENT_PIPELINE_CACHES = 128,
DEFAULT_MAX_CONCURRENT_OBJECTS = 16*1024,
};
static deUint32 getMaxConcurrent (Context& context, const Parameters& params)
{
- return getSafeObjectCount<PipelineCache>(context, params, DEFAULT_MAX_CONCURRENT_OBJECTS);
+ return getSafeObjectCount<PipelineCache>(context, params, MAX_CONCURRENT_PIPELINE_CACHES);
}
static Move<VkPipelineCache> create (const Environment& env, const Resources&, const Parameters&)
for (deInt32 y = 0; y < m_imageHeight; y++)
for (deInt32 x = 0; x < m_imageWidth; x++)
- refAccess.setPixel(source.getAccess().getPixelUint(int(float(x) * xscale), int(float(y) * yscale)), x, y);
+ refAccess.setPixel(source.getAccess().getPixelUint(int((float(x) + 0.5f) * xscale), int((float(y) + 0.5f) * yscale)), x, y);
}
else
DE_FATAL("Unsupported scale");
const bool allowSnorm8Bug = m_texture->getTextureFormat().type == tcu::TextureFormat::SNORM_INT8 &&
(m_samplerParams.minFilter == VK_FILTER_LINEAR || m_samplerParams.magFilter == VK_FILTER_LINEAR);
+ const bool isNearestOnly = (m_samplerParams.minFilter == VK_FILTER_NEAREST && m_samplerParams.magFilter == VK_FILTER_NEAREST);
tcu::LookupPrecision lookupPrecision;
lookupPrecision.coordBits = tcu::IVec3(17, 17, 17);
lookupPrecision.uvwBits = tcu::IVec3(5, 5, 5);
lookupPrecision.colorMask = tcu::BVec4(true);
- lookupPrecision.colorThreshold = tcu::computeFixedPointThreshold(tcu::IVec4(8, 8, 8, 8)) / swizzleScaleBias(lookupScale, m_componentMapping);
+ lookupPrecision.colorThreshold = tcu::computeFixedPointThreshold(max((tcu::IVec4(8, 8, 8, 8) - (isNearestOnly ? 1 : 2)), tcu::IVec4(0))) / swizzleScaleBias(lookupScale, m_componentMapping);
if (tcu::isSRGB(m_texture->getTextureFormat()))
lookupPrecision.colorThreshold += tcu::Vec4(4.f / 255.f);
DE_ASSERT(ndx < 4);
DE_ASSERT(m_status <= 0xFFu);
- m_status |= (deUint16)(0x1u << (ndx * 2));
+ m_status = (deUint16)(m_status | (deUint16)(0x1u << (ndx * 2)));
if (value)
- m_status |= (deUint16)(0x1u << (ndx * 2 + 1));
+ m_status = (deUint16)(m_status | (deUint16)(0x1u << (ndx * 2 + 1)));
else
m_status &= (deUint16)~(0x1u << (deUint16)(ndx * 2 + 1));
}
}
+ deUint32 metadataAspectIndex = noMatchFound;
+ for (deUint32 memoryReqNdx = 0; memoryReqNdx < sparseMemoryReqCount; ++memoryReqNdx)
+ {
+ if (sparseImageMemoryRequirements[memoryReqNdx].formatProperties.aspectMask & VK_IMAGE_ASPECT_METADATA_BIT)
+ {
+ metadataAspectIndex = memoryReqNdx;
+ break;
+ }
+ }
+
if (colorAspectIndex == noMatchFound)
TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT.");
if (memoryRequirements.size > deviceProperties.limits.sparseAddressSpaceSize)
TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits.");
- // Check if the image format supports sparse oprerations
+ // Check if the image format supports sparse operations
const std::vector<VkSparseImageFormatProperties> sparseImageFormatPropVec =
getPhysicalDeviceSparseImageFormatProperties(instance, physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType, imageCreateInfo.samples, imageCreateInfo.usage, imageCreateInfo.tiling);
// 1) VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT is requested by the driver: each layer needs a separate tail.
// 2) otherwise: only one tail is needed.
{
- if ( imageMipTailMemoryBinds.size() == 0 ||
- (imageMipTailMemoryBinds.size() != 0 && (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) == 0))
+ if (imageMipTailMemoryBinds.size() == 0 || (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) == 0)
{
const VkMemoryRequirements allocRequirements =
{
m_allocations.push_back(allocation);
imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
}
+
+ // Metadata
+ if (metadataAspectIndex != noMatchFound)
+ {
+ const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseImageMemoryRequirements[metadataAspectIndex];
+
+ if (imageMipTailMemoryBinds.size() == 1 || (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) == 0)
+ {
+ const VkMemoryRequirements metadataAllocRequirements =
+ {
+ metadataAspectRequirements.imageMipTailSize, // VkDeviceSize size;
+ memoryRequirements.alignment, // VkDeviceSize alignment;
+ memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
+ };
+ const de::SharedPtr<Allocation> metadataAllocation(m_memAlloc.allocate(metadataAllocRequirements, MemoryRequirement::Any).release());
+
+ const VkSparseMemoryBind metadataMipTailMemoryBind =
+ {
+ metadataAspectRequirements.imageMipTailOffset +
+ layerNdx * metadataAspectRequirements.imageMipTailStride, // VkDeviceSize resourceOffset;
+ metadataAspectRequirements.imageMipTailSize, // VkDeviceSize size;
+ metadataAllocation->getMemory(), // VkDeviceMemory memory;
+ metadataAllocation->getOffset(), // VkDeviceSize memoryOffset;
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT // VkSparseMemoryBindFlags flags;
+ };
+
+ m_allocations.push_back(metadataAllocation);
+ imageMipTailMemoryBinds.push_back(metadataMipTailMemoryBind);
+ }
+ }
}
}
DE_ASSERT(sparseMemoryRequirements.size() != 0);
- const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+ const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+ const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
if (colorAspectIndex == NO_MATCH_FOUND)
TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
}
+
+ // Metadata
+ if (metadataAspectIndex != NO_MATCH_FOUND)
+ {
+ const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
+
+ if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+ {
+ const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ metadataAspectRequirements.imageMipTailSize, memoryType,
+ metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride,
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ }
+ }
}
if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels)
imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
}
+ // Metadata
+ if (metadataAspectIndex != NO_MATCH_FOUND)
+ {
+ const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
+
+ if ((metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+ {
+ const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset,
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ }
+ }
+
VkBindSparseInfo bindSparseInfo =
{
VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
DE_ASSERT(sparseMemoryRequirements.size() != 0);
- const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+ const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+ const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
if (colorAspectIndex == NO_MATCH_FOUND)
TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
}
+
+ // Metadata
+ if (metadataAspectIndex != NO_MATCH_FOUND)
+ {
+ const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
+
+ if (!(metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT))
+ {
+ const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ metadataAspectRequirements.imageMipTailSize, memoryType,
+ metadataAspectRequirements.imageMipTailOffset + layerNdx * metadataAspectRequirements.imageMipTailStride,
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ }
+ }
}
if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels)
imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
}
+ // Metadata
+ if (metadataAspectIndex != NO_MATCH_FOUND)
+ {
+ const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
+
+ if (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT)
+ {
+ const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ metadataAspectRequirements.imageMipTailSize, memoryType, metadataAspectRequirements.imageMipTailOffset,
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
+ }
+ }
+
VkBindSparseInfo bindSparseInfo =
{
VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
DE_ASSERT(sparseMemoryRequirements.size() != 0);
- const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+ const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT);
+ const deUint32 metadataAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_METADATA_BIT);
if (colorAspectIndex == NO_MATCH_FOUND)
TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT");
}
}
+ // Metadata
+ if (metadataAspectIndex != NO_MATCH_FOUND)
+ {
+ const VkSparseImageMemoryRequirements metadataAspectRequirements = sparseMemoryRequirements[metadataAspectIndex];
+
+ const deUint32 metadataBindCount = (metadataAspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT ? 1u : imageSparseInfo.arrayLayers);
+ for (deUint32 bindNdx = 0u; bindNdx < metadataBindCount; ++bindNdx)
+ {
+ const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, getDevice(),
+ metadataAspectRequirements.imageMipTailSize, memoryType,
+ metadataAspectRequirements.imageMipTailOffset + bindNdx * metadataAspectRequirements.imageMipTailStride,
+ VK_SPARSE_MEMORY_BIND_METADATA_BIT);
+
+ deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(imageMipTailMemoryBind.memory), Deleter<VkDeviceMemory>(deviceInterface, getDevice(), DE_NULL))));
+
+ imageMipTailBinds.push_back(imageMipTailMemoryBind);
+ }
+ }
+
VkBindSparseInfo bindSparseInfo =
{
VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
return imageSizeInBytes;
}
-VkSparseImageMemoryBind makeSparseImageMemoryBind (const DeviceInterface& vk,
- const VkDevice device,
- const VkDeviceSize allocationSize,
- const deUint32 memoryType,
- const VkImageSubresource& subresource,
- const VkOffset3D& offset,
- const VkExtent3D& extent)
+VkSparseImageMemoryBind makeSparseImageMemoryBind (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkDeviceSize allocationSize,
+ const deUint32 memoryType,
+ const VkImageSubresource& subresource,
+ const VkOffset3D& offset,
+ const VkExtent3D& extent)
{
const VkMemoryAllocateInfo allocInfo =
{
return imageMemoryBind;
}
-VkSparseMemoryBind makeSparseMemoryBind (const DeviceInterface& vk,
- const VkDevice device,
- const VkDeviceSize allocationSize,
- const deUint32 memoryType,
- const VkDeviceSize resourceOffset)
+VkSparseMemoryBind makeSparseMemoryBind (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkDeviceSize allocationSize,
+ const deUint32 memoryType,
+ const VkDeviceSize resourceOffset,
+ const VkSparseMemoryBindFlags flags)
{
const VkMemoryAllocateInfo allocInfo =
{
memoryBind.size = allocationSize;
memoryBind.memory = deviceMemory;
memoryBind.memoryOffset = 0u;
- memoryBind.flags = 0u;
+ memoryBind.flags = flags;
return memoryBind;
}
const vk::VkDevice device,
const vk::VkDeviceSize allocationSize,
const deUint32 memoryType,
- const vk::VkDeviceSize resourceOffset);
+ const vk::VkDeviceSize resourceOffset,
+ const vk::VkSparseMemoryBindFlags flags = 0u);
void beginCommandBuffer (const vk::DeviceInterface& vk,
const vk::VkCommandBuffer cmdBuffer);
vktSSBOLayoutCase.cpp
vktSSBOLayoutCase.hpp
vktSSBOLayoutTests.cpp
- vktSSBOLayoutTests.cpp
+ vktSSBOLayoutTests.hpp
)
set(DEQP_VK_SSBO_LIBS
#include "deMath.h"
#include "deSharedPtr.hpp"
-#include <algorithm>
-#include <map>
-
#include "vkBuilderUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkPrograms.hpp"
using tcu::TestLog;
using std::string;
using std::vector;
-using std::map;
using glu::VarType;
using glu::StructType;
using glu::StructMember;
: m_name (name)
, m_type (type)
, m_flags (flags)
+ , m_offset (~0u)
{
}
// BufferBlock implementation.
BufferBlock::BufferBlock (const char* blockName)
- : m_blockName (blockName)
- , m_arraySize (-1)
- , m_flags (0)
+ : m_blockName (blockName)
+ , m_arraySize (-1)
+ , m_flags (0)
{
setArraySize(0);
}
const int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
: glu::getDataTypeMatrixNumRows(basicType);
const int vecAlign = getDataTypeByteAlignment(glu::getDataTypeFloatVec(vecSize));
-
return vecAlign;
}
else
}
}
+// Computes the base alignment of 'type' under VK_KHR_relaxed_block_layout rules.
+// Relaxed layout only lowers the alignment of vectors (to 4-byte scalar alignment);
+// matrices and scalars keep their byte alignment, array elements use std430 rules,
+// and structs align to the largest relaxed alignment among their members.
+int computeRelaxedBlockBaseAlignment (const VarType& type, deUint32 layoutFlags)
+{
+	if (type.isBasicType())
+	{
+		glu::DataType basicType = type.getBasicType();
+
+		// The one actual relaxation: vectors get 4-byte (scalar) alignment.
+		if (glu::isDataTypeVector(basicType))
+			return 4;
+
+		if (glu::isDataTypeMatrix(basicType))
+		{
+			// A matrix is aligned like its column vector (row vector for row-major).
+			const bool isRowMajor = !!(layoutFlags & LAYOUT_ROW_MAJOR);
+			const int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
+										   : glu::getDataTypeMatrixNumRows(basicType);
+			const int vecAlign = getDataTypeByteAlignment(glu::getDataTypeFloatVec(vecSize));
+			return vecAlign;
+		}
+		else
+			return getDataTypeByteAlignment(basicType);
+	}
+	else if (type.isArrayType())
+		// Arrays keep std430 alignment for their element type; the relaxed
+		// vector rule above is not applied to array elements.
+		return computeStd430BaseAlignment(type.getElementType(), layoutFlags);
+	else
+	{
+		DE_ASSERT(type.isStructType());
+
+		// Struct alignment is the maximum relaxed alignment over all members.
+		int maxBaseAlignment = 0;
+		for (StructType::ConstIterator memberIter = type.getStructPtr()->begin(); memberIter != type.getStructPtr()->end(); memberIter++)
+			maxBaseAlignment = de::max(maxBaseAlignment, computeRelaxedBlockBaseAlignment(memberIter->getType(), layoutFlags));
+
+		return maxBaseAlignment;
+	}
+}
+
inline deUint32 mergeLayoutFlags (deUint32 prevFlags, deUint32 newFlags)
{
- const deUint32 packingMask = LAYOUT_STD430|LAYOUT_STD140;
+ const deUint32 packingMask = LAYOUT_STD430|LAYOUT_STD140|LAYOUT_RELAXED;
const deUint32 matrixMask = LAYOUT_ROW_MAJOR|LAYOUT_COLUMN_MAJOR;
deUint32 mergedFlags = 0;
return mergedFlags;
}
+// Returns true if 'powerOf2' is a positive power of two.
+// Uses the classic (x & (x - 1)) == 0 test; non-positive values are rejected first.
+template <class T>
+bool isPow2(T powerOf2)
+{
+	if (powerOf2 <= 0)
+		return false;
+	return (powerOf2 & (powerOf2 - (T)1)) == (T)0;
+}
+
+// Rounds 'number' up to the nearest multiple of 'powerOf2'.
+// 'powerOf2' must be a positive power of two (asserted), so the add-and-mask
+// form is exact and avoids a division.
+template <class T>
+T roundToPow2(T number, int powerOf2)
+{
+	DE_ASSERT(isPow2(powerOf2));
+	return (number + (T)powerOf2 - (T)1) & (T)(~(powerOf2 - 1));
+}
+
//! Appends all child elements to layout, returns value that should be appended to offset.
int computeReferenceLayout (
BufferLayout& layout,
{
-	// Reference layout uses std430 rules by default. std140 rules are
-	// choosen only for blocks that have std140 layout.
+	// Reference layout uses std430 rules by default. std140 or relaxed rules
+	// are chosen only for blocks that explicitly use those layouts.
- const bool isStd140 = (layoutFlags & LAYOUT_STD140) != 0;
- const int baseAlignment = isStd140 ? computeStd140BaseAlignment(type, layoutFlags)
- : computeStd430BaseAlignment(type, layoutFlags);
+ const int baseAlignment = (layoutFlags & LAYOUT_STD140) != 0 ? computeStd140BaseAlignment(type, layoutFlags) :
+ (layoutFlags & LAYOUT_RELAXED) != 0 ? computeRelaxedBlockBaseAlignment(type, layoutFlags) :
+ computeStd430BaseAlignment(type, layoutFlags);
int curOffset = deAlign32(baseOffset, baseAlignment);
const int topLevelArraySize = 1; // Default values
const int topLevelArrayStride = 0;
}
else
{
+ if (glu::isDataTypeVector(basicType) && (getDataTypeByteSize(basicType) <= 16 ? curOffset / 16 != (curOffset + getDataTypeByteSize(basicType) - 1) / 16 : curOffset % 16 != 0) && (layoutFlags & LAYOUT_RELAXED))
+ curOffset = roundToPow2(curOffset, 16);
+
// Scalar or vector.
entry.offset = curOffset;
const string prefix = blockPrefix + bufVar.getName() + "[0]";
const bool isStd140 = (blockLayoutFlags & LAYOUT_STD140) != 0;
const int vec4Align = (int)sizeof(deUint32)*4;
- const int baseAlignment = isStd140 ? computeStd140BaseAlignment(varType, combinedFlags)
- : computeStd430BaseAlignment(varType, combinedFlags);
+ const int baseAlignment = isStd140 ? computeStd140BaseAlignment(varType, combinedFlags) :
+ (blockLayoutFlags & LAYOUT_RELAXED) != 0 ? computeRelaxedBlockBaseAlignment(varType, combinedFlags) :
+ computeStd430BaseAlignment(varType, combinedFlags);
int curOffset = deAlign32(baseOffset, baseAlignment);
const VarType& elemType = varType.getElementType();
return computeReferenceLayout(layout, curBlockNdx, baseOffset, blockPrefix + bufVar.getName(), varType, combinedFlags);
}
-void computeReferenceLayout (BufferLayout& layout, const ShaderInterface& interface)
+void computeReferenceLayout (BufferLayout& layout, ShaderInterface& interface)
{
int numBlocks = interface.getNumBlocks();
for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
{
- const BufferBlock& block = interface.getBlock(blockNdx);
+ BufferBlock& block = interface.getBlock(blockNdx);
bool hasInstanceName = block.getInstanceName() != DE_NULL;
std::string blockPrefix = hasInstanceName ? (std::string(block.getBlockName()) + ".") : std::string("");
int curOffset = 0;
int activeBlockNdx = (int)layout.blocks.size();
int firstVarNdx = (int)layout.bufferVars.size();
- for (BufferBlock::const_iterator varIter = block.begin(); varIter != block.end(); varIter++)
+ size_t oldSize = layout.bufferVars.size();
+ for (BufferBlock::iterator varIter = block.begin(); varIter != block.end(); varIter++)
{
- const BufferVar& bufVar = *varIter;
+ BufferVar& bufVar = *varIter;
curOffset += computeReferenceLayout(layout, activeBlockNdx, blockPrefix, curOffset, bufVar, block.getFlags());
+ if (block.getFlags() & LAYOUT_RELAXED)
+ {
+ DE_ASSERT(!(layout.bufferVars.size() <= oldSize));
+ bufVar.setOffset(layout.bufferVars[oldSize].offset);
+ }
+ oldSize = layout.bufferVars.size();
}
int varIndicesEnd = (int)layout.bufferVars.size();
}
}
+// Returns true if any block in the interface has the LAYOUT_RELAXED flag set,
+// i.e. the generated shader will need the VK_KHR_relaxed_block_layout extension.
+bool usesRelaxedLayout (const ShaderInterface& interface)
+{
+	// Scan every block; one relaxed block is enough.
+	for (int ndx = 0; ndx < interface.getNumBlocks(); ++ndx)
+	{
+		if (interface.getBlock(ndx).getFlags() & LAYOUT_RELAXED)
+			return true;
+	}
+	return false;
+}
+
struct Indent
{
int level;
void generateDeclaration (std::ostream& src, const BufferVar& bufferVar, int indentLevel)
{
// \todo [pyry] Qualifiers
-
if ((bufferVar.getFlags() & LAYOUT_MASK) != 0)
src << "layout(" << LayoutFlagsFmt(bufferVar.getFlags() & LAYOUT_MASK) << ") ";
+ else if (bufferVar.getOffset()!= ~0u)
+ src << "layout(offset = "<<bufferVar.getOffset()<<") ";
src << glu::declare(bufferVar.getType(), bufferVar.getName(), indentLevel);
}
void generateDeclaration (std::ostream& src, const BufferBlock& block, int bindingPoint)
{
src << "layout(";
-
if ((block.getFlags() & LAYOUT_MASK) != 0)
src << LayoutFlagsFmt(block.getFlags() & LAYOUT_MASK) << ", ";
for (BufferBlock::const_iterator varIter = block.begin(); varIter != block.end(); varIter++)
{
src << Indent(1);
+
generateDeclaration(src, *varIter, 1 /* indent level */);
src << ";\n";
}
{
std::ostringstream src;
- src << "#version 310 es\n";
+ if (usesRelaxedLayout(interface))
+ src << "#version 450\n";
+ else
+ src << "#version 310 es\n";
+
src << "layout(local_size_x = 1) in;\n";
src << "\n";
const int refCount = 1;
int resCount = 0;
- resCount = *(const int*)((const deUint8*)acBufferAlloc->getHostPtr());
+ resCount = *((const int*)acBufferAlloc->getHostPtr());
counterOk = (refCount == resCount);
if (!counterOk)
{
DE_ASSERT(!m_computeShaderSrc.empty());
- programCollection.glslSources.add("compute") << glu::ComputeSource(m_computeShaderSrc);
+ if (usesRelaxedLayout(m_interface))
+ {
+ programCollection.glslSources.add("compute") << glu::ComputeSource(m_computeShaderSrc)
+ << vk::GlslBuildOptions(vk::SPIRV_VERSION_1_0, vk::GlslBuildOptions::FLAG_ALLOW_RELAXED_OFFSETS);
+ }
+ else
+ programCollection.glslSources.add("compute") << glu::ComputeSource(m_computeShaderSrc);
}
TestInstance* SSBOLayoutCase::createInstance (Context& context) const
{
+ if (!de::contains(context.getDeviceExtensions().begin(), context.getDeviceExtensions().end(), "VK_KHR_relaxed_block_layout") && usesRelaxedLayout(m_interface))
+ TCU_THROW(NotSupportedError, "VK_KHR_relaxed_block_layout not supported");
return new SSBOLayoutCaseInstance(context, m_bufferMode, m_interface, m_refLayout, m_initialData, m_writeData);
}
copyNonWrittenData (m_interface, m_refLayout, m_initialData.pointers, m_writeData.pointers);
m_computeShaderSrc = generateComputeShader(m_interface, m_refLayout, m_initialData.pointers, m_writeData.pointers, m_matrixLoadFlag);
-
}
} // ssbo
QUALIFIER_RESTRICT = (1<<6),
QUALIFIER_READONLY = (1<<7),
QUALIFIER_WRITEONLY = (1<<8),*/
-
ACCESS_READ = (1<<9), //!< Buffer variable is read in the shader.
ACCESS_WRITE = (1<<10), //!< Buffer variable is written in the shader.
+ LAYOUT_RELAXED = (1<<11), //!< Support VK_KHR_relaxed_block_layout extension
};
enum MatrixLoadFlags
public:
BufferVar (const char* name, const glu::VarType& type, deUint32 flags);
- const char* getName (void) const { return m_name.c_str(); }
- const glu::VarType& getType (void) const { return m_type; }
- deUint32 getFlags (void) const { return m_flags; }
+ const char* getName (void) const { return m_name.c_str(); }
+ const glu::VarType& getType (void) const { return m_type; }
+ deUint32 getFlags (void) const { return m_flags; }
+ deUint32 getOffset (void) const { return m_offset; }
+
+ void setOffset (deUint32 offset) { m_offset = offset; }
private:
std::string m_name;
glu::VarType m_type;
deUint32 m_flags;
+ deUint32 m_offset;
};
class BufferBlock
BufferBlock (const char* blockName);
- const char* getBlockName (void) const { return m_blockName.c_str(); }
- const char* getInstanceName (void) const { return m_instanceName.empty() ? DE_NULL : m_instanceName.c_str(); }
- bool isArray (void) const { return m_arraySize > 0; }
- int getArraySize (void) const { return m_arraySize; }
- deUint32 getFlags (void) const { return m_flags; }
+ const char* getBlockName (void) const { return m_blockName.c_str(); }
+ const char* getInstanceName (void) const { return m_instanceName.empty() ? DE_NULL : m_instanceName.c_str(); }
+ bool isArray (void) const { return m_arraySize > 0; }
+ int getArraySize (void) const { return m_arraySize; }
+ deUint32 getFlags (void) const { return m_flags; }
- void setInstanceName (const char* name) { m_instanceName = name; }
- void setFlags (deUint32 flags) { m_flags = flags; }
- void addMember (const BufferVar& var) { m_variables.push_back(var); }
+ void setInstanceName (const char* name) { m_instanceName = name; }
+ void setFlags (deUint32 flags) { m_flags = flags; }
+ void addMember (const BufferVar& var) { m_variables.push_back(var); }
void setArraySize (int arraySize);
- int getLastUnsizedArraySize (int instanceNdx) const { return m_lastUnsizedArraySizes[instanceNdx]; }
- void setLastUnsizedArraySize (int instanceNdx, int size) { m_lastUnsizedArraySizes[instanceNdx] = size; }
+ int getLastUnsizedArraySize (int instanceNdx) const { return m_lastUnsizedArraySizes[instanceNdx]; }
+ void setLastUnsizedArraySize (int instanceNdx, int size) { m_lastUnsizedArraySizes[instanceNdx] = size; }
- inline iterator begin (void) { return m_variables.begin(); }
- inline const_iterator begin (void) const { return m_variables.begin(); }
- inline iterator end (void) { return m_variables.end(); }
- inline const_iterator end (void) const { return m_variables.end(); }
+ inline iterator begin (void) { return m_variables.begin(); }
+ inline const_iterator begin (void) const { return m_variables.begin(); }
+ inline iterator end (void) { return m_variables.end(); }
+ inline const_iterator end (void) const { return m_variables.end(); }
private:
std::string m_blockName;
int getNumBlocks (void) const { return (int)m_bufferBlocks.size(); }
const BufferBlock& getBlock (int ndx) const { return *m_bufferBlocks[ndx]; }
+ BufferBlock& getBlock (int ndx) { return *m_bufferBlocks[ndx]; }
private:
ShaderInterface (const ShaderInterface&);
virtual TestInstance* createInstance (Context& context) const;
protected:
- void init (void);
+ void init (void);
BufferMode m_bufferMode;
ShaderInterface m_interface;
MatrixLoadFlags m_matrixLoadFlag;
+ std::string m_computeShaderSrc;
private:
SSBOLayoutCase (const SSBOLayoutCase&);
BufferLayout m_refLayout;
RefDataStorage m_initialData; // Initial data stored in buffer.
RefDataStorage m_writeData; // Data written by compute shader.
- std::string m_computeShaderSrc;
};
} // ssbo
FEATURE_STD430_LAYOUT = (1<<9),
FEATURE_MATRIX_LAYOUT = (1<<10), //!< Matrix layout flags.
FEATURE_UNSIZED_ARRAYS = (1<<11),
- FEATURE_ARRAYS_OF_ARRAYS = (1<<12)
+ FEATURE_ARRAYS_OF_ARRAYS = (1<<12),
+ FEATURE_RELAXED_LAYOUT = (1<<13)
};
class RandomSSBOLayoutCase : public SSBOLayoutCase
// Layout flag candidates.
vector<deUint32> layoutFlagCandidates;
- layoutFlagCandidates.push_back(0);
+
+ if (m_features & FEATURE_STD430_LAYOUT)
+ layoutFlagCandidates.push_back(LAYOUT_STD430);
+
if (m_features & FEATURE_STD140_LAYOUT)
layoutFlagCandidates.push_back(LAYOUT_STD140);
+ if (m_features & FEATURE_RELAXED_LAYOUT)
+ layoutFlagCandidates.push_back(LAYOUT_RELAXED);
+
+ DE_ASSERT(!layoutFlagCandidates.empty());
+
layoutFlags |= rnd.choose<deUint32>(layoutFlagCandidates.begin(), layoutFlagCandidates.end());
if (m_features & FEATURE_MATRIX_LAYOUT)
class BlockMultiBasicTypesCase : public SSBOLayoutCase
{
public:
- BlockMultiBasicTypesCase (tcu::TestContext& testCtx, const char* name, const char* description, deUint32 flagsA, deUint32 flagsB, BufferMode bufferMode, int numInstances, MatrixLoadFlags matrixLoadFlag)
- : SSBOLayoutCase (testCtx, name, description, bufferMode, matrixLoadFlag)
- , m_flagsA (flagsA)
- , m_flagsB (flagsB)
- , m_numInstances (numInstances)
+ BlockMultiBasicTypesCase (tcu::TestContext& testCtx, const char* name, const char* description, deUint32 flagsA, deUint32 flagsB, BufferMode bufferMode, int numInstances, MatrixLoadFlags matrixLoadFlag)
+ : SSBOLayoutCase (testCtx, name, description, bufferMode, matrixLoadFlag)
+ , m_flagsA (flagsA)
+ , m_flagsB (flagsB)
+ , m_numInstances (numInstances)
{
BufferBlock& blockA = m_interface.allocBlock("BlockA");
blockA.addMember(BufferVar("a", VarType(glu::TYPE_FLOAT, glu::PRECISION_HIGHP), ACCESS_READ|ACCESS_WRITE));
modeGroup->addChild(new BlockMultiBasicTypesCase(m_testCtx, (baseName + "_comp_access").c_str(), "", baseFlags, baseFlags, bufferModes[modeNdx].mode, isArray ? 3 : 0, LOAD_MATRIX_COMPONENTS));
}
}
+
+ for (int isArray = 0; isArray < 2; isArray++)
+ {
+ std::string baseName = "relaxed_block";
+ deUint32 baseFlags = LAYOUT_RELAXED;
+
+ if (isArray)
+ baseName += "_instance_array";
+
+ modeGroup->addChild(new BlockMultiBasicTypesCase(m_testCtx, baseName.c_str(), "", baseFlags, baseFlags, bufferModes[modeNdx].mode, isArray ? 3 : 0, LOAD_FULL_MATRIX));
+ modeGroup->addChild(new BlockMultiBasicTypesCase(m_testCtx, (baseName + "_comp_access").c_str(), "", baseFlags, baseFlags, bufferModes[modeNdx].mode, isArray ? 3 : 0, LOAD_MATRIX_COMPONENTS));
+ }
}
}
// ssbo.random
{
- const deUint32 allLayouts = FEATURE_STD140_LAYOUT;
+ const deUint32 allStdLayouts = FEATURE_STD140_LAYOUT|FEATURE_STD430_LAYOUT;
const deUint32 allBasicTypes = FEATURE_VECTORS|FEATURE_MATRICES;
const deUint32 unused = FEATURE_UNUSED_MEMBERS|FEATURE_UNUSED_VARS;
const deUint32 unsized = FEATURE_UNSIZED_ARRAYS;
const deUint32 matFlags = FEATURE_MATRIX_LAYOUT;
+ const deUint32 allButRelaxed = ~FEATURE_RELAXED_LAYOUT;
+ const deUint32 allRelaxed = FEATURE_VECTORS|FEATURE_RELAXED_LAYOUT|FEATURE_INSTANCE_ARRAYS;
tcu::TestCaseGroup* randomGroup = new tcu::TestCaseGroup(m_testCtx, "random", "Random Uniform Block cases");
addChild(randomGroup);
// Basic types.
- createRandomCaseGroup(randomGroup, m_testCtx, "scalar_types", "Scalar types only, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused, 25, 0);
- createRandomCaseGroup(randomGroup, m_testCtx, "vector_types", "Scalar and vector types only, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|FEATURE_VECTORS, 25, 25);
- createRandomCaseGroup(randomGroup, m_testCtx, "basic_types", "All basic types, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|allBasicTypes|matFlags, 25, 50);
- createRandomCaseGroup(randomGroup, m_testCtx, "basic_arrays", "Arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|allBasicTypes|matFlags|FEATURE_ARRAYS, 25, 50);
- createRandomCaseGroup(randomGroup, m_testCtx, "unsized_arrays", "Unsized arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_ARRAYS, 25, 50);
- createRandomCaseGroup(randomGroup, m_testCtx, "arrays_of_arrays", "Arrays of arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_ARRAYS|FEATURE_ARRAYS_OF_ARRAYS, 25, 950);
-
- createRandomCaseGroup(randomGroup, m_testCtx, "basic_instance_arrays", "Basic instance arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_INSTANCE_ARRAYS, 25, 75);
- createRandomCaseGroup(randomGroup, m_testCtx, "nested_structs", "Nested structs, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_STRUCTS, 25, 100);
- createRandomCaseGroup(randomGroup, m_testCtx, "nested_structs_arrays", "Nested structs, arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_STRUCTS|FEATURE_ARRAYS|FEATURE_ARRAYS_OF_ARRAYS, 25, 150);
- createRandomCaseGroup(randomGroup, m_testCtx, "nested_structs_instance_arrays", "Nested structs, instance arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_STRUCTS|FEATURE_INSTANCE_ARRAYS, 25, 125);
- createRandomCaseGroup(randomGroup, m_testCtx, "nested_structs_arrays_instance_arrays", "Nested structs, instance arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_STRUCTS|FEATURE_ARRAYS|FEATURE_ARRAYS_OF_ARRAYS|FEATURE_INSTANCE_ARRAYS, 25, 175);
-
- createRandomCaseGroup(randomGroup, m_testCtx, "all_per_block_buffers", "All random features, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, ~0u, 50, 200);
- createRandomCaseGroup(randomGroup, m_testCtx, "all_shared_buffer", "All random features, shared buffer", SSBOLayoutCase::BUFFERMODE_SINGLE, ~0u, 50, 250);
+ createRandomCaseGroup(randomGroup, m_testCtx, "scalar_types", "Scalar types only, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused, 25, 0);
+ createRandomCaseGroup(randomGroup, m_testCtx, "vector_types", "Scalar and vector types only, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|FEATURE_VECTORS, 25, 25);
+ createRandomCaseGroup(randomGroup, m_testCtx, "basic_types", "All basic types, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|allBasicTypes|matFlags, 25, 50);
+ createRandomCaseGroup(randomGroup, m_testCtx, "basic_arrays", "Arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|allBasicTypes|matFlags|FEATURE_ARRAYS, 25, 50);
+ createRandomCaseGroup(randomGroup, m_testCtx, "unsized_arrays", "Unsized arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_ARRAYS, 25, 50);
+ createRandomCaseGroup(randomGroup, m_testCtx, "arrays_of_arrays", "Arrays of arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_ARRAYS|FEATURE_ARRAYS_OF_ARRAYS, 25, 950);
+
+ createRandomCaseGroup(randomGroup, m_testCtx, "basic_instance_arrays", "Basic instance arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_INSTANCE_ARRAYS, 25, 75);
+ createRandomCaseGroup(randomGroup, m_testCtx, "nested_structs", "Nested structs, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_STRUCTS, 25, 100);
+ createRandomCaseGroup(randomGroup, m_testCtx, "nested_structs_arrays", "Nested structs, arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_STRUCTS|FEATURE_ARRAYS|FEATURE_ARRAYS_OF_ARRAYS, 25, 150);
+ createRandomCaseGroup(randomGroup, m_testCtx, "nested_structs_instance_arrays", "Nested structs, instance arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_STRUCTS|FEATURE_INSTANCE_ARRAYS, 25, 125);
+ createRandomCaseGroup(randomGroup, m_testCtx, "nested_structs_arrays_instance_arrays", "Nested structs, instance arrays, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allStdLayouts|unused|allBasicTypes|matFlags|unsized|FEATURE_STRUCTS|FEATURE_ARRAYS|FEATURE_ARRAYS_OF_ARRAYS|FEATURE_INSTANCE_ARRAYS, 25, 175);
+ createRandomCaseGroup(randomGroup, m_testCtx, "all_per_block_buffers", "All random features, per-block buffers", SSBOLayoutCase::BUFFERMODE_PER_BLOCK, allButRelaxed, 50, 200);
+ createRandomCaseGroup(randomGroup, m_testCtx, "all_shared_buffer", "All random features, shared buffer", SSBOLayoutCase::BUFFERMODE_SINGLE, allButRelaxed, 50, 250);
+
+ createRandomCaseGroup(randomGroup, m_testCtx, "relaxed", "VK_KHR_relaxed_block_layout", SSBOLayoutCase::BUFFERMODE_SINGLE, allRelaxed, 100, deInt32Hash(313));
}
}
#include "vktSynchronizationUtil.hpp"
#include "vktSynchronizationOperation.hpp"
#include "vktSynchronizationOperationTestData.hpp"
+#include "vktSynchronizationOperationResources.hpp"
#include "vktExternalMemoryUtil.hpp"
#include "tcuResultCollector.hpp"
vk::VkPhysicalDevice physicalDevice,
vk::VkExternalMemoryHandleTypeFlagBitsKHR memoryHandleType,
vk::VkExternalSemaphoreHandleTypeFlagBitsKHR semaphoreHandleType,
- bool dedicated)
+ bool dedicated,
+ bool khrMemReqSupported)
{
const float priority = 0.0f;
const std::vector<vk::VkQueueFamilyProperties> queueFamilyProperties = vk::getPhysicalDeviceQueueFamilyProperties(vki, physicalDevice);
if (dedicated)
extensions.push_back("VK_KHR_dedicated_allocation");
- extensions.push_back("VK_KHR_get_memory_requirements2");
+ if (khrMemReqSupported)
+ extensions.push_back("VK_KHR_get_memory_requirements2");
+
extensions.push_back("VK_KHR_external_semaphore");
extensions.push_back("VK_KHR_external_memory");
vk::VkDevice device,
vk::VkBuffer buffer,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
- bool dedicated)
+ deUint32& exportedMemoryTypeIndex,
+ bool dedicated,
+ bool getMemReq2Supported)
{
- const vk::VkBufferMemoryRequirementsInfo2KHR requirementInfo =
- {
- vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR,
- DE_NULL,
- buffer
- };
- vk::VkMemoryDedicatedRequirementsKHR dedicatedRequirements =
+ vk::VkMemoryRequirements memoryRequirements = { 0u, 0u, 0u, };
+
+ if (getMemReq2Supported)
{
- vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
- DE_NULL,
- VK_FALSE,
- VK_FALSE
- };
- vk::VkMemoryRequirements2KHR requirements =
+ const vk::VkBufferMemoryRequirementsInfo2KHR requirementInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR,
+ DE_NULL,
+ buffer
+ };
+ vk::VkMemoryDedicatedRequirementsKHR dedicatedRequirements =
+ {
+ vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
+ DE_NULL,
+ VK_FALSE,
+ VK_FALSE
+ };
+ vk::VkMemoryRequirements2KHR requirements =
+ {
+ vk::VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,
+ &dedicatedRequirements,
+ { 0u, 0u, 0u, }
+ };
+ vkd.getBufferMemoryRequirements2KHR(device, &requirementInfo, &requirements);
+
+ if (!dedicated && dedicatedRequirements.requiresDedicatedAllocation)
+ TCU_THROW(NotSupportedError, "Memory requires dedicated allocation");
+
+ memoryRequirements = requirements.memoryRequirements;
+ }
+ else
{
- vk::VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,
- &dedicatedRequirements,
- { 0u, 0u, 0u, }
- };
- vkd.getBufferMemoryRequirements2KHR(device, &requirementInfo, &requirements);
+ vkd.getBufferMemoryRequirements(device, buffer, &memoryRequirements);
+ }
- if (!dedicated && dedicatedRequirements.requiresDedicatedAllocation)
- TCU_THROW(NotSupportedError, "Memory requires dedicated allocation");
- vk::Move<vk::VkDeviceMemory> memory = allocateExportableMemory(vkd, device, requirements.memoryRequirements, externalType, dedicated ? buffer : (vk::VkBuffer)0);
+ vk::Move<vk::VkDeviceMemory> memory = allocateExportableMemory(vkd, device, memoryRequirements, externalType, dedicated ? buffer : (vk::VkBuffer)0, exportedMemoryTypeIndex);
VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0u));
return de::MovePtr<vk::Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
vk::VkDevice device,
vk::VkImage image,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
- bool dedicated)
+ deUint32& exportedMemoryTypeIndex,
+ bool dedicated,
+ bool getMemReq2Supported)
{
- const vk::VkImageMemoryRequirementsInfo2KHR requirementInfo =
- {
- vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR,
- DE_NULL,
- image
- };
- vk::VkMemoryDedicatedRequirementsKHR dedicatedRequirements =
- {
- vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
- DE_NULL,
- VK_FALSE,
- VK_FALSE
- };
- vk::VkMemoryRequirements2KHR requirements =
+ vk::VkMemoryRequirements memoryRequirements = { 0u, 0u, 0u, };
+
+ if (getMemReq2Supported)
{
- vk::VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,
- &dedicatedRequirements,
- { 0u, 0u, 0u, }
- };
- vkd.getImageMemoryRequirements2KHR(device, &requirementInfo, &requirements);
+ const vk::VkImageMemoryRequirementsInfo2KHR requirementInfo =
+ {
+ vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR,
+ DE_NULL,
+ image
+ };
+ vk::VkMemoryDedicatedRequirementsKHR dedicatedRequirements =
+ {
+ vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR,
+ DE_NULL,
+ VK_FALSE,
+ VK_FALSE
+ };
+ vk::VkMemoryRequirements2KHR requirements =
+ {
+ vk::VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR,
+ &dedicatedRequirements,
+ { 0u, 0u, 0u, }
+ };
+ vkd.getImageMemoryRequirements2KHR(device, &requirementInfo, &requirements);
- if (!dedicated && dedicatedRequirements.requiresDedicatedAllocation)
- TCU_THROW(NotSupportedError, "Memomry requires dedicated allocation");
+ if (!dedicated && dedicatedRequirements.requiresDedicatedAllocation)
+ TCU_THROW(NotSupportedError, "Memory requires dedicated allocation");
+ }
+ else
+ {
+ vkd.getImageMemoryRequirements(device, image, &memoryRequirements);
+ }
- vk::Move<vk::VkDeviceMemory> memory = allocateExportableMemory(vkd, device, requirements.memoryRequirements, externalType, dedicated ? image : (vk::VkImage)0);
+ vk::Move<vk::VkDeviceMemory> memory = allocateExportableMemory(vkd, device, memoryRequirements, externalType, dedicated ? image : (vk::VkImage)0, exportedMemoryTypeIndex);
VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0u));
return de::MovePtr<vk::Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
const OperationSupport& readOp,
const OperationSupport& writeOp,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
- bool dedicated)
+ deUint32& exportedMemoryTypeIndex,
+ bool dedicated,
+ bool getMemReq2Supported)
{
if (resourceDesc.type == RESOURCE_TYPE_IMAGE)
{
};
vk::Move<vk::VkImage> image = vk::createImage(vkd, device, &createInfo);
- de::MovePtr<vk::Allocation> allocation = allocateAndBindMemory(vkd, device, *image, externalType, dedicated);
+ de::MovePtr<vk::Allocation> allocation = allocateAndBindMemory(vkd, device, *image, externalType, exportedMemoryTypeIndex, dedicated, getMemReq2Supported);
return de::MovePtr<Resource>(new Resource(image, allocation, extent, resourceDesc.imageType, resourceDesc.imageFormat, subresourceRange, subresourceLayers));
}
&queueFamilyIndices[0]
};
vk::Move<vk::VkBuffer> buffer = vk::createBuffer(vkd, device, &createInfo);
- de::MovePtr<vk::Allocation> allocation = allocateAndBindMemory(vkd, device, *buffer, externalType, dedicated);
+ de::MovePtr<vk::Allocation> allocation = allocateAndBindMemory(vkd, device, *buffer, externalType, exportedMemoryTypeIndex, dedicated, getMemReq2Supported);
return de::MovePtr<Resource>(new Resource(resourceDesc.type, buffer, allocation, offset, size));
}
vk::VkBuffer buffer,
NativeHandle& nativeHandle,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 exportedMemoryTypeIndex,
bool dedicated)
{
const vk::VkMemoryRequirements requirements = vk::getBufferMemoryRequirements(vkd, device, buffer);
vk::Move<vk::VkDeviceMemory> memory = dedicated
- ? importDedicatedMemory(vkd, device, buffer, requirements, externalType, nativeHandle)
- : importMemory(vkd, device, requirements, externalType, nativeHandle);
+ ? importDedicatedMemory(vkd, device, buffer, requirements, externalType, exportedMemoryTypeIndex, nativeHandle)
+ : importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
VK_CHECK(vkd.bindBufferMemory(device, buffer, *memory, 0u));
vk::VkImage image,
NativeHandle& nativeHandle,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 exportedMemoryTypeIndex,
bool dedicated)
{
const vk::VkMemoryRequirements requirements = vk::getImageMemoryRequirements(vkd, device, image);
vk::Move<vk::VkDeviceMemory> memory = dedicated
- ? importDedicatedMemory(vkd, device, image, requirements, externalType, nativeHandle)
- : importMemory(vkd, device, requirements, externalType, nativeHandle);
+ ? importDedicatedMemory(vkd, device, image, requirements, externalType, exportedMemoryTypeIndex, nativeHandle)
+ : importMemory(vkd, device, requirements, externalType, exportedMemoryTypeIndex, nativeHandle);
VK_CHECK(vkd.bindImageMemory(device, image, *memory, 0u));
return de::MovePtr<vk::Allocation>(new SimpleAllocation(vkd, device, memory.disown()));
const OperationSupport& writeOp,
NativeHandle& nativeHandle,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 exportedMemoryTypeIndex,
bool dedicated)
{
if (resourceDesc.type == RESOURCE_TYPE_IMAGE)
};
vk::Move<vk::VkImage> image = vk::createImage(vkd, device, &createInfo);
- de::MovePtr<vk::Allocation> allocation = importAndBindMemory(vkd, device, *image, nativeHandle, externalType, dedicated);
+ de::MovePtr<vk::Allocation> allocation = importAndBindMemory(vkd, device, *image, nativeHandle, externalType, exportedMemoryTypeIndex, dedicated);
return de::MovePtr<Resource>(new Resource(image, allocation, extent, resourceDesc.imageType, resourceDesc.imageFormat, subresourceRange, subresourceLayers));
}
&queueFamilyIndices[0]
};
vk::Move<vk::VkBuffer> buffer = vk::createBuffer(vkd, device, &createInfo);
- de::MovePtr<vk::Allocation> allocation = importAndBindMemory(vkd, device, *buffer, nativeHandle, externalType, dedicated);
+ de::MovePtr<vk::Allocation> allocation = importAndBindMemory(vkd, device, *buffer, nativeHandle, externalType, exportedMemoryTypeIndex, dedicated);
return de::MovePtr<Resource>(new Resource(resourceDesc.type, buffer, allocation, offset, size));
}
const std::vector<vk::VkQueueFamilyProperties> m_queueFamiliesA;
const std::vector<deUint32> m_queueFamilyIndicesA;
+ const bool m_getMemReq2Supported;
+
const vk::Unique<vk::VkDevice> m_deviceA;
const vk::DeviceDriver m_vkdA;
, m_physicalDeviceA (getPhysicalDevice(m_vkiA, *m_instanceA, context.getTestContext().getCommandLine()))
, m_queueFamiliesA (vk::getPhysicalDeviceQueueFamilyProperties(m_vkiA, m_physicalDeviceA))
, m_queueFamilyIndicesA (getFamilyIndices(m_queueFamiliesA))
- , m_deviceA (createDevice(m_vkiA, m_physicalDeviceA, m_config.memoryHandleType, m_config.semaphoreHandleType, m_config.dedicated))
+ , m_getMemReq2Supported (de::contains(context.getInstanceExtensions().begin(), context.getInstanceExtensions().end(), "VK_KHR_get_memory_requirements2"))
+ , m_deviceA (createDevice(m_vkiA, m_physicalDeviceA, m_config.memoryHandleType, m_config.semaphoreHandleType, m_config.dedicated, m_getMemReq2Supported))
, m_vkdA (m_vkiA, *m_deviceA)
, m_instanceB (createInstance(context.getPlatformInterface()))
, m_physicalDeviceB (getPhysicalDevice(m_vkiB, *m_instanceB, getDeviceId(m_vkiA, m_physicalDeviceA)))
, m_queueFamiliesB (vk::getPhysicalDeviceQueueFamilyProperties(m_vkiB, m_physicalDeviceB))
, m_queueFamilyIndicesB (getFamilyIndices(m_queueFamiliesB))
- , m_deviceB (createDevice(m_vkiB, m_physicalDeviceB, m_config.memoryHandleType, m_config.semaphoreHandleType, m_config.dedicated))
+ , m_deviceB (createDevice(m_vkiB, m_physicalDeviceB, m_config.memoryHandleType, m_config.semaphoreHandleType, m_config.dedicated, m_getMemReq2Supported))
, m_vkdB (m_vkiB, *m_deviceB)
, m_semaphoreHandleType (m_config.semaphoreHandleType)
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(m_vkdA, *m_deviceA, m_semaphoreHandleType));
const vk::Unique<vk::VkSemaphore> semaphoreB (createSemaphore(m_vkdB, *m_deviceB));
- const de::UniquePtr<Resource> resourceA (createResource(m_vkdA, *m_deviceA, m_config.resource, m_queueFamilyIndicesA, *m_supportReadOp, *m_supportWriteOp, m_memoryHandleType, m_config.dedicated));
+ deUint32 exportedMemoryTypeIndex = ~0U;
+ const de::UniquePtr<Resource> resourceA (createResource(m_vkdA, *m_deviceA, m_config.resource, m_queueFamilyIndicesA, *m_supportReadOp, *m_supportWriteOp, m_memoryHandleType, exportedMemoryTypeIndex, m_config.dedicated, m_getMemReq2Supported));
NativeHandle nativeMemoryHandle;
getMemoryNative(m_vkdA, *m_deviceA, resourceA->getMemory(), m_memoryHandleType, nativeMemoryHandle);
- const de::UniquePtr<Resource> resourceB (importResource(m_vkdB, *m_deviceB, m_config.resource, m_queueFamilyIndicesB, *m_supportReadOp, *m_supportWriteOp, nativeMemoryHandle, m_memoryHandleType, m_config.dedicated));
+ const de::UniquePtr<Resource> resourceB (importResource(m_vkdB, *m_deviceB, m_config.resource, m_queueFamilyIndicesB, *m_supportReadOp, *m_supportWriteOp, nativeMemoryHandle, m_memoryHandleType, exportedMemoryTypeIndex, m_config.dedicated));
try
{
#include "vktSynchronizationUtil.hpp"
#include "vktSynchronizationOperation.hpp"
#include "vktSynchronizationOperationTestData.hpp"
+#include "vktSynchronizationOperationResources.hpp"
#include "vktTestGroupUtil.hpp"
namespace vkt
--- /dev/null
+#ifndef _VKTSYNCHRONIZATIONOPERATIONRESOURCES_HPP
+#define _VKTSYNCHRONIZATIONOPERATIONRESOURCES_HPP
+/*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2017 The Khronos Group Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *//*!
+ * \file
+ * \brief Synchronization resource list shared between operation test groups
+ *//*--------------------------------------------------------------------*/
+
+#include "tcuDefs.hpp"
+#include "vkDefs.hpp"
+#include "tcuVector.hpp"
+#include "vktSynchronizationOperation.hpp"
+
+namespace vkt
+{
+namespace synchronization
+{
+
+static const ResourceDescription s_resources[] =
+{
+ { RESOURCE_TYPE_BUFFER, tcu::IVec4( 0x4000, 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 }, // 16 KiB (min max UBO range)
+ { RESOURCE_TYPE_BUFFER, tcu::IVec4(0x40000, 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 }, // 256 KiB
+
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 0, 0, 0), vk::VK_IMAGE_TYPE_1D, vk::VK_FORMAT_R32_UINT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
+
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R8_UNORM, vk::VK_IMAGE_ASPECT_COLOR_BIT },
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R16_UINT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R8G8B8A8_UNORM, vk::VK_IMAGE_ASPECT_COLOR_BIT },
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R16G16B16A16_UINT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R32G32B32A32_SFLOAT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
+
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(64, 64, 8, 0), vk::VK_IMAGE_TYPE_3D, vk::VK_FORMAT_R32_SFLOAT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
+
+ // \note Mixed depth/stencil formats add complexity in image<->buffer transfers (packing), so we just avoid them here
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_D16_UNORM, vk::VK_IMAGE_ASPECT_DEPTH_BIT },
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_D32_SFLOAT, vk::VK_IMAGE_ASPECT_DEPTH_BIT },
+ { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_S8_UINT, vk::VK_IMAGE_ASPECT_STENCIL_BIT },
+
+ // \note Special resources, when test case isn't strictly a copy and comparison of some data
+ { RESOURCE_TYPE_INDIRECT_BUFFER_DRAW, tcu::IVec4(sizeof(vk::VkDrawIndirectCommand), 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 },
+ { RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED, tcu::IVec4(sizeof(vk::VkDrawIndexedIndirectCommand), 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 },
+ { RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH, tcu::IVec4(sizeof(vk::VkDispatchIndirectCommand), 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 },
+};
+
+} // synchronization
+} // vkt
+
+#endif // _VKTSYNCHRONIZATIONOPERATIONRESOURCES_HPP
#include "vktSynchronizationUtil.hpp"
#include "vktSynchronizationOperation.hpp"
#include "vktSynchronizationOperationTestData.hpp"
+#include "vktSynchronizationOperationResources.hpp"
namespace vkt
{
namespace synchronization
{
-static const ResourceDescription s_resources[] =
-{
- { RESOURCE_TYPE_BUFFER, tcu::IVec4( 0x4000, 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 }, // 16 KiB (min max UBO range)
- { RESOURCE_TYPE_BUFFER, tcu::IVec4(0x40000, 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 }, // 256 KiB
-
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 0, 0, 0), vk::VK_IMAGE_TYPE_1D, vk::VK_FORMAT_R32_UINT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
-
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R8_UNORM, vk::VK_IMAGE_ASPECT_COLOR_BIT },
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R16_UINT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R8G8B8A8_UNORM, vk::VK_IMAGE_ASPECT_COLOR_BIT },
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R16G16B16A16_UINT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_R32G32B32A32_SFLOAT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
-
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(64, 64, 8, 0), vk::VK_IMAGE_TYPE_3D, vk::VK_FORMAT_R32_SFLOAT, vk::VK_IMAGE_ASPECT_COLOR_BIT },
-
- // \note Mixed depth/stencil formats add complexity in image<->buffer transfers (packing), so we just avoid them here
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_D16_UNORM, vk::VK_IMAGE_ASPECT_DEPTH_BIT },
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_D32_SFLOAT, vk::VK_IMAGE_ASPECT_DEPTH_BIT },
- { RESOURCE_TYPE_IMAGE, tcu::IVec4(128, 128, 0, 0), vk::VK_IMAGE_TYPE_2D, vk::VK_FORMAT_S8_UINT, vk::VK_IMAGE_ASPECT_STENCIL_BIT },
-
- // \note Special resources, when test case isn't strictly a copy and comparison of some data
- { RESOURCE_TYPE_INDIRECT_BUFFER_DRAW, tcu::IVec4(sizeof(vk::VkDrawIndirectCommand), 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 },
- { RESOURCE_TYPE_INDIRECT_BUFFER_DRAW_INDEXED, tcu::IVec4(sizeof(vk::VkDrawIndexedIndirectCommand), 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 },
- { RESOURCE_TYPE_INDIRECT_BUFFER_DISPATCH, tcu::IVec4(sizeof(vk::VkDispatchIndirectCommand), 0, 0, 0), vk::VK_IMAGE_TYPE_LAST, vk::VK_FORMAT_UNDEFINED, (vk::VkImageAspectFlags)0 },
-};
-
static const OperationName s_writeOps[] =
{
OPERATION_NAME_WRITE_FILL_BUFFER,
DE_ASSERT(faceNdx < 6);
- faceBitmap = faceBitmap | (deUint8) (1U << faceNdx);
+ faceBitmap = (deUint8)(faceBitmap | (deUint8) (1U << faceNdx));
}
}
vk::VkDevice device,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
- vk::VkBuffer buffer)
+ vk::VkBuffer buffer,
+ deUint32& exportedMemoryTypeIndex)
{
+ exportedMemoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits);
const vk::VkMemoryDedicatedAllocateInfoKHR dedicatedInfo =
{
vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
&exportInfo,
requirements.size,
- chooseMemoryType(requirements.memoryTypeBits)
+ exportedMemoryTypeIndex
};
return vk::allocateMemory(vkd, device, &info);
}
vk::VkDevice device,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
- vk::VkImage image)
+ vk::VkImage image,
+ deUint32& exportedMemoryTypeIndex)
{
+ exportedMemoryTypeIndex = chooseMemoryType(requirements.memoryTypeBits);
const vk::VkMemoryDedicatedAllocateInfoKHR dedicatedInfo =
{
vk::VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,
vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
&exportInfo,
requirements.size,
- chooseMemoryType(requirements.memoryTypeBits)
+ exportedMemoryTypeIndex
};
return vk::allocateMemory(vkd, device, &info);
}
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
bool hostVisible,
- vk::VkBuffer buffer)
+ vk::VkBuffer buffer,
+ deUint32& exportedMemoryTypeIndex)
{
const vk::VkPhysicalDeviceMemoryProperties properties = vk::getPhysicalDeviceMemoryProperties(vki, physicalDevice);
requirements.size,
memoryTypeIndex
};
+
+ exportedMemoryTypeIndex = memoryTypeIndex;
return vk::allocateMemory(vkd, device, &info);
}
}
vk::VkImage image,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 memoryTypeIndex,
NativeHandle& handle)
{
const bool isDedicated = !!buffer || !!image;
vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
(isDedicated ? (const void*)&dedicatedInfo : (const void*)&importInfo),
requirements.size,
- chooseMemoryType(requirements.memoryTypeBits)
+ (memoryTypeIndex == ~0U) ? chooseMemoryType(requirements.memoryTypeBits) : memoryTypeIndex
};
vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &info));
vk::VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
(isDedicated ? (const void*)&dedicatedInfo : (const void*)&importInfo),
requirements.size,
- chooseMemoryType(requirements.memoryTypeBits)
+ (memoryTypeIndex == ~0U) ? chooseMemoryType(requirements.memoryTypeBits) : memoryTypeIndex
};
vk::Move<vk::VkDeviceMemory> memory (vk::allocateMemory(vkd, device, &info));
vk::VkDevice device,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 memoryTypeIndex,
NativeHandle& handle)
{
- return importMemory(vkd, device, (vk::VkBuffer)0, (vk::VkImage)0, requirements, externalType, handle);
+ return importMemory(vkd, device, (vk::VkBuffer)0, (vk::VkImage)0, requirements, externalType, memoryTypeIndex, handle);
}
vk::Move<vk::VkDeviceMemory> importDedicatedMemory (const vk::DeviceInterface& vkd,
vk::VkBuffer buffer,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 memoryTypeIndex,
NativeHandle& handle)
{
- return importMemory(vkd, device, buffer, (vk::VkImage)0, requirements, externalType, handle);
+ return importMemory(vkd, device, buffer, (vk::VkImage)0, requirements, externalType, memoryTypeIndex, handle);
}
vk::Move<vk::VkDeviceMemory> importDedicatedMemory (const vk::DeviceInterface& vkd,
vk::VkImage image,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 memoryTypeIndex,
NativeHandle& handle)
{
- return importMemory(vkd, device, (vk::VkBuffer)0, image, requirements, externalType, handle);
+ return importMemory(vkd, device, (vk::VkBuffer)0, image, requirements, externalType, memoryTypeIndex, handle);
}
vk::Move<vk::VkBuffer> createExternalBuffer (const vk::DeviceInterface& vkd,
vk::Move<vk::VkDeviceMemory> allocateExportableMemory (const vk::DeviceInterface& vkd,
vk::VkDevice device,
const vk::VkMemoryRequirements& requirements,
- vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType);
+ vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32& exportedMemoryTypeIndex);
// If buffer is not null use dedicated allocation
vk::Move<vk::VkDeviceMemory> allocateExportableMemory (const vk::DeviceInterface& vkd,
vk::VkDevice device,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
- vk::VkBuffer buffer);
+ vk::VkBuffer buffer,
+ deUint32& exportedMemoryTypeIndex);
// If image is not null use dedicated allocation
vk::Move<vk::VkDeviceMemory> allocateExportableMemory (const vk::DeviceInterface& vkd,
vk::VkDevice device,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
- vk::VkImage image);
+ vk::VkImage image,
+ deUint32& exportedMemoryTypeIndex);
// \note hostVisible argument is strict. Setting it to false will cause NotSupportedError to be thrown if non-host visible memory doesn't exist.
// If buffer is not null use dedicated allocation
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
bool hostVisible,
- vk::VkBuffer buffer);
+ vk::VkBuffer buffer,
+ deUint32& exportedMemoryTypeIndex);
vk::Move<vk::VkDeviceMemory> importMemory (const vk::DeviceInterface& vkd,
vk::VkDevice device,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 memoryTypeIndex,
NativeHandle& handle);
vk::Move<vk::VkDeviceMemory> importDedicatedMemory (const vk::DeviceInterface& vkd,
vk::VkBuffer buffer,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 memoryTypeIndex,
NativeHandle& handle);
vk::Move<vk::VkDeviceMemory> importDedicatedMemory (const vk::DeviceInterface& vkd,
vk::VkImage image,
const vk::VkMemoryRequirements& requirements,
vk::VkExternalMemoryHandleTypeFlagBitsKHR externalType,
+ deUint32 memoryTypeIndex,
NativeHandle& handle);
vk::Move<vk::VkBuffer> createExternalBuffer (const vk::DeviceInterface& vkd,
dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.std430_comp_access
dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.std430_instance_array
dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.std430_instance_array_comp_access
+dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.relaxed_block
+dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.relaxed_block_comp_access
+dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.relaxed_block_instance_array
+dEQP-VK.ssbo.layout.multi_basic_types.per_block_buffer.relaxed_block_instance_array_comp_access
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std140
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std140_comp_access
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std140_instance_array
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std430_comp_access
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std430_instance_array
dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.std430_instance_array_comp_access
+dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.relaxed_block
+dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.relaxed_block_comp_access
+dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.relaxed_block_instance_array
+dEQP-VK.ssbo.layout.multi_basic_types.single_buffer.relaxed_block_instance_array_comp_access
dEQP-VK.ssbo.layout.multi_nested_struct.per_block_buffer.std140
dEQP-VK.ssbo.layout.multi_nested_struct.per_block_buffer.std140_comp_access
dEQP-VK.ssbo.layout.multi_nested_struct.per_block_buffer.std140_instance_array
dEQP-VK.ssbo.layout.random.all_shared_buffer.47
dEQP-VK.ssbo.layout.random.all_shared_buffer.48
dEQP-VK.ssbo.layout.random.all_shared_buffer.49
+dEQP-VK.ssbo.layout.random.relaxed.0
+dEQP-VK.ssbo.layout.random.relaxed.1
+dEQP-VK.ssbo.layout.random.relaxed.2
+dEQP-VK.ssbo.layout.random.relaxed.3
+dEQP-VK.ssbo.layout.random.relaxed.4
+dEQP-VK.ssbo.layout.random.relaxed.5
+dEQP-VK.ssbo.layout.random.relaxed.6
+dEQP-VK.ssbo.layout.random.relaxed.7
+dEQP-VK.ssbo.layout.random.relaxed.8
+dEQP-VK.ssbo.layout.random.relaxed.9
+dEQP-VK.ssbo.layout.random.relaxed.10
+dEQP-VK.ssbo.layout.random.relaxed.11
+dEQP-VK.ssbo.layout.random.relaxed.12
+dEQP-VK.ssbo.layout.random.relaxed.13
+dEQP-VK.ssbo.layout.random.relaxed.14
+dEQP-VK.ssbo.layout.random.relaxed.15
+dEQP-VK.ssbo.layout.random.relaxed.16
+dEQP-VK.ssbo.layout.random.relaxed.17
+dEQP-VK.ssbo.layout.random.relaxed.18
+dEQP-VK.ssbo.layout.random.relaxed.19
+dEQP-VK.ssbo.layout.random.relaxed.20
+dEQP-VK.ssbo.layout.random.relaxed.21
+dEQP-VK.ssbo.layout.random.relaxed.22
+dEQP-VK.ssbo.layout.random.relaxed.23
+dEQP-VK.ssbo.layout.random.relaxed.24
+dEQP-VK.ssbo.layout.random.relaxed.25
+dEQP-VK.ssbo.layout.random.relaxed.26
+dEQP-VK.ssbo.layout.random.relaxed.27
+dEQP-VK.ssbo.layout.random.relaxed.28
+dEQP-VK.ssbo.layout.random.relaxed.29
+dEQP-VK.ssbo.layout.random.relaxed.30
+dEQP-VK.ssbo.layout.random.relaxed.31
+dEQP-VK.ssbo.layout.random.relaxed.32
+dEQP-VK.ssbo.layout.random.relaxed.33
+dEQP-VK.ssbo.layout.random.relaxed.34
+dEQP-VK.ssbo.layout.random.relaxed.35
+dEQP-VK.ssbo.layout.random.relaxed.36
+dEQP-VK.ssbo.layout.random.relaxed.37
+dEQP-VK.ssbo.layout.random.relaxed.38
+dEQP-VK.ssbo.layout.random.relaxed.39
+dEQP-VK.ssbo.layout.random.relaxed.40
+dEQP-VK.ssbo.layout.random.relaxed.41
+dEQP-VK.ssbo.layout.random.relaxed.42
+dEQP-VK.ssbo.layout.random.relaxed.43
+dEQP-VK.ssbo.layout.random.relaxed.44
+dEQP-VK.ssbo.layout.random.relaxed.45
+dEQP-VK.ssbo.layout.random.relaxed.46
+dEQP-VK.ssbo.layout.random.relaxed.47
+dEQP-VK.ssbo.layout.random.relaxed.48
+dEQP-VK.ssbo.layout.random.relaxed.49
+dEQP-VK.ssbo.layout.random.relaxed.50
+dEQP-VK.ssbo.layout.random.relaxed.51
+dEQP-VK.ssbo.layout.random.relaxed.52
+dEQP-VK.ssbo.layout.random.relaxed.53
+dEQP-VK.ssbo.layout.random.relaxed.54
+dEQP-VK.ssbo.layout.random.relaxed.55
+dEQP-VK.ssbo.layout.random.relaxed.56
+dEQP-VK.ssbo.layout.random.relaxed.57
+dEQP-VK.ssbo.layout.random.relaxed.58
+dEQP-VK.ssbo.layout.random.relaxed.59
+dEQP-VK.ssbo.layout.random.relaxed.60
+dEQP-VK.ssbo.layout.random.relaxed.61
+dEQP-VK.ssbo.layout.random.relaxed.62
+dEQP-VK.ssbo.layout.random.relaxed.63
+dEQP-VK.ssbo.layout.random.relaxed.64
+dEQP-VK.ssbo.layout.random.relaxed.65
+dEQP-VK.ssbo.layout.random.relaxed.66
+dEQP-VK.ssbo.layout.random.relaxed.67
+dEQP-VK.ssbo.layout.random.relaxed.68
+dEQP-VK.ssbo.layout.random.relaxed.69
+dEQP-VK.ssbo.layout.random.relaxed.70
+dEQP-VK.ssbo.layout.random.relaxed.71
+dEQP-VK.ssbo.layout.random.relaxed.72
+dEQP-VK.ssbo.layout.random.relaxed.73
+dEQP-VK.ssbo.layout.random.relaxed.74
+dEQP-VK.ssbo.layout.random.relaxed.75
+dEQP-VK.ssbo.layout.random.relaxed.76
+dEQP-VK.ssbo.layout.random.relaxed.77
+dEQP-VK.ssbo.layout.random.relaxed.78
+dEQP-VK.ssbo.layout.random.relaxed.79
+dEQP-VK.ssbo.layout.random.relaxed.80
+dEQP-VK.ssbo.layout.random.relaxed.81
+dEQP-VK.ssbo.layout.random.relaxed.82
+dEQP-VK.ssbo.layout.random.relaxed.83
+dEQP-VK.ssbo.layout.random.relaxed.84
+dEQP-VK.ssbo.layout.random.relaxed.85
+dEQP-VK.ssbo.layout.random.relaxed.86
+dEQP-VK.ssbo.layout.random.relaxed.87
+dEQP-VK.ssbo.layout.random.relaxed.88
+dEQP-VK.ssbo.layout.random.relaxed.89
+dEQP-VK.ssbo.layout.random.relaxed.90
+dEQP-VK.ssbo.layout.random.relaxed.91
+dEQP-VK.ssbo.layout.random.relaxed.92
+dEQP-VK.ssbo.layout.random.relaxed.93
+dEQP-VK.ssbo.layout.random.relaxed.94
+dEQP-VK.ssbo.layout.random.relaxed.95
+dEQP-VK.ssbo.layout.random.relaxed.96
+dEQP-VK.ssbo.layout.random.relaxed.97
+dEQP-VK.ssbo.layout.random.relaxed.98
+dEQP-VK.ssbo.layout.random.relaxed.99
dEQP-VK.query_pool.occlusion_query.basic_conservative
dEQP-VK.query_pool.occlusion_query.basic_precise
dEQP-VK.query_pool.occlusion_query.get_results_conservative_size_32_wait_queue_without_availability_draw_points
else:
print "Skip: %s" % step.getName()
-def runRecipe (steps):
- allSteps = PREREQUISITES + steps + POST_CHECKS
- runSteps(allSteps)
-
COMMON_GCC_CFLAGS = ["-Werror"]
COMMON_CLANG_CFLAGS = COMMON_GCC_CFLAGS + ["-Wno-error=unused-command-line-argument"]
GCC_32BIT_CFLAGS = COMMON_GCC_CFLAGS + ["-m32"]
dest="dumpRecipes",
action="store_true",
help="Print out recipes that have any available actions")
+ parser.add_argument("--skip-prerequisites",
+ dest="skipPrerequisites",
+ action="store_true",
+ help="Skip external dependency fetch")
+
return parser.parse_args()
if __name__ == "__main__":
print "Running %s" % name
- runRecipe(steps)
+ allSteps = (PREREQUISITES if (args.skipPrerequisites == False) else []) + steps + POST_CHECKS
+ runSteps(allSteps)
print "All steps completed successfully"