Stage stage;
UpdateAfterBind uab;
deUint32 seed;
+ VkFlags allShaderStages;
+ VkFlags allPipelineStages;
};
+ // Query the physical-device feature structs this test needs.
+ // Zero-initializes all three out-params via initVulkanStructure(), chains the
+ // descriptor-indexing and inline-uniform-block feature structs onto
+ // features.pNext only when the corresponding extension is supported, then
+ // fills them all with one vkGetPhysicalDeviceFeatures2 query.
+ static void getNeededFeatures(const Context& context,
+ VkPhysicalDeviceFeatures2& features,
+ VkPhysicalDeviceInlineUniformBlockFeaturesEXT& inlineUniformFeatures,
+ VkPhysicalDeviceDescriptorIndexingFeatures& indexingFeatures)
+ {
+ inlineUniformFeatures = initVulkanStructure();
+ indexingFeatures = initVulkanStructure();
+ features = initVulkanStructure();
+
+ // Build the pNext chain tail-first; addToChainVulkanStructure appears to
+ // append the struct at *nextPtr and advance nextPtr to its pNext member.
+ void** nextPtr = &features.pNext;
+ if (context.isDeviceFunctionalitySupported("VK_EXT_descriptor_indexing"))
+ addToChainVulkanStructure(&nextPtr, indexingFeatures);
+ if (context.isDeviceFunctionalitySupported("VK_EXT_inline_uniform_block"))
+ addToChainVulkanStructure(&nextPtr, inlineUniformFeatures);
+
+ // Fills 'features' and every chained struct with the device's support bits.
+ context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
+ }
class RandomLayout
{
VkPhysicalDeviceProperties2 properties;
deMemset(&properties, 0, sizeof(properties));
properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
+ void ** pNextTail = &properties.pNext;
- if (isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_EXT_inline_uniform_block"))
+ if (context.isDeviceFunctionalitySupported("VK_EXT_inline_uniform_block"))
{
- properties.pNext = &inlineUniformProperties;
+ *pNextTail = &inlineUniformProperties;
+ pNextTail = &inlineUniformProperties.pNext;
}
- if (isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_NV_ray_tracing"))
+ if (context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
+ {
+ *pNextTail = &rayTracingProperties;
+ pNextTail = &rayTracingProperties.pNext;
+ }
+ *pNextTail = NULL;
+
context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties);
- VkPhysicalDeviceInlineUniformBlockFeaturesEXT inlineUniformFeatures;
- deMemset(&inlineUniformFeatures, 0, sizeof(inlineUniformFeatures));
- inlineUniformFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT;
-
- VkPhysicalDeviceDescriptorIndexingFeatures indexingFeatures;
- deMemset(&indexingFeatures, 0, sizeof(indexingFeatures));
- indexingFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES;
-
VkPhysicalDeviceFeatures2 features;
- deMemset(&features, 0, sizeof(features));
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
-
- const bool descriptorIndexing = context.isDescriptorIndexingSupported();
-
- if (descriptorIndexing &&
- isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_EXT_inline_uniform_block"))
- {
- indexingFeatures.pNext = &inlineUniformFeatures;
- features.pNext = &indexingFeatures;
- }
- else if (descriptorIndexing)
- {
- features.pNext = &indexingFeatures;
- }
- else if (isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_EXT_inline_uniform_block"))
- {
- features.pNext = &inlineUniformFeatures;
- }
+ VkPhysicalDeviceDescriptorIndexingFeatures indexingFeatures;
+ VkPhysicalDeviceInlineUniformBlockFeaturesEXT inlineUniformFeatures;
+ getNeededFeatures(context, features, inlineUniformFeatures, indexingFeatures);
context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features);
if (m_data.stage == STAGE_VERTEX && !features.features.vertexPipelineStoresAndAtomics)
{
return TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
}
-
+ else if (m_data.stage == STAGE_RAYGEN &&
- !isDeviceExtensionSupported(context.getUsedApiVersion(), context.getDeviceExtensions(), "VK_NV_ray_tracing"))
+ !context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
+ {
+ return TCU_THROW(NotSupportedError, "Ray tracing is not supported");
+ }
if ((m_data.indexType == INDEX_TYPE_PUSHCONSTANT ||
m_data.indexType == INDEX_TYPE_DEPENDENT ||
m_data.indexType == INDEX_TYPE_RUNTIME_SIZE) &&
deMemset(&properties, 0, sizeof(properties));
properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
- if (isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_NV_ray_tracing"))
+ VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties;
+ deMemset(&rayTracingProperties, 0, sizeof(rayTracingProperties));
+ rayTracingProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV;
+
+ if (m_context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
+ {
+ properties.pNext = &rayTracingProperties;
+ }
+
m_context.getInstanceInterface().getPhysicalDeviceProperties2(m_context.getPhysicalDevice(), &properties);
+ VkPhysicalDeviceFeatures2 features;
VkPhysicalDeviceInlineUniformBlockFeaturesEXT inlineUniformFeatures;
- deMemset(&inlineUniformFeatures, 0, sizeof(inlineUniformFeatures));
- inlineUniformFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_INLINE_UNIFORM_BLOCK_FEATURES_EXT;
-
VkPhysicalDeviceDescriptorIndexingFeatures indexingFeatures;
- deMemset(&indexingFeatures, 0, sizeof(indexingFeatures));
- indexingFeatures.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT;
-
- VkPhysicalDeviceFeatures2 features;
- deMemset(&features, 0, sizeof(features));
- features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
-
- const bool descriptorIndexing = m_context.isDescriptorIndexingSupported();
-
- if (descriptorIndexing &&
- isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_EXT_inline_uniform_block"))
- {
- indexingFeatures.pNext = &inlineUniformFeatures;
- features.pNext = &indexingFeatures;
- }
- else if (descriptorIndexing)
- {
- features.pNext = &indexingFeatures;
- }
- else if (isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_EXT_inline_uniform_block"))
- {
- features.pNext = &inlineUniformFeatures;
- }
-
- m_context.getInstanceInterface().getPhysicalDeviceFeatures2(m_context.getPhysicalDevice(), &features);
+ getNeededFeatures(m_context, features, inlineUniformFeatures, indexingFeatures);
deRandom rnd;
deRandom_init(&rnd, m_data.seed);
const VkDevice device = m_context.getDevice();
Allocator& allocator = m_context.getDefaultAllocator();
MemoryRequirement memoryDeviceAddress = m_data.storageClass == SC_PHYSICAL_STORAGE_BUFFER &&
- m_context.isBufferDeviceAddressKHRSupported() ? MemoryRequirement::DeviceAddress : MemoryRequirement::Any;
+ m_context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address") ? MemoryRequirement::DeviceAddress : MemoryRequirement::Any;
+ qpTestResult finalres = QP_TEST_RESULT_PASS;
+ tcu::TestLog& log = m_context.getTestContext().getLog();
deRandom rnd;
deRandom_init(&rnd, 1234);
}
else
{
- bufferSizes[4] = sizeof(VkDeviceAddress)*4;
+ dims[0].rows = M;
+ dims[0].cols = N;
+ dims[1].rows = M;
+ dims[1].cols = N;
+ dims[2].rows = M;
+ dims[2].cols = N;
+ dims[3].rows = M;
+ dims[3].cols = N;
}
- try
+ VkComponentTypeNV dataTypes[4];
+ size_t elementSize[4];
+ VkDeviceSize bufferSizes[5];
+ de::MovePtr<BufferWithMemory> buffers[5];
+ vk::VkDescriptorBufferInfo bufferDescriptors[5];
+ deUint32 strides[4]; // in elements
+ deUint32 totalElements[4];
+
+ for (deUint32 i = 0; i < 5; ++i)
{
- buffers[i] = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
- vk, device, allocator, makeBufferCreateInfo(bufferSizes[i], VK_BUFFER_USAGE_STORAGE_BUFFER_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT|VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT),
- MemoryRequirement::HostVisible | MemoryRequirement::Cached | MemoryRequirement::Coherent | memoryDeviceAddress));
+ if (i < 4)
+ {
+ // A/B use input type, C/D use output type
+ dataTypes[i] = (i < 2) ? m_data.inputType : m_data.outputType;
+ elementSize[i] = componentTypeInfo[dataTypes[i]].bits / 8;
+
+ strides[i] = (m_data.colMajor ? dims[i].rows : dims[i].cols) * m_data.subgroupsPerWorkgroupX * m_data.workgroupsX;
+ totalElements[i] = strides[i] * (m_data.colMajor ? dims[i].cols : dims[i].rows) * m_data.subgroupsPerWorkgroupY * m_data.workgroupsY;
+
+ bufferSizes[i] = totalElements[i] * elementSize[i];
+ }
+ else
+ {
+ bufferSizes[4] = sizeof(VkDeviceAddress)*4;
+ }
+
+ try
+ {
+ buffers[i] = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
+ vk, device, allocator, makeBufferCreateInfo(bufferSizes[i], VK_BUFFER_USAGE_STORAGE_BUFFER_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT|VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT),
+ MemoryRequirement::HostVisible | MemoryRequirement::Cached | MemoryRequirement::Coherent | memoryDeviceAddress));
+ }
+ catch (const tcu::NotSupportedError&)
+ {
+ buffers[i] = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
+ vk, device, allocator, makeBufferCreateInfo(bufferSizes[i], VK_BUFFER_USAGE_STORAGE_BUFFER_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT|VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT),
+ MemoryRequirement::HostVisible | memoryDeviceAddress));
+ }
+
+ bufferDescriptors[i] = makeDescriptorBufferInfo(**buffers[i], 0, bufferSizes[i]);
}
- catch (const tcu::NotSupportedError&)
+
+ void *ptrs[5];
+ for (deUint32 i = 0; i < 5; ++i)
{
- buffers[i] = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
- vk, device, allocator, makeBufferCreateInfo(bufferSizes[i], VK_BUFFER_USAGE_STORAGE_BUFFER_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT|VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT),
- MemoryRequirement::HostVisible | memoryDeviceAddress));
+ ptrs[i] = buffers[i]->getAllocation().getHostPtr();
}
- bufferDescriptors[i] = makeDescriptorBufferInfo(**buffers[i], 0, bufferSizes[i]);
- }
+ vk::DescriptorSetLayoutBuilder layoutBuilder;
- void *ptrs[5];
- for (deUint32 i = 0; i < 5; ++i)
- {
- ptrs[i] = buffers[i]->getAllocation().getHostPtr();
- }
+ layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
+ layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
+ layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
+ layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
+ layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
- vk::DescriptorSetLayoutBuilder layoutBuilder;
+ vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(layoutBuilder.build(vk, device));
- layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
- layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
- layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
- layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
- layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, allShaderStages);
+ vk::Unique<vk::VkDescriptorPool> descriptorPool(vk::DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 5u)
+ .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
+ vk::Unique<vk::VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
- vk::Unique<vk::VkDescriptorSetLayout> descriptorSetLayout(layoutBuilder.build(vk, device));
+ vk::DescriptorSetUpdateBuilder setUpdateBuilder;
+ if (m_data.storageClass == SC_PHYSICAL_STORAGE_BUFFER)
+ {
- const bool useKHR = m_context.isBufferDeviceAddressKHRSupported();
+ const bool useKHR = m_context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address");
- vk::Unique<vk::VkDescriptorPool> descriptorPool(vk::DescriptorPoolBuilder()
- .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 5u)
- .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
- vk::Unique<vk::VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
+ VkBufferDeviceAddressInfo info =
+ {
+ VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0, // VkBuffer buffer
+ };
+ VkDeviceAddress *addrsInMemory = (VkDeviceAddress *)ptrs[4];
+ for (deUint32 i = 0; i < 4; ++i)
+ {
+ info.buffer = **buffers[i];
+ VkDeviceAddress addr;
+ if (useKHR)
+ addr = vk.getBufferDeviceAddress(device, &info);
+ else
+ addr = vk.getBufferDeviceAddressEXT(device, &info);
+ addrsInMemory[i] = addr;
+ }
+ setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(4),
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[4]);
+ }
+ else
+ {
+ setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0),
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[0]);
+ setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1),
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[1]);
+ setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2),
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[2]);
+ setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(3),
+ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[3]);
+ }
- vk::DescriptorSetUpdateBuilder setUpdateBuilder;
- if (m_data.storageClass == SC_PHYSICAL_STORAGE_BUFFER)
- {
- const bool useKHR = m_context.isDeviceFunctionalitySupported("VK_KHR_buffer_device_address");
+ setUpdateBuilder.update(vk, device);
+
+ const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
+ DE_NULL, // pNext
+ (VkPipelineLayoutCreateFlags)0,
+ 1, // setLayoutCount
+ &descriptorSetLayout.get(), // pSetLayouts
+ 0u, // pushConstantRangeCount
+ DE_NULL, // pPushConstantRanges
+ };
+
+ Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
+
+ Move<VkPipeline> pipeline;
+
+ VkPipelineBindPoint bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
+
+ const deUint32 specData[9] =
+ {
+ subgroupProperties.subgroupSize * m_data.subgroupsPerWorkgroupX,
+ m_data.subgroupsPerWorkgroupY,
+ strides[0],
+ strides[1],
+ strides[2],
+ strides[3],
+ M,
+ N,
+ K,
+ };
- VkBufferDeviceAddressInfo info =
+ const vk::VkSpecializationMapEntry entries[9] =
{
- VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO, // VkStructureType sType;
- DE_NULL, // const void* pNext;
- 0, // VkBuffer buffer
+ {0, (deUint32)(sizeof(deUint32) * 0), sizeof(deUint32)},
+ {1, (deUint32)(sizeof(deUint32) * 1), sizeof(deUint32)},
+ {2, (deUint32)(sizeof(deUint32) * 2), sizeof(deUint32)},
+ {3, (deUint32)(sizeof(deUint32) * 3), sizeof(deUint32)},
+ {4, (deUint32)(sizeof(deUint32) * 4), sizeof(deUint32)},
+ {5, (deUint32)(sizeof(deUint32) * 5), sizeof(deUint32)},
+ {6, (deUint32)(sizeof(deUint32) * 6), sizeof(deUint32)},
+ {7, (deUint32)(sizeof(deUint32) * 7), sizeof(deUint32)},
+ {8, (deUint32)(sizeof(deUint32) * 8), sizeof(deUint32)},
};
- VkDeviceAddress *addrsInMemory = (VkDeviceAddress *)ptrs[4];
+
+ const vk::VkSpecializationInfo specInfo =
+ {
+ 9, // mapEntryCount
+ entries, // pMapEntries
+ sizeof(specData), // dataSize
+ specData // pData
+ };
+
for (deUint32 i = 0; i < 4; ++i)
+ for (deUint32 j = 0; j < totalElements[i]; ++j)
+ {
+ if (isFloatType(dataTypes[i]))
+ {
+ if (m_data.testType != TT_MATRIXMULADD &&
+ m_data.testType != TT_MATRIXMULADD_ARRAY)
+ setDataFloat(ptrs[i], dataTypes[i], j, ((float)(deRandom_getUint32(&rnd) & 0xff) - 64.0f)/2.0f);
+ else
+ setDataFloat(ptrs[i], dataTypes[i], j, ((float)(deRandom_getUint32(&rnd) & 0xf) - 4.0f)/2.0f);
+ }
+ else
+ setDataInt(ptrs[i], dataTypes[i], j, (deRandom_getUint32(&rnd) & 0xff) - 128);
+ }
+
+ flushAlloc(vk, device, buffers[0]->getAllocation());
+ flushAlloc(vk, device, buffers[1]->getAllocation());
+ flushAlloc(vk, device, buffers[2]->getAllocation());
+ flushAlloc(vk, device, buffers[3]->getAllocation());
+
+ const Unique<VkShaderModule> shader (createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
+
+ const VkPipelineShaderStageCreateInfo shaderCreateInfo =
{
- info.buffer = **buffers[i];
- VkDeviceAddress addr;
- if (useKHR)
- addr = vk.getBufferDeviceAddress(device, &info);
- else
- addr = vk.getBufferDeviceAddressEXT(device, &info);
- addrsInMemory[i] = addr;
- }
- setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(4),
- VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[4]);
- }
- else
- {
- setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(0),
- VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[0]);
- setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(1),
- VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[1]);
- setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(2),
- VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[2]);
- setUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(3),
- VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &bufferDescriptors[3]);
- }
+ VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ DE_NULL,
+ (VkPipelineShaderStageCreateFlags)0,
+ VK_SHADER_STAGE_COMPUTE_BIT, // stage
+ *shader, // shader
+ "main",
+ &specInfo, // pSpecializationInfo
+ };
- setUpdateBuilder.update(vk, device);
+ const VkComputePipelineCreateInfo pipelineCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
+ DE_NULL,
+ 0u, // flags
+ shaderCreateInfo, // cs
+ *pipelineLayout, // layout
+ (vk::VkPipeline)0, // basePipelineHandle
+ 0u, // basePipelineIndex
+ };
+ pipeline = createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
- const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
- {
- VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
- DE_NULL, // pNext
- (VkPipelineLayoutCreateFlags)0,
- 1, // setLayoutCount
- &descriptorSetLayout.get(), // pSetLayouts
- 0u, // pushConstantRangeCount
- DE_NULL, // pPushConstantRanges
- };
+ const VkQueue queue = m_context.getUniversalQueue();
+ Move<VkCommandPool> cmdPool = createCommandPool(vk, device, 0, m_context.getUniversalQueueFamilyIndex());
+ Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
- Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
+ beginCommandBuffer(vk, *cmdBuffer, 0u);
- Move<VkPipeline> pipeline;
+ vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0u, 1, &*descriptorSet, 0u, DE_NULL);
+ vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);
- VkPipelineBindPoint bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
+ vk.cmdDispatch(*cmdBuffer, m_data.workgroupsX, m_data.workgroupsY, 1);
- const deUint32 specData[6] =
- {
- subgroupProperties.subgroupSize * m_data.subgroupsPerWorkgroupX,
- m_data.subgroupsPerWorkgroupY,
- strides[0],
- strides[1],
- strides[2],
- strides[3],
- };
+ endCommandBuffer(vk, *cmdBuffer);
- const vk::VkSpecializationMapEntry entries[6] =
- {
- {0, (deUint32)(sizeof(deUint32) * 0), sizeof(deUint32)},
- {1, (deUint32)(sizeof(deUint32) * 1), sizeof(deUint32)},
- {2, (deUint32)(sizeof(deUint32) * 2), sizeof(deUint32)},
- {3, (deUint32)(sizeof(deUint32) * 3), sizeof(deUint32)},
- {4, (deUint32)(sizeof(deUint32) * 4), sizeof(deUint32)},
- {5, (deUint32)(sizeof(deUint32) * 5), sizeof(deUint32)},
- };
+ submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
- const vk::VkSpecializationInfo specInfo =
- {
- 6, // mapEntryCount
- entries, // pMapEntries
- sizeof(specData), // dataSize
- specData // pData
- };
+ invalidateAlloc(vk, device, buffers[3]->getAllocation());
- for (deUint32 i = 0; i < 4; ++i)
- for (deUint32 j = 0; j < totalElements[i]; ++j)
+ qpTestResult res = QP_TEST_RESULT_PASS;
+
+ if (isFloatType(dataTypes[0]))
{
if (m_data.testType != TT_MATRIXMULADD &&
m_data.testType != TT_MATRIXMULADD_ARRAY)