/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2017-2019 The Khronos Group Inc.
 * Copyright (c) 2018-2020 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * \brief Vulkan robustness2 tests
 *//*--------------------------------------------------------------------*/
#include "vktRobustnessExtsTests.hpp"

#include "vkBufferWithMemory.hpp"
#include "vkImageWithMemory.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkDeviceUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vktRobustnessUtil.hpp"

#include "vktTestGroupUtil.hpp"
#include "vktTestCase.hpp"

#include "deSharedPtr.hpp"

#include "tcuVectorType.hpp"
#include "tcuTestCase.hpp"
#include "tcuTestLog.hpp"
enum RobustnessFeatureBits
{
    RF_IMG_ROBUSTNESS       = (1 << 0),
    RF_ROBUSTNESS2          = (1 << 1),
    SIF_INT64ATOMICS        = (1 << 2),
    RF_PIPELINE_ROBUSTNESS  = (1 << 3),
    SBL_SCALAR_BLOCK_LAYOUT = (1 << 4),
};

using RobustnessFeatures = deUint32;
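// A test case's required features are expressed as a bitwise OR of the flags
// above, e.g. (RF_ROBUSTNESS2 | SBL_SCALAR_BLOCK_LAYOUT); see the SingletonDevice
// aliases further down for the combinations actually used.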
// Class to wrap a singleton device with the indicated robustness features.
template <RobustnessFeatures FEATURES>
SingletonDevice (Context& context)
    : m_context(context), m_instanceWrapper(new CustomInstanceWrapper(context, context.getInstanceExtensions())), m_logicalDevice()
    // Note we are already checking the needed features are available in checkSupport().
    VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
    VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
    VkPhysicalDeviceScalarBlockLayoutFeatures scalarBlockLayoutFeatures = initVulkanStructure();
    VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT shaderImageAtomicInt64Features = initVulkanStructure();
    VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();

    if (FEATURES & SBL_SCALAR_BLOCK_LAYOUT)
        DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"));
        scalarBlockLayoutFeatures.pNext = features2.pNext;
        features2.pNext = &scalarBlockLayoutFeatures;

    if (FEATURES & RF_IMG_ROBUSTNESS)
        DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));

        if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
            imageRobustnessFeatures.pNext = features2.pNext;
            features2.pNext = &imageRobustnessFeatures;

    if (FEATURES & RF_ROBUSTNESS2)
        DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));

        if (!(FEATURES & RF_PIPELINE_ROBUSTNESS))
            robustness2Features.pNext = features2.pNext;
            features2.pNext = &robustness2Features;
#ifndef CTS_USES_VULKANSC
    VkPhysicalDevicePipelineRobustnessFeaturesEXT pipelineRobustnessFeatures = initVulkanStructure();
    if (FEATURES & RF_PIPELINE_ROBUSTNESS)
        DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"));
        pipelineRobustnessFeatures.pNext = features2.pNext;
        features2.pNext = &pipelineRobustnessFeatures;
#endif // CTS_USES_VULKANSC

    if (FEATURES & SIF_INT64ATOMICS)
        DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"));
        shaderImageAtomicInt64Features.pNext = features2.pNext;
        features2.pNext = &shaderImageAtomicInt64Features;
    const VkPhysicalDevice physicalDevice = chooseDevice(m_instanceWrapper->instance.getDriver(), m_instanceWrapper->instance, context.getTestContext().getCommandLine());
    m_instanceWrapper->instance.getDriver().getPhysicalDeviceFeatures2(physicalDevice, &features2);
    m_logicalDevice = createRobustBufferAccessDevice(context, m_instanceWrapper->instance, m_instanceWrapper->instance.getDriver(), &features2);
#ifndef CTS_USES_VULKANSC
    m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), m_instanceWrapper->instance, *m_logicalDevice));
#else
    m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), m_instanceWrapper->instance, *m_logicalDevice, context.getTestContext().getCommandLine(), context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties(), m_context.getDeviceProperties()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
#endif // CTS_USES_VULKANSC
static VkInstance getInstance(Context& context)
    if (!m_singletonDevice)
        m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
    DE_ASSERT(m_singletonDevice);
    return m_singletonDevice->m_instanceWrapper->instance;

static const InstanceInterface& getInstanceInterface(Context& context)
    if (!m_singletonDevice)
        m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
    DE_ASSERT(m_singletonDevice);
    return m_singletonDevice->m_instanceWrapper->instance.getDriver();

static VkDevice getDevice(Context& context)
    if (!m_singletonDevice)
        m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
    DE_ASSERT(m_singletonDevice);
    return m_singletonDevice->m_logicalDevice.get();

static const DeviceInterface& getDeviceInterface(Context& context)
    if (!m_singletonDevice)
        m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
    DE_ASSERT(m_singletonDevice);
    return *(m_singletonDevice->m_deviceDriver.get());

static void destroy()
    m_singletonDevice.clear();

const Context& m_context;
std::shared_ptr<CustomInstanceWrapper> m_instanceWrapper;
Move<vk::VkDevice> m_logicalDevice;
#ifndef CTS_USES_VULKANSC
de::MovePtr<vk::DeviceDriver> m_deviceDriver;
#else
de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> m_deviceDriver;
#endif // CTS_USES_VULKANSC
static SharedPtr<SingletonDevice<FEATURES>> m_singletonDevice;

template <RobustnessFeatures FEATURES>
SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;
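// Each distinct FEATURES value instantiates its own SingletonDevice type with its
// own static instance, so every robustness feature combination creates exactly one
// logical device that is then shared by all test cases needing that combination.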
constexpr RobustnessFeatures kImageRobustness = RF_IMG_ROBUSTNESS;
constexpr RobustnessFeatures kRobustness2 = RF_ROBUSTNESS2;
constexpr RobustnessFeatures kPipelineRobustness = RF_PIPELINE_ROBUSTNESS;
constexpr RobustnessFeatures kShaderImageInt64Atomics = SIF_INT64ATOMICS;
constexpr RobustnessFeatures kScalarBlockLayout = SBL_SCALAR_BLOCK_LAYOUT;

using ImageRobustnessSingleton = SingletonDevice<kImageRobustness>;
using Robustness2Singleton = SingletonDevice<kRobustness2>;

using ImageRobustnessScalarSingleton = SingletonDevice<kImageRobustness | kScalarBlockLayout>;
using Robustness2ScalarSingleton = SingletonDevice<kRobustness2 | kScalarBlockLayout>;

using PipelineRobustnessImageRobustnessSingleton = SingletonDevice<kImageRobustness | kPipelineRobustness>;
using PipelineRobustnessRobustness2Singleton = SingletonDevice<kRobustness2 | kPipelineRobustness>;

using PipelineRobustnessImageRobustnessScalarSingleton = SingletonDevice<kImageRobustness | kPipelineRobustness | kScalarBlockLayout>;
using PipelineRobustnessRobustness2ScalarSingleton = SingletonDevice<kRobustness2 | kPipelineRobustness | kScalarBlockLayout>;

using ImageRobustnessInt64AtomicsSingleton = SingletonDevice<kImageRobustness | kShaderImageInt64Atomics>;
using Robustness2Int64AtomicsSingleton = SingletonDevice<kRobustness2 | kShaderImageInt64Atomics>;

using ImageRobustnessInt64AtomicsScalarSingleton = SingletonDevice<kImageRobustness | kShaderImageInt64Atomics | kScalarBlockLayout>;
using Robustness2Int64AtomicsScalarSingleton = SingletonDevice<kRobustness2 | kShaderImageInt64Atomics | kScalarBlockLayout>;

using PipelineRobustnessImageRobustnessInt64AtomicsSingleton = SingletonDevice<kImageRobustness | kPipelineRobustness | kShaderImageInt64Atomics>;
using PipelineRobustnessRobustness2Int64AtomicsSingleton = SingletonDevice<kRobustness2 | kPipelineRobustness | kShaderImageInt64Atomics>;

using PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton = SingletonDevice<kImageRobustness | kPipelineRobustness | kShaderImageInt64Atomics | kScalarBlockLayout>;
using PipelineRobustnessRobustness2Int64AtomicsScalarSingleton = SingletonDevice<kRobustness2 | kPipelineRobustness | kShaderImageInt64Atomics | kScalarBlockLayout>;
// Render target / compute grid dimensions
static const deUint32 DIM = 8;

// treated as a phony VkDescriptorType value
#define VERTEX_ATTRIBUTE_FETCH 999
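// Out-of-bounds vertex attribute fetches are driven through the same switch-based
// code paths as the real descriptor types, so they get a phony value well outside
// the valid VkDescriptorType range.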
VkFlags allShaderStages;
VkFlags allPipelineStages;
int/*VkDescriptorType*/ descriptorType;
VkImageViewType viewType;
VkSampleCountFlagBits samples;
bool formatQualifier;
bool testRobustness2;
bool testPipelineRobustness;
deUint32 imageDim[3]; // width, height, depth or layers
bool needsScalarBlockLayout() const
    bool scalarNeeded = false;

    switch (descriptorType)
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        scalarNeeded = true;
        break;
    default:
        scalarNeeded = false;
        break;

    return scalarNeeded;
static bool formatIsR64(const VkFormat& f)
    switch (f)
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R64_UINT:
        return true;
    default:
        return false;
// Returns the appropriate singleton device for the given case.
VkInstance getInstance(Context& ctx, const CaseDef& caseDef)
    if (caseDef.needsScalarBlockLayout())
        if (formatIsR64(caseDef.format))
            if (caseDef.testRobustness2)
                return Robustness2Int64AtomicsScalarSingleton::getInstance(ctx);
            return ImageRobustnessInt64AtomicsScalarSingleton::getInstance(ctx);

        if (caseDef.testRobustness2)
            return Robustness2ScalarSingleton::getInstance(ctx);
        return ImageRobustnessScalarSingleton::getInstance(ctx);

    if (formatIsR64(caseDef.format))
        if (caseDef.testRobustness2)
            return Robustness2Int64AtomicsSingleton::getInstance(ctx);
        return ImageRobustnessInt64AtomicsSingleton::getInstance(ctx);

    if (caseDef.testRobustness2)
        return Robustness2Singleton::getInstance(ctx);
    return ImageRobustnessSingleton::getInstance(ctx);

// Returns the appropriate singleton device driver for the given case.
const InstanceInterface& getInstanceInterface(Context& ctx, const CaseDef& caseDef)
    if (caseDef.needsScalarBlockLayout())
        if (formatIsR64(caseDef.format))
            if (caseDef.testRobustness2)
                return Robustness2Int64AtomicsScalarSingleton::getInstanceInterface(ctx);
            return ImageRobustnessInt64AtomicsScalarSingleton::getInstanceInterface(ctx);

        if (caseDef.testRobustness2)
            return Robustness2ScalarSingleton::getInstanceInterface(ctx);
        return ImageRobustnessScalarSingleton::getInstanceInterface(ctx);

    if (formatIsR64(caseDef.format))
        if (caseDef.testRobustness2)
            return Robustness2Int64AtomicsSingleton::getInstanceInterface(ctx);
        return ImageRobustnessInt64AtomicsSingleton::getInstanceInterface(ctx);

    if (caseDef.testRobustness2)
        return Robustness2Singleton::getInstanceInterface(ctx);
    return ImageRobustnessSingleton::getInstanceInterface(ctx);

// Returns the appropriate singleton device for the given case.
VkDevice getLogicalDevice (Context& ctx, const CaseDef& caseDef)
    if (caseDef.needsScalarBlockLayout())
        if (caseDef.testPipelineRobustness)
            if (formatIsR64(caseDef.format))
                if (caseDef.testRobustness2)
                    return PipelineRobustnessRobustness2Int64AtomicsScalarSingleton::getDevice(ctx);
                return PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton::getDevice(ctx);

            if (caseDef.testRobustness2)
                return PipelineRobustnessRobustness2ScalarSingleton::getDevice(ctx);
            return PipelineRobustnessImageRobustnessScalarSingleton::getDevice(ctx);

        if (formatIsR64(caseDef.format))
            if (caseDef.testRobustness2)
                return Robustness2Int64AtomicsScalarSingleton::getDevice(ctx);
            return ImageRobustnessInt64AtomicsScalarSingleton::getDevice(ctx);

        if (caseDef.testRobustness2)
            return Robustness2ScalarSingleton::getDevice(ctx);
        return ImageRobustnessScalarSingleton::getDevice(ctx);

    if (caseDef.testPipelineRobustness)
        if (formatIsR64(caseDef.format))
            if (caseDef.testRobustness2)
                return PipelineRobustnessRobustness2Int64AtomicsSingleton::getDevice(ctx);
            return PipelineRobustnessImageRobustnessInt64AtomicsSingleton::getDevice(ctx);

        if (caseDef.testRobustness2)
            return PipelineRobustnessRobustness2Singleton::getDevice(ctx);
        return PipelineRobustnessImageRobustnessSingleton::getDevice(ctx);

    if (formatIsR64(caseDef.format))
        if (caseDef.testRobustness2)
            return Robustness2Int64AtomicsSingleton::getDevice(ctx);
        return ImageRobustnessInt64AtomicsSingleton::getDevice(ctx);

    if (caseDef.testRobustness2)
        return Robustness2Singleton::getDevice(ctx);
    return ImageRobustnessSingleton::getDevice(ctx);

// Returns the appropriate singleton device driver for the given case.
const DeviceInterface& getDeviceInterface(Context& ctx, const CaseDef& caseDef)
    if (caseDef.needsScalarBlockLayout())
        if (formatIsR64(caseDef.format))
            if (caseDef.testRobustness2)
                return Robustness2Int64AtomicsScalarSingleton::getDeviceInterface(ctx);
            return ImageRobustnessInt64AtomicsScalarSingleton::getDeviceInterface(ctx);

        if (caseDef.testRobustness2)
            return Robustness2ScalarSingleton::getDeviceInterface(ctx);
        return ImageRobustnessScalarSingleton::getDeviceInterface(ctx);

    if (formatIsR64(caseDef.format))
        if (caseDef.testRobustness2)
            return Robustness2Int64AtomicsSingleton::getDeviceInterface(ctx);
        return ImageRobustnessInt64AtomicsSingleton::getDeviceInterface(ctx);

    if (caseDef.testRobustness2)
        return Robustness2Singleton::getDeviceInterface(ctx);
    return ImageRobustnessSingleton::getDeviceInterface(ctx);
vector<VkDescriptorSetLayoutBinding> layoutBindings;
vector<deUint8> refData;

class RobustnessExtsTestInstance : public TestInstance
    RobustnessExtsTestInstance (Context& context, const CaseDef& data);
    ~RobustnessExtsTestInstance (void);
    tcu::TestStatus iterate (void);

RobustnessExtsTestInstance::RobustnessExtsTestInstance (Context& context, const CaseDef& data)
    : vkt::TestInstance (context)

RobustnessExtsTestInstance::~RobustnessExtsTestInstance (void)

class RobustnessExtsTestCase : public TestCase
    RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data);
    ~RobustnessExtsTestCase (void);
    virtual void initPrograms (SourceCollections& programCollection) const;
    virtual TestInstance* createInstance (Context& context) const;
    virtual void checkSupport (Context& context) const;

RobustnessExtsTestCase::RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data)
    : vkt::TestCase (context, name, desc)

RobustnessExtsTestCase::~RobustnessExtsTestCase (void)
static bool formatIsFloat(const VkFormat& f)
    switch (f)
    case VK_FORMAT_R32_SFLOAT:
    case VK_FORMAT_R32G32_SFLOAT:
    case VK_FORMAT_R32G32B32A32_SFLOAT:
        return true;
    default:
        return false;
static bool formatIsSignedInt(const VkFormat& f)
    switch (f)
    case VK_FORMAT_R32_SINT:
    case VK_FORMAT_R64_SINT:
    case VK_FORMAT_R32G32_SINT:
    case VK_FORMAT_R32G32B32A32_SINT:
        return true;
    default:
        return false;
static bool supportsStores(int descriptorType)
    switch (descriptorType)
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        return true;
    default:
        return false;
#ifndef CTS_USES_VULKANSC
static VkPipelineRobustnessCreateInfoEXT getPipelineRobustnessInfo(bool robustness2, int descriptorType)
    VkPipelineRobustnessCreateInfoEXT robustnessCreateInfo = initVulkanStructure();

    switch (descriptorType)
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        robustnessCreateInfo.storageBuffers = (robustness2
            ? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
            : VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);

    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        robustnessCreateInfo.images = (robustness2
            ? VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT
            : VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT);

    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        robustnessCreateInfo.uniformBuffers = (robustness2
            ? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
            : VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);

    case VERTEX_ATTRIBUTE_FETCH:
        robustnessCreateInfo.vertexInputs = (robustness2
            ? VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT
            : VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT);

    return robustnessCreateInfo;
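// Illustrative note (not used verbatim in this file): the struct returned above is
// meant to be chained into the pNext of the pipeline create info when the test
// builds its pipeline, roughly:
//
//   VkPipelineRobustnessCreateInfoEXT info = getPipelineRobustnessInfo(robustness2, descriptorType);
//   info.pNext = pipelineCreateInfo.pNext;
//   pipelineCreateInfo.pNext = &info;
//
// With VK_EXT_pipeline_robustness this selects robust behavior per pipeline instead
// of relying on the device-wide robustness features.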
void RobustnessExtsTestCase::checkSupport(Context& context) const
    const auto& vki = context.getInstanceInterface();
    const auto physicalDevice = context.getPhysicalDevice();

    // We need to query feature support using the physical device instead of using the reported context features because robustness2
    // and image robustness are always disabled in the default device but they may be available.
    VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
    VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
    VkPhysicalDeviceScalarBlockLayoutFeatures scalarLayoutFeatures = initVulkanStructure();
    VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();

    context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");

    if (context.isDeviceFunctionalitySupported("VK_EXT_scalar_block_layout"))
        scalarLayoutFeatures.pNext = features2.pNext;
        features2.pNext = &scalarLayoutFeatures;

    if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
        imageRobustnessFeatures.pNext = features2.pNext;
        features2.pNext = &imageRobustnessFeatures;

    if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
        robustness2Features.pNext = features2.pNext;
        features2.pNext = &robustness2Features;
#ifndef CTS_USES_VULKANSC
    VkPhysicalDevicePipelineRobustnessFeaturesEXT pipelineRobustnessFeatures = initVulkanStructure();
    if (context.isDeviceFunctionalitySupported("VK_EXT_pipeline_robustness"))
        pipelineRobustnessFeatures.pNext = features2.pNext;
        features2.pNext = &pipelineRobustnessFeatures;
#endif // CTS_USES_VULKANSC
    vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);

    if (formatIsR64(m_data.format))
        context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

    VkFormatProperties formatProperties;
    vki.getPhysicalDeviceFormatProperties(physicalDevice, m_data.format, &formatProperties);

#ifndef CTS_USES_VULKANSC
    const VkFormatProperties3KHR formatProperties3 = context.getFormatProperties(m_data.format);
#endif // CTS_USES_VULKANSC

    switch (m_data.descriptorType)
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
            TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");

    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
            TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
#ifndef CTS_USES_VULKANSC
        if ((formatProperties3.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR) != VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR)
            TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT is not supported");
#endif // CTS_USES_VULKANSC

    case VERTEX_ATTRIBUTE_FETCH:
        if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
            TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");

    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
            TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
    default: DE_ASSERT(true);

    if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
        if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
            TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");

    // Check needed properties and features
    if (m_data.needsScalarBlockLayout() && !scalarLayoutFeatures.scalarBlockLayout)
        TCU_THROW(NotSupportedError, "Scalar block layout not supported");

    if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
        TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");

    if (m_data.stage == STAGE_FRAGMENT && !features2.features.fragmentStoresAndAtomics)
        TCU_THROW(NotSupportedError, "Fragment shader stores not supported");

    if (m_data.stage == STAGE_RAYGEN)
        context.requireDeviceFunctionality("VK_NV_ray_tracing");

    switch (m_data.descriptorType)
    default: DE_ASSERT(0); // Fallthrough
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VERTEX_ATTRIBUTE_FETCH:
        if (m_data.testRobustness2)
            if (!robustness2Features.robustBufferAccess2)
                TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
            // This case is not tested here.

    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        if (m_data.testRobustness2)
            if (!robustness2Features.robustImageAccess2)
                TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
            if (!imageRobustnessFeatures.robustImageAccess)
                TCU_THROW(NotSupportedError, "robustImageAccess not supported");

    if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
        TCU_THROW(NotSupportedError, "nullDescriptor not supported");

    // The fill shader for 64-bit multisample image tests uses a storage image.
    if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && formatIsR64(m_data.format) &&
        !features2.features.shaderStorageImageMultisample)
        TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

    if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
        m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
        !features2.features.shaderStorageImageMultisample)
        TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

    if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(0, 1, 1, 0)))
        TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");

#ifndef CTS_USES_VULKANSC
    if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
        !m_data.formatQualifier)
        const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
        if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support reading without format");
        if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
            TCU_THROW(NotSupportedError, "Format does not support writing without format");

    if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
        !m_data.formatQualifier &&
        (!features2.features.shaderStorageImageReadWithoutFormat || !features2.features.shaderStorageImageWriteWithoutFormat))
        TCU_THROW(NotSupportedError, "shaderStorageImageReadWithoutFormat or shaderStorageImageWriteWithoutFormat not supported");
#endif // CTS_USES_VULKANSC

    if (m_data.pushDescriptor)
        context.requireDeviceFunctionality("VK_KHR_push_descriptor");

    if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !features2.features.imageCubeArray)
        TCU_THROW(NotSupportedError, "Cube array image view type not supported");

    if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") && !context.getDeviceFeatures().robustBufferAccess)
        TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");

#ifndef CTS_USES_VULKANSC
    if (m_data.testPipelineRobustness && !pipelineRobustnessFeatures.pipelineRobustness)
        TCU_THROW(NotSupportedError, "pipelineRobustness not supported");
void generateLayout(Layout &layout, const CaseDef &caseDef)
    vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
    int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
    bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);

    for (deUint32 b = 0; b < layout.layoutBindings.size(); ++b)
        VkDescriptorSetLayoutBinding &binding = bindings[b];
        binding.pImmutableSamplers = NULL;
        binding.stageFlags = caseDef.allShaderStages;
        binding.descriptorCount = 1;
            binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
            binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;

    if (caseDef.nullDescriptor)

    if (caseDef.bufferLen == 0)
        // Clear color values for image tests
        static deUint32 urefData[4] = { 0x12345678, 0x23456789, 0x34567890, 0x45678901 };
        static deUint64 urefData64[4] = { 0x1234567887654321, 0x234567899, 0x345678909, 0x456789019 };
        static float frefData[4] = { 123.f, 234.f, 345.f, 456.f };

        if (formatIsR64(caseDef.format))
            layout.refData.resize(32);
            deUint64 *ptr = (deUint64 *)layout.refData.data();

            for (unsigned int i = 0; i < 4; ++i)
                ptr[i] = urefData64[i];

            layout.refData.resize(16);
            deMemcpy(layout.refData.data(), formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData, sizeof(frefData));

        layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7 : ~3));
        for (unsigned int i = 0; i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(deUint64) : sizeof(deUint32)); ++i)
            if (formatIsFloat(caseDef.format))
                float *f = (float *)layout.refData.data() + i;
                *f = 2.0f*(float)i + 3.0f;
            if (formatIsR64(caseDef.format))
                deUint64 *u = (deUint64 *)layout.refData.data() + i;
                int *u = (int *)layout.refData.data() + i;
static string genFetch(const CaseDef &caseDef, int numComponents, const string& vecType, const string& coord, const string& lod)
    // Fetch from the descriptor.
    switch (caseDef.descriptorType)
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        s << vecType << "(ubo0_1.val[" << coord << "]";
        for (int i = numComponents; i < 4; ++i) s << ", 0";

    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << vecType << "(ssbo0_1.val[" << coord << "]";
        for (int i = numComponents; i < 4; ++i) s << ", 0";

    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        s << "texelFetch(texbo0_1, " << coord << ")";
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << "imageLoad(image0_1, " << coord << ")";
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
            s << "texelFetch(texture0_1, " << coord << ")";
            s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << "imageLoad(image0_1, " << coord << ")";
    case VERTEX_ATTRIBUTE_FETCH:
    default: DE_ASSERT(0);
static const int storeValue = 123;

// Get the value stored by genStore.
static string getStoreValue(int descriptorType, int numComponents, const string& vecType, const string& bufType)
    switch (descriptorType)
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << vecType << "(" << bufType << "(" << storeValue << ")";
        for (int i = numComponents; i < 4; ++i) s << ", 0";

    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << vecType << "(" << storeValue << ")";
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << vecType << "(" << storeValue << ")";
    default: DE_ASSERT(0);

static string genStore(int descriptorType, const string& vecType, const string& bufType, const string& coord)
    // Store to the descriptor.
    switch (descriptorType)
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
    default: DE_ASSERT(0);

static string genAtomic(int descriptorType, const string& bufType, const string& coord)
    // Store to the descriptor. The value doesn't matter, since we only test out of bounds coordinates.
    switch (descriptorType)
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
    default: DE_ASSERT(0);
static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
    const char* orderPart;
    const char* typePart;

    switch (format.order)
    case tcu::TextureFormat::R:     orderPart = "r";    break;
    case tcu::TextureFormat::RG:    orderPart = "rg";   break;
    case tcu::TextureFormat::RGB:   orderPart = "rgb";  break;
    case tcu::TextureFormat::RGBA:  orderPart = "rgba"; break;
        DE_FATAL("Impossible");

    switch (format.type)
    case tcu::TextureFormat::FLOAT:             typePart = "32f";       break;
    case tcu::TextureFormat::HALF_FLOAT:        typePart = "16f";       break;

    case tcu::TextureFormat::UNSIGNED_INT64:    typePart = "64ui";      break;
    case tcu::TextureFormat::UNSIGNED_INT32:    typePart = "32ui";      break;
    case tcu::TextureFormat::UNSIGNED_INT16:    typePart = "16ui";      break;
    case tcu::TextureFormat::UNSIGNED_INT8:     typePart = "8ui";       break;

    case tcu::TextureFormat::SIGNED_INT64:      typePart = "64i";       break;
    case tcu::TextureFormat::SIGNED_INT32:      typePart = "32i";       break;
    case tcu::TextureFormat::SIGNED_INT16:      typePart = "16i";       break;
    case tcu::TextureFormat::SIGNED_INT8:       typePart = "8i";        break;

    case tcu::TextureFormat::UNORM_INT16:       typePart = "16";        break;
    case tcu::TextureFormat::UNORM_INT8:        typePart = "8";         break;

    case tcu::TextureFormat::SNORM_INT16:       typePart = "16_snorm";  break;
    case tcu::TextureFormat::SNORM_INT8:        typePart = "8_snorm";   break;

        DE_FATAL("Impossible");

    return std::string() + orderPart + typePart;
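// Example: VK_FORMAT_R32_UINT has order R ("r") and channel type UNSIGNED_INT32
// ("32ui"), so the function above yields the GLSL format qualifier "r32ui".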
string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
    if (samples != VK_SAMPLE_COUNT_1_BIT)

    string coord = "ivec" + to_string(numCoords) + "(";

    for (int i = 0; i < numCoords; ++i)
        if (i < numCoords - 1)

    // Append sample coordinate
    if (samples != VK_SAMPLE_COUNT_1_BIT)
        if (dim == numCoords)

// Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
    // dim can be 3 for cube_array. Reuse the number of layers in that case.
    dim = std::min(dim, 2);

        return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";

    string coord = "vec" + to_string(numCoords) + "(";

    for (int i = 0; i < numCoords; ++i)
        if (i < numNormalizedCoords)
            coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
        if (i < numCoords - 1)
void RobustnessExtsTestCase::initPrograms (SourceCollections& programCollection) const
    VkFormat format = m_data.format;

    generateLayout(layout, m_data);

    if (layout.layoutBindings.size() > 1 &&
        layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
        if (format == VK_FORMAT_R64_SINT)
            format = VK_FORMAT_R32G32_SINT;

        if (format == VK_FORMAT_R64_UINT)
            format = VK_FORMAT_R32G32_UINT;

    std::stringstream decls, checks;

    const string r64 = formatIsR64(format) ? "64" : "";
    const string i64Type = formatIsR64(format) ? "64_t" : "";
    const string vecType = formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
    const string qLevelType = vecType == "vec4" ? "float" : ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) : ("uint" + i64Type);

    decls << "uvec4 abs(uvec4 x) { return x; }\n";
    if (formatIsR64(format))
        decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
    decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";
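    // smod() keeps the result non-negative for negative inputs so that negative loop
    // counters still map into refData, e.g. smod(-3, 8): -3 + 8*(3/8 + 1) = 5, and 5 % 8 = 5.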
    const int componentsSize = (formatIsR64(format) ? 8 : 4);
    int refDataNumElements = deIntRoundToPow2(((int)layout.refData.size() / componentsSize), 4);
    // Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
    // robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
    if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
        m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
        refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
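        // For example, with 4-byte components the element count is rounded up to a
        // multiple of 256 / 4 = 64 elements (256 bytes); with 8-byte components, to
        // 256 / 8 = 32 elements.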
    if (m_data.nullDescriptor)
        refDataNumElements = 4;

    if (formatIsFloat(format))
        decls << "float refData[" << refDataNumElements << "] = {";
        for (i = 0; i < (int)layout.refData.size() / 4; ++i)
            decls << ((const float *)layout.refData.data())[i];
        while (i < refDataNumElements)

    else if (formatIsR64(format))
        decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
        for (i = 0; i < (int)layout.refData.size() / 8; ++i)
            decls << ((const deUint64 *)layout.refData.data())[i] << "l";
        while (i < refDataNumElements)

        decls << "int" << " refData[" << refDataNumElements << "] = {";
        for (i = 0; i < (int)layout.refData.size() / 4; ++i)
            decls << ((const int *)layout.refData.data())[i];
        while (i < refDataNumElements)

    decls << vecType << " zzzz = " << vecType << "(0);\n";
    decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
    decls << vecType << " expectedIB;\n";

    string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
    string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
    string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));
    string imageDim = "";
    int numCoords, numNormalizedCoords;
    bool layered = false;
    switch (m_data.viewType)
    default: DE_ASSERT(0); // Fallthrough
    case VK_IMAGE_VIEW_TYPE_1D:         imageDim = "1D";        numCoords = 1; numNormalizedCoords = 1; break;
    case VK_IMAGE_VIEW_TYPE_1D_ARRAY:   imageDim = "1DArray";   numCoords = 2; numNormalizedCoords = 1; layered = true; break;
    case VK_IMAGE_VIEW_TYPE_2D:         imageDim = "2D";        numCoords = 2; numNormalizedCoords = 2; break;
    case VK_IMAGE_VIEW_TYPE_2D_ARRAY:   imageDim = "2DArray";   numCoords = 3; numNormalizedCoords = 2; layered = true; break;
    case VK_IMAGE_VIEW_TYPE_3D:         imageDim = "3D";        numCoords = 3; numNormalizedCoords = 3; break;
    case VK_IMAGE_VIEW_TYPE_CUBE:       imageDim = "Cube";      numCoords = 3; numNormalizedCoords = 3; break;
    case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: imageDim = "CubeArray"; numCoords = 4; numNormalizedCoords = 3; layered = true; break;

    if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
        switch (m_data.viewType)
        default: DE_ASSERT(0); // Fallthrough
        case VK_IMAGE_VIEW_TYPE_2D:         imageDim = "2DMS";      break;
        case VK_IMAGE_VIEW_TYPE_2D_ARRAY:   imageDim = "2DMSArray"; break;

    bool dataDependsOnLayer = (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) && !m_data.nullDescriptor;

    // Special case imageLoad(imageCubeArray, ...) which uses ivec3
    if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
        m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)

    int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
    if (numComponents == 1)
        bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
        bufType = imgprefix + "vec" + std::to_string(numComponents);

    // For UBO's, which have a declared size in the shader, don't access outside that size.
    bool declaredSize = false;
    switch (m_data.descriptorType) {
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        declaredSize = true;

    checks << " int inboundcoords, clampedLayer;\n";
    checks << " " << vecType << " expectedIB2;\n";
        checks << " [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
        checks << " [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
        checks << " [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
        checks << " [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";

    if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
        checks << " int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
        checks << " int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";
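    // idx wraps the (possibly negative) loop counter into refData via smod(), so every
    // in-bounds load has a deterministic expected value to compare against.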
    decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";

    const char *vol = m_data.vol ? "volatile " : "";
    const char *ro = m_data.readOnly ? "readonly " : "";

    // Construct the declaration for the binding
    switch (m_data.descriptorType)
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
        decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";

    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
        decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
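        // ssbo0_1_pad binds the same buffer but declares a leading vec4, giving the
        // runtime array a different offset; it is exercised below through the
        // .length() checks for null descriptors.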
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_FORMAT_R64_SINT:
            decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
        case VK_FORMAT_R64_UINT:
            decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
            decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";

    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "imageBuffer image0_1;\n";

    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image" << imageDim << " image0_1;\n";

    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
        case VK_FORMAT_R64_SINT:
            decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1; \n";
        case VK_FORMAT_R64_UINT:
            decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1; \n";
            decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";

    case VERTEX_ATTRIBUTE_FETCH:
        if (formatIsR64(format))
            decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t")) << " attr;\n";
            decls << "layout(location = 0) in " << vecType << " attr;\n";

    default: DE_ASSERT(0);
    switch (m_data.descriptorType)
    default: DE_ASSERT(0); // Fallthrough
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        expectedOOB = "zzzz";

    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
    case VERTEX_ATTRIBUTE_FETCH:
        if (numComponents == 1)
            expectedOOB = "zzzo";
        else if (numComponents == 2)
            expectedOOB = "zzzo";
            expectedOOB = "zzzz";

    switch (m_data.descriptorType)
    default: DE_ASSERT(0); // Fallthrough
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
    case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
    case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
    case VERTEX_ATTRIBUTE_FETCH:

    case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
    case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:

    if (m_data.nullDescriptor)
        checks << " expectedIB = zzzz;\n";
        checks << " inboundcoords = 0;\n";
        checks << " int paddedinboundcoords = 0;\n";
        // Vertex attribute fetch still gets format conversion applied
        if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
            expectedOOB = "zzzz";

        checks << " expectedIB.x = refData[" << idx << "];\n";
        if (numComponents > 1)
            checks << " expectedIB.y = refData[" << idx << "+1];\n";
            checks << " expectedIB.y = 0;\n";
        if (numComponents > 2)
            checks << " expectedIB.z = refData[" << idx << "+2];\n";
            checks << " expectedIB.w = refData[" << idx << "+3];\n";
            checks << " expectedIB.z = 0;\n";
            checks << " expectedIB.w = " << defaultw << ";\n";
        switch (m_data.descriptorType)
        default: DE_ASSERT(0); // Fallthrough
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
            // UBOs can either strictly bounds check against inboundcoords, or can
            // return the contents from memory for the range padded up to paddedinboundcoords.
            checks << " int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
        case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
        case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
        case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
        case VERTEX_ATTRIBUTE_FETCH:
            checks << " inboundcoords = " << layout.refData.size() / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32)) / numComponents << ";\n";
        case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
        case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
            // set per-component below

    if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
         m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
         m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
         m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
        for (int i = 0; i < numCoords; ++i)
            // Treat i==3 coord (cube array layer) like i == 2
            deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
            if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
                checks << " inboundcoords = " << coordDim << ";\n";

            string coord = genCoord("c", numCoords, m_data.samples, i);
            string inboundcoords =
                m_data.nullDescriptor ? "0" :
                (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ? to_string(m_data.samples) : "inboundcoords";

            checks << " if (c < 0 || c >= " << inboundcoords << ") " << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
            if (m_data.formatQualifier &&
                (format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
                checks << " if (c < 0 || c >= " << inboundcoords << ") " << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
    for (int i = 0; i < numCoords; ++i)
        // Treat i==3 coord (cube array layer) like i == 2
        deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
        if (!m_data.nullDescriptor)
            switch (m_data.descriptorType)
            case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
            case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                checks << " inboundcoords = " << coordDim << ";\n";

        string coord = genCoord("c", numCoords, m_data.samples, i);

        if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
            if (formatIsR64(format))
                checks << " temp.x = attr;\n";
                checks << " temp.y = 0l;\n";
                checks << " temp.z = 0l;\n";
                checks << " temp.w = 0l;\n";
                checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else temp -= zzzz;\n";
                checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
                checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else temp -= " << expectedOOB << ";\n";
            // Accumulate any incorrect values.
            checks << " accum += abs(temp);\n";

        // Skip texelFetch testing for cube(array) - texelFetch doesn't support it
        if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
            !(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
              (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
            checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";

            checks << " expectedIB2 = expectedIB;\n";

            // Expected data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
            if (dataDependsOnLayer && i == numNormalizedCoords)
                checks << " if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";

            if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
                if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
                    checks << " if (temp == zzzz) temp = " << vecType << "(0);\n";
                    if (m_data.formatQualifier && numComponents < 4)
                        checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";
                    checks << " else temp = " << vecType << "(1);\n";

                // multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
                checks << " if (c >= 0 && c < " << m_data.samples << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";

            // Storage buffers may be split into per-component loads. Generate a second
            // expected out of bounds value where some subset of the components are
            // actually in-bounds. If both loads and stores are split into per-component
            // accesses, then the result value can be a mix of storeValue and zero.
            string expectedOOB2 = expectedOOB;
            string expectedOOB3 = expectedOOB;
            if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
                !m_data.nullDescriptor)
                int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
                int mod = (int)((len / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32))) % numComponents);
                string sstoreValue = de::toString(storeValue);
                    expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
                    expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
                    expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
                    expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
                    expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
                    expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";

            // Entirely in-bounds.
            checks << " if (c >= 0 && c < inboundcoords) {\n"
                      " if (temp == expectedIB2) temp = " << vecType << "(0); else temp = " << vecType << "(1);\n"

            // normal out-of-bounds value
            if (m_data.testRobustness2)
                checks << " else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
                // image_robustness relaxes alpha which is allowed to be zero or one
                checks << " else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";

            if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
                m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
                checks << " else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = " << vecType << "(0);\n";

            // null descriptor loads with image format layout qualifier that doesn't include alpha may return alpha=1
            if (m_data.nullDescriptor && m_data.formatQualifier &&
                (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
                checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";

            // non-volatile value replaced with stored value
            if (supportsStores(m_data.descriptorType) && !m_data.vol)
                checks << " else if (temp == " << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType) << ") temp = " << vecType << "(0);\n";

            // value straddling the boundary, returning a partial vector
            if (expectedOOB2 != expectedOOB)
                checks << " else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType << "(0);\n";
            if (expectedOOB3 != expectedOOB)
                checks << " else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType << "(0);\n";

            checks << " else temp = " << vecType << "(1);\n";

            // Accumulate any incorrect values.
            checks << " accum += abs(temp);\n";

            // Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
            if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER && m_data.samples == VK_SAMPLE_COUNT_1_BIT)
                // Fetch from an out of bounds mip level. Expect this to always return the OOB value.
                string coord0 = genCoord("0", numCoords, m_data.samples, i);
                checks << " if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c") << "; else temp = " << vecType << "(0);\n";
                checks << " if (c != 0) temp -= " << expectedOOB << ";\n";
                checks << " accum += abs(temp);\n";
        if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
            m_data.samples == VK_SAMPLE_COUNT_1_BIT)
            string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);

            checks << " expectedIB2 = expectedIB;\n";

            // Data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
            if (dataDependsOnLayer && i == numNormalizedCoords)
                checks << " clampedLayer = clamp(c, 0, " << coordDim-1 << ");\n";
                checks << " expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";

            stringstream normexpected;
            // Cubemap fetches are always in-bounds. Layer coordinate is clamped, so is always in-bounds.
            if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
                m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
                (layered && i == numCoords-1))
                normexpected << " temp -= expectedIB2;\n";

                normexpected << " if (c >= 0 && c < inboundcoords)\n";
                normexpected << " temp -= expectedIB2;\n";
                normexpected << " else\n";
                if (m_data.testRobustness2)
                    normexpected << " temp -= " << expectedOOB << ";\n";
                    // image_robustness relaxes alpha which is allowed to be zero or one
                    normexpected << " temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";

            checks << " temp = texture(texture0_1, " << coordNorm << ");\n";
            checks << normexpected.str();
            checks << " accum += abs(temp);\n";
            checks << " temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
            checks << normexpected.str();
            checks << " accum += abs(temp);\n";
            checks << " temp = textureGrad(texture0_1, " << coordNorm << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
            checks << normexpected.str();
            checks << " accum += abs(temp);\n";

        if (m_data.nullDescriptor)
            const char *sizeswiz;
            switch (m_data.viewType)
            default: DE_ASSERT(0); // Fallthrough
            case VK_IMAGE_VIEW_TYPE_1D:         sizeswiz = ".xxxx"; break;
            case VK_IMAGE_VIEW_TYPE_1D_ARRAY:   sizeswiz = ".xyxx"; break;
            case VK_IMAGE_VIEW_TYPE_2D:         sizeswiz = ".xyxx"; break;
            case VK_IMAGE_VIEW_TYPE_2D_ARRAY:   sizeswiz = ".xyzx"; break;
            case VK_IMAGE_VIEW_TYPE_3D:         sizeswiz = ".xyzx"; break;
            case VK_IMAGE_VIEW_TYPE_CUBE:       sizeswiz = ".xyxx"; break;
            case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: sizeswiz = ".xyzx"; break;

            if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
                if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
                    checks << " temp = textureSize(texture0_1, 0)" << sizeswiz <<";\n";
                    checks << " accum += abs(temp);\n";

                    // checking textureSize with clearly out of range LOD values
                    checks << " temp = textureSize(texture0_1, " << -i << ")" << sizeswiz <<";\n";
                    checks << " accum += abs(temp);\n";
                    checks << " temp = textureSize(texture0_1, " << (std::numeric_limits<deInt32>::max() - i) << ")" << sizeswiz <<";\n";
                    checks << " accum += abs(temp);\n";

                    checks << " temp = textureSize(texture0_1)" << sizeswiz <<";\n";
                    checks << " accum += abs(temp);\n";
                    checks << " temp = textureSamples(texture0_1).xxxx;\n";
                    checks << " accum += abs(temp);\n";

            if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
                if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
                    checks << " temp = imageSize(image0_1)" << sizeswiz <<";\n";
                    checks << " accum += abs(temp);\n";

                    checks << " temp = imageSize(image0_1)" << sizeswiz <<";\n";
                    checks << " accum += abs(temp);\n";
                    checks << " temp = imageSamples(image0_1).xxxx;\n";
                    checks << " accum += abs(temp);\n";

            if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
                m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
                // expect zero for runtime-sized array .length()
                checks << " temp = " << vecType << "(ssbo0_1.val.length());\n";
                checks << " accum += abs(temp);\n";
                checks << " temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
                checks << " accum += abs(temp);\n";

    // outside the coordinates loop because we only need to call it once
    if (m_data.nullDescriptor &&
        m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
        m_data.samples == VK_SAMPLE_COUNT_1_BIT)
        checks << " temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
        checks << " temp = " << vecType << "(temp_ql);\n";
        checks << " accum += abs(temp);\n";
1687 if (m_data.stage == STAGE_FRAGMENT)
1689 // as here we only want to check that textureQueryLod returns 0 when
1690 // texture0_1 is null, we don't need to use the actual texture coordinates
1691 // (and modify the vertex shader below to do so). Any coordinates are fine.
1692 // gl_FragCoord has been selected "randomly", instead of selecting 0 for example.
1693 std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1694 checks << " vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1695 checks << " temp_ql = " << qLevelType << "(ceil(abs(lod.x) + abs(lod.y)));\n";
1696 checks << " temp = " << vecType << "(temp_ql);\n";
1697 checks << " accum += abs(temp);\n";
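				// For a null texture the lod query is expected to return (0,0), so temp stays zero and accum is unchanged.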
1702 const bool needsScalarLayout = m_data.needsScalarBlockLayout();
1703 const uint32_t shaderBuildOptions = (needsScalarLayout
1704 ? static_cast<uint32_t>(vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS)
1707 const bool is64BitFormat = formatIsR64(m_data.format);
1708 std::string support = "#version 460 core\n"
1709 "#extension GL_EXT_nonuniform_qualifier : enable\n" +
1710 (needsScalarLayout ? std::string("#extension GL_EXT_scalar_block_layout : enable\n") : std::string()) +
1711 "#extension GL_EXT_samplerless_texture_functions : enable\n"
1712 "#extension GL_EXT_control_flow_attributes : enable\n"
1713 "#extension GL_EXT_shader_image_load_formatted : enable\n";
1714 std::string SupportR64 = "#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
1715 "#extension GL_EXT_shader_image_int64 : require\n";
1717 support += SupportR64;
1718 if (m_data.stage == STAGE_RAYGEN)
1719 support += "#extension GL_NV_ray_tracing : require\n";
1721 std::string code = " " + vecType + " accum = " + vecType + "(0);\n"
1722 " " + vecType + " temp;\n"
1723 " " + qLevelType + " temp_ql;\n" +
1725 " " + vecType + " color = (accum != " + vecType + "(0)) ? " + vecType + "(0,0,0,0) : " + vecType + "(1,0,0,1);\n";
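	// Any robust access that produces an unexpected value makes accum nonzero, turning color into (0,0,0,0);
	// the expected output is (1,0,0,1), whose first component is checked in iterate().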
1727 switch (m_data.stage)
1729 default: DE_ASSERT(0); // Fallthrough
1732 std::stringstream css;
1735 "layout(local_size_x = 1, local_size_y = 1) in;\n"
1739 " imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1742 programCollection.glslSources.add("test") << glu::ComputeSource(css.str())
1743 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1748 std::stringstream css;
1754 " imageStore(image0_0, ivec2(gl_LaunchIDNV.xy), color);\n"
1757 programCollection.glslSources.add("test") << glu::RaygenSource(css.str())
1758 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1763 std::stringstream vss;
1769 " imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM << "), color);\n"
1770 " gl_PointSize = 1.0f;\n"
1771 " gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1774 programCollection.glslSources.add("test") << glu::VertexSource(vss.str())
1775 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1778 case STAGE_FRAGMENT:
1780 std::stringstream vss;
1782 "#version 450 core\n"
1785 // full-viewport quad
1786 " gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * float(gl_VertexIndex&1), 1);\n"
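			// gl_VertexIndex 0..3 map to (-1,-1), (-1,3), (3,-1), (3,3): an oversized triangle strip covering the whole viewport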
1789 programCollection.glslSources.add("vert") << glu::VertexSource(vss.str())
1790 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1792 std::stringstream fss;
1798 " imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1801 programCollection.glslSources.add("test") << glu::FragmentSource(fss.str())
1802 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1807	// The 64-bit conditions below are redundant. Could this fill shader also be used for non-64-bit formats?
1808 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
1810 const std::string ivecCords = (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1811 std::stringstream fillShader;
1817 "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1818 "layout (" + getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1819 << string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image" << imageDim << +" u_resultImage;\n"
1821 "layout(std430, binding = 1) buffer inputBuffer\n"
1823 " int" << (is64BitFormat ? "64_t" : "") << " data[];\n"
1828 " int gx = int(gl_GlobalInvocationID.x);\n"
1829 " int gy = int(gl_GlobalInvocationID.y);\n"
1830 " int gz = int(gl_GlobalInvocationID.z);\n"
1831			"	uint index = gx + (gy * gl_NumWorkGroups.x) + (gz * gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
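		// 'index' linearizes the 3D invocation ID so each invocation reads its own element of inBuffer.data.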
1833 for(int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1835 fillShader << " imageStore(u_resultImage, " << ivecCords << ", " << ndx << ", i64vec4(inBuffer.data[index]));\n";
1838 fillShader << "}\n";
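		// The fill shader writes the same 64-bit input value to every sample of its texel; it is dispatched once per texel and layer (see the cmdDispatch in iterate()).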
1840 programCollection.glslSources.add("fillShader") << glu::ComputeSource(fillShader.str())
1841 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, shaderBuildOptions);
1846 VkImageType imageViewTypeToImageType (VkImageViewType type)
1850 case VK_IMAGE_VIEW_TYPE_1D:
1851 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return VK_IMAGE_TYPE_1D;
1852 case VK_IMAGE_VIEW_TYPE_2D:
1853 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1854 case VK_IMAGE_VIEW_TYPE_CUBE:
1855 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return VK_IMAGE_TYPE_2D;
1856 case VK_IMAGE_VIEW_TYPE_3D: return VK_IMAGE_TYPE_3D;
1861 return VK_IMAGE_TYPE_2D;
1864 TestInstance* RobustnessExtsTestCase::createInstance (Context& context) const
1866 return new RobustnessExtsTestInstance(context, m_data);
1869 tcu::TestStatus RobustnessExtsTestInstance::iterate (void)
1871 const VkInstance instance = getInstance(m_context, m_data);
1872 const InstanceInterface& vki = getInstanceInterface(m_context, m_data);
1873 const VkDevice device = getLogicalDevice(m_context, m_data);
1874 const vk::DeviceInterface& vk = getDeviceInterface(m_context, m_data);
1875 const VkPhysicalDevice physicalDevice = chooseDevice(vki, instance, m_context.getTestContext().getCommandLine());
1876 SimpleAllocator allocator (vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1879 generateLayout(layout, m_data);
1881 // Get needed properties.
1882 VkPhysicalDeviceProperties2 properties;
1883 deMemset(&properties, 0, sizeof(properties));
1884 properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
1885 void** pNextTail = &properties.pNext;
1887 #ifndef CTS_USES_VULKANSC
1888 VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties;
1889 deMemset(&rayTracingProperties, 0, sizeof(rayTracingProperties));
1890 rayTracingProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV;
1893 VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties;
1894 deMemset(&robustness2Properties, 0, sizeof(robustness2Properties));
1895 robustness2Properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT;
1897 #ifndef CTS_USES_VULKANSC
1898 if (m_context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
1900 *pNextTail = &rayTracingProperties;
1901 pNextTail = &rayTracingProperties.pNext;
1905 if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
1907 *pNextTail = &robustness2Properties;
1908 pNextTail = &robustness2Properties.pNext;
1911 vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
1913 if (m_data.testRobustness2)
1915 if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
1916 robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
1917 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
1919 if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
1920 robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
1921 !deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
1922 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
1925 VkPipelineBindPoint bindPoint;
1927 switch (m_data.stage)
1930 bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1932 #ifndef CTS_USES_VULKANSC
1934 bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1938 bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1942 Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
1943 Move<vk::VkDescriptorPool> descriptorPool;
1944 Move<vk::VkDescriptorSet> descriptorSet;
1946 int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
1947 int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
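	// Bytes per texel divided by bytes per channel gives the component count, used to stride through the readback buffer during verification.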
1949 vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
1951 VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1953 #ifndef CTS_USES_VULKANSC
1954 VkDescriptorSetLayoutCreateFlags layoutCreateFlags = m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
1956 VkDescriptorSetLayoutCreateFlags layoutCreateFlags = 0;
1959 // Create a layout and allocate a descriptor set for it.
1961 const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo =
1963 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1967 (deUint32)bindings.size(),
1968 bindings.empty() ? DE_NULL : bindings.data()
1971 descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
1973 vk::DescriptorPoolBuilder poolBuilder;
1974 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
1975 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
1976 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
1977 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
1978 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
1979 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
1980 poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
1981 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
1983 descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
1985 const void *pNext = DE_NULL;
1987 if (!m_data.pushDescriptor)
1988 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
1990 de::MovePtr<BufferWithMemory> buffer;
1992 deUint8 *bufferPtr = DE_NULL;
1993 if (!m_data.nullDescriptor)
1995 // Create a buffer to hold data for all descriptors.
1996 VkDeviceSize size = de::max(
1997 (VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1),
2000 VkBufferUsageFlags usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
2001 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2002 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
2004 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
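		// The size is rounded up to the robust access alignment; for illustration (assuming deIntRoundToPow2 rounds up to a multiple of the power-of-two alignment), a 20-byte request with a 16-byte alignment becomes 32 bytes.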
2005 usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
2007 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2008 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2010 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
2011 usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
2013 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
2015 usage |= VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
2017 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
2019 usage |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
2021 else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2023 size = m_data.bufferLen;
2026 buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2027 vk, device, allocator, makeBufferCreateInfo(size, usage), MemoryRequirement::HostVisible));
2028 bufferPtr = (deUint8 *)buffer->getAllocation().getHostPtr();
2030 deMemset(bufferPtr, 0x3f, (size_t)size);
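		// Fill the whole allocation with a recognizable non-zero pattern (0x3f bytes); the bytes the shader may legally access are zeroed (and later overwritten with reference data) below.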
2032 deMemset(bufferPtr, 0, m_data.bufferLen);
2033 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
2034 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
2036 deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
2038 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
2039 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2041 deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
2045 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
2047 Move<VkDescriptorSetLayout> descriptorSetLayoutR64;
2048 Move<VkDescriptorPool> descriptorPoolR64;
2049 Move<VkDescriptorSet> descriptorSetFillImage;
2050 Move<VkShaderModule> shaderModuleFillImage;
2051 Move<VkPipelineLayout> pipelineLayoutFillImage;
2052 Move<VkPipeline> pipelineFillImage;
2054 Move<VkCommandPool> cmdPool = createCommandPool(vk, device, 0, queueFamilyIndex);
2055 Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2058 vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
2060 const VkImageSubresourceRange barrierRange =
2062 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2063 0u, // deUint32 baseMipLevel;
2064 VK_REMAINING_MIP_LEVELS, // deUint32 levelCount;
2065 0u, // deUint32 baseArrayLayer;
2066 VK_REMAINING_ARRAY_LAYERS // deUint32 layerCount;
2069 VkImageMemoryBarrier preImageBarrier =
2071 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
2072 DE_NULL, // const void* pNext
2073 0u, // VkAccessFlags srcAccessMask
2074 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
2075 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
2076 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout
2077 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
2078 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
2079 DE_NULL, // VkImage image
2080 barrierRange, // VkImageSubresourceRange subresourceRange;
2083 VkImageMemoryBarrier postImageBarrier =
2085 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2086 DE_NULL, // const void* pNext;
2087 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
2088 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
2089 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
2090 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout;
2091 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
2092 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
2093 DE_NULL, // VkImage image;
2094 barrierRange, // VkImageSubresourceRange subresourceRange;
2097 vk::VkClearColorValue clearValue;
2098 clearValue.uint32[0] = 0u;
2099 clearValue.uint32[1] = 0u;
2100 clearValue.uint32[2] = 0u;
2101 clearValue.uint32[3] = 0u;
2103 beginCommandBuffer(vk, *cmdBuffer, 0u);
2105 typedef vk::Unique<vk::VkBufferView> BufferViewHandleUp;
2106 typedef de::SharedPtr<BufferViewHandleUp> BufferViewHandleSp;
2107 typedef de::SharedPtr<ImageWithMemory> ImageWithMemorySp;
2108 typedef de::SharedPtr<Unique<VkImageView> > VkImageViewSp;
2109 typedef de::MovePtr<BufferWithMemory> BufferWithMemoryMp;
2111 vector<BufferViewHandleSp> bufferViews(1);
2113 VkImageCreateFlags mutableFormatFlag = 0;
2114	// The 64-bit image tests use a view format which differs from the image format.
2115 if (formatIsR64(m_data.format))
2116 mutableFormatFlag = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
2117 VkImageCreateFlags imageCreateFlags = mutableFormatFlag;
2118 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2119 imageCreateFlags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
2121 const bool featureSampledImage = ((getPhysicalDeviceFormatProperties(vki,
2123 m_data.format).optimalTilingFeatures &
2124 VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
2126 const VkImageUsageFlags usageSampledImage = (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
2128 const VkImageCreateInfo outputImageCreateInfo =
2130 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2131 DE_NULL, // const void* pNext;
2132 mutableFormatFlag, // VkImageCreateFlags flags;
2133 VK_IMAGE_TYPE_2D, // VkImageType imageType;
2134 m_data.format, // VkFormat format;
2136 DIM, // deUint32 width;
2137 DIM, // deUint32 height;
2138 1u // deUint32 depth;
2139 }, // VkExtent3D extent;
2140 1u, // deUint32 mipLevels;
2141 1u, // deUint32 arrayLayers;
2142 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2143 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2144 VK_IMAGE_USAGE_STORAGE_BIT
2146 | VK_IMAGE_USAGE_TRANSFER_SRC_BIT
2147 | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
2148 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2149 0u, // deUint32 queueFamilyIndexCount;
2150 DE_NULL, // const deUint32* pQueueFamilyIndices;
2151 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2154 deUint32 width = m_data.imageDim[0];
2155 deUint32 height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] : 1;
2156 deUint32 depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
2157 deUint32 layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
2158 m_data.viewType != VK_IMAGE_VIEW_TYPE_1D &&
2159 m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
2160 m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
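	// layers: 1D_ARRAY takes its layer count from imageDim[1]; 2D_ARRAY/CUBE/CUBE_ARRAY take it from imageDim[2]; non-array 1D/2D/3D views use a single layer.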
2162 const VkImageUsageFlags usageImage = (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT : (VkImageUsageFlagBits)0);
2164 const VkImageCreateInfo imageCreateInfo =
2166 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2167 DE_NULL, // const void* pNext;
2168 imageCreateFlags, // VkImageCreateFlags flags;
2169 imageViewTypeToImageType(m_data.viewType), // VkImageType imageType;
2170 m_data.format, // VkFormat format;
2172 width, // deUint32 width;
2173 height, // deUint32 height;
2174 depth // deUint32 depth;
2175 }, // VkExtent3D extent;
2176 1u, // deUint32 mipLevels;
2177 layers, // deUint32 arrayLayers;
2178 m_data.samples, // VkSampleCountFlagBits samples;
2179 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2182 | VK_IMAGE_USAGE_TRANSFER_SRC_BIT
2183 | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
2184 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2185 0u, // deUint32 queueFamilyIndexCount;
2186 DE_NULL, // const deUint32* pQueueFamilyIndices;
2187 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2190 VkImageViewCreateInfo imageViewCreateInfo =
2192 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2193 DE_NULL, // const void* pNext;
2194 (VkImageViewCreateFlags)0u, // VkImageViewCreateFlags flags;
2195 DE_NULL, // VkImage image;
2196 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
2197 m_data.format, // VkFormat format;
2199 VK_COMPONENT_SWIZZLE_IDENTITY,
2200 VK_COMPONENT_SWIZZLE_IDENTITY,
2201 VK_COMPONENT_SWIZZLE_IDENTITY,
2202 VK_COMPONENT_SWIZZLE_IDENTITY
2203 }, // VkComponentMapping components;
2205 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2206 0u, // deUint32 baseMipLevel;
2207 VK_REMAINING_MIP_LEVELS, // deUint32 levelCount;
2208 0u, // deUint32 baseArrayLayer;
2209 VK_REMAINING_ARRAY_LAYERS // deUint32 layerCount;
2210 } // VkImageSubresourceRange subresourceRange;
2213 vector<ImageWithMemorySp> images(2);
2214 vector<VkImageViewSp> imageViews(2);
2216 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2218 deUint32 *ptr = (deUint32 *)bufferPtr;
2219 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2222 BufferWithMemoryMp bufferImageR64;
2223 BufferWithMemoryMp bufferOutputImageR64;
2224 const VkDeviceSize sizeOutputR64 = 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height * outputImageCreateInfo.extent.depth;
2225 const VkDeviceSize sizeOneLayers = 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
2226 const VkDeviceSize sizeImageR64 = sizeOneLayers * layers;
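	// R64 formats are 8 bytes per texel, hence the factor of 8: one staging buffer for the output image and one holding every layer of the source image.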
2228 if (formatIsR64(m_data.format))
2230 bufferOutputImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2231 vk, device, allocator,
2232 makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
2233 MemoryRequirement::HostVisible));
2235 deUint64* bufferUint64Ptr = (deUint64 *)bufferOutputImageR64->getAllocation().getHostPtr();
2237 for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
2239 bufferUint64Ptr[ndx] = 0;
2241 flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
2243 bufferImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2244 vk, device, allocator,
2245 makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
2246 MemoryRequirement::HostVisible));
2248 for (deUint32 layerNdx = 0; layerNdx < layers; ++layerNdx)
2250 bufferUint64Ptr = (deUint64 *)bufferImageR64->getAllocation().getHostPtr();
2251 bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
2253 for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
2255 bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? layerNdx : 0);
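				// Non-cube views get a distinct per-layer value so layer addressing can be validated; cube (array) faces all share the base pattern.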
2258 flushAlloc(vk, device, bufferImageR64->getAllocation());
2261 for (size_t b = 0; b < bindings.size(); ++b)
2263 VkDescriptorSetLayoutBinding &binding = bindings[b];
2265 if (binding.descriptorCount == 0)
2267 if (b == 1 && m_data.nullDescriptor)
2270 DE_ASSERT(binding.descriptorCount == 1);
2271 switch (binding.descriptorType)
2273 default: DE_ASSERT(0); // Fallthrough
2274 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2275 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2276 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2277 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2279 deUint32 *ptr = (deUint32 *)bufferPtr;
2280 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2283 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2284 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2286 deUint32 *ptr = (deUint32 *)bufferPtr;
2287 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2289 const vk::VkBufferViewCreateInfo viewCreateInfo =
2291 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
2293 (vk::VkBufferViewCreateFlags)0,
2295 m_data.format, // format
2296 (vk::VkDeviceSize)0, // offset
2297 (vk::VkDeviceSize)m_data.bufferLen // range
2299 vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
2300 bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2303 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2304 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2306 if (bindings.size() > 1 &&
2307 bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2309 if (m_data.format == VK_FORMAT_R64_SINT)
2310 imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2312 if (m_data.format == VK_FORMAT_R64_UINT)
2313 imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2318 images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2319 imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2323 images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2324 imageViewCreateInfo.viewType = m_data.viewType;
2326 imageViewCreateInfo.image = **images[b];
2327 imageViews[b] = VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2329 VkImage img = **images[b];
2330			VkImage img = **images[b];
2331 const VkImageCreateInfo& imageInfo = ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2332 const deUint32 clearLayers = b == 0 ? 1 : layers;
2334 if (!formatIsR64(m_data.format))
2336 preImageBarrier.image = img;
2339 if (formatIsFloat(m_data.format))
2341 deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2343 else if (formatIsSignedInt(m_data.format))
2345 deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2349 deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2352 postImageBarrier.image = img;
2354 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2356 for (unsigned int i = 0; i < clearLayers; ++i)
2358 const VkImageSubresourceRange clearRange =
2360 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2361 0u, // deUint32 baseMipLevel;
2362 VK_REMAINING_MIP_LEVELS, // deUint32 levelCount;
2363 i, // deUint32 baseArrayLayer;
2364 1 // deUint32 layerCount;
2367 vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
2369				// Use the same data for all faces of cube (array) views; otherwise make the value a function of the layer
2370 if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2372 if (formatIsFloat(m_data.format))
2373 clearValue.float32[0] += 1;
2374 else if (formatIsSignedInt(m_data.format))
2375 clearValue.int32[0] += 1;
2377 clearValue.uint32[0] += 1;
2380 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2384 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2386 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2387 const VkImageMemoryBarrier imageBarrierPre = makeImageMemoryBarrier(0,
2388 VK_ACCESS_SHADER_WRITE_BIT,
2389 VK_IMAGE_LAYOUT_UNDEFINED,
2390 VK_IMAGE_LAYOUT_GENERAL,
2393 const VkImageMemoryBarrier imageBarrierPost = makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
2394 VK_ACCESS_SHADER_READ_BIT,
2395 VK_IMAGE_LAYOUT_GENERAL,
2396 VK_IMAGE_LAYOUT_GENERAL,
2400 descriptorSetLayoutR64 =
2401 DescriptorSetLayoutBuilder()
2402 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2403 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2407 DescriptorPoolBuilder()
2408 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2409				.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1)
2410 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2412 descriptorSetFillImage = makeDescriptorSet(vk,
2415 *descriptorSetLayoutR64);
2417 shaderModuleFillImage = createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2418 pipelineLayoutFillImage = makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2419 pipelineFillImage = makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
2421 const VkDescriptorImageInfo descResultImageInfo = makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2422 const VkDescriptorBufferInfo descResultBufferInfo = makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2424 DescriptorSetUpdateBuilder()
2425 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2426 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2427 .update(vk, device);
2429 vk.cmdPipelineBarrier(*cmdBuffer,
2430 VK_PIPELINE_STAGE_HOST_BIT,
2431 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2432 (VkDependencyFlags)0,
2433 0, (const VkMemoryBarrier*)DE_NULL,
2434 0, (const VkBufferMemoryBarrier*)DE_NULL,
2435 1, &imageBarrierPre);
2437 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2438 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u, 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2440 vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2442 vk.cmdPipelineBarrier(*cmdBuffer,
2443 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2444 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2445 (VkDependencyFlags)0,
2446 0, (const VkMemoryBarrier*)DE_NULL,
2447 0, (const VkBufferMemoryBarrier*)DE_NULL,
2448 1, &imageBarrierPost);
2452 VkDeviceSize size = ((b == 0) ? sizeOutputR64 : sizeImageR64);
2453 const vector<VkBufferImageCopy> bufferImageCopy (1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, clearLayers)));
2455 copyBufferToImage(vk,
2460 VK_IMAGE_ASPECT_COLOR_BIT,
2462 clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2470 const VkSamplerCreateInfo samplerParams =
2472 VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
2473 DE_NULL, // const void* pNext;
2474 0, // VkSamplerCreateFlags flags;
2475 VK_FILTER_NEAREST, // VkFilter magFilter:
2476 VK_FILTER_NEAREST, // VkFilter minFilter;
2477 VK_SAMPLER_MIPMAP_MODE_NEAREST, // VkSamplerMipmapMode mipmapMode;
2478 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeU;
2479 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeV;
2480 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeW;
2481 0.0f, // float mipLodBias;
2482		VK_FALSE,									// VkBool32				anisotropyEnable;
2483 1.0f, // float maxAnisotropy;
2484 VK_FALSE, // VkBool32 compareEnable;
2485 VK_COMPARE_OP_ALWAYS, // VkCompareOp compareOp;
2486 0.0f, // float minLod;
2487 0.0f, // float maxLod;
2488 formatIsFloat(m_data.format) ?
2489 VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2490 VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, // VkBorderColor borderColor;
2491 VK_FALSE // VkBool32 unnormalizedCoordinates;
2494 Move<VkSampler> sampler (createSampler(vk, device, &samplerParams));
2496 // Flush modified memory.
2497 if (!m_data.nullDescriptor)
2498 flushAlloc(vk, device, buffer->getAllocation());
2500 const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
2502 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
2504 (VkPipelineLayoutCreateFlags)0,
2505 1u, // setLayoutCount
2506 &descriptorSetLayout.get(), // pSetLayouts
2507 0u, // pushConstantRangeCount
2508 DE_NULL, // pPushConstantRanges
2511 Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2513 de::MovePtr<BufferWithMemory> copyBuffer;
2514 copyBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2515 vk, device, allocator, makeBufferCreateInfo(DIM*DIM*16, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2518 vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2519 vector<VkDescriptorImageInfo> imageInfoVec(2);
2520 vector<VkBufferView> bufferViewVec(2);
2521 vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2525 #ifndef CTS_USES_VULKANSC
2526 vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore,
2527 bufTemplateEntriesBefore,
2528 texelBufTemplateEntriesBefore;
2531 for (size_t b = 0; b < bindings.size(); ++b)
2533 VkDescriptorSetLayoutBinding &binding = bindings[b];
2534		// Construct the descriptor info, write, and (optionally) update-template entry for this binding
2535 if (binding.descriptorCount > 0)
2538 switch (binding.descriptorType)
2540 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2541 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2543 if (b == 1 && m_data.nullDescriptor)
2544 imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2546 imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2548 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2549 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2550 if (b == 1 && m_data.nullDescriptor)
2551 bufferViewVec[vecIndex] = DE_NULL;
2553 bufferViewVec[vecIndex] = **bufferViews[0];
2556 // Other descriptor types.
2557 if (b == 1 && m_data.nullDescriptor)
2558 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2560 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2564 VkWriteDescriptorSet w =
2566 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // sType
2568 m_data.pushDescriptor ? DE_NULL : *descriptorSet, // dstSet
2569 (deUint32)b, // binding
2570 0, // dstArrayElement
2571 1u, // descriptorCount
2572 binding.descriptorType, // descriptorType
2573 &imageInfoVec[vecIndex], // pImageInfo
2574 &bufferInfoVec[vecIndex], // pBufferInfo
2575 &bufferViewVec[vecIndex], // pTexelBufferView
2578 #ifndef CTS_USES_VULKANSC
2579 VkDescriptorUpdateTemplateEntry templateEntry =
2581 (deUint32)b, // uint32_t dstBinding;
2582 0, // uint32_t dstArrayElement;
2583 1u, // uint32_t descriptorCount;
2584 binding.descriptorType, // VkDescriptorType descriptorType;
2585 0, // size_t offset;
2586 0, // size_t stride;
2589 switch (binding.descriptorType)
2591 default: DE_ASSERT(0); // Fallthrough
2592 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2593 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2594 templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2595 imgTemplateEntriesBefore.push_back(templateEntry);
2597 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2598 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2599 templateEntry.offset = vecIndex * sizeof(VkBufferView);
2600 texelBufTemplateEntriesBefore.push_back(templateEntry);
2602 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2603 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2604 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2605 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2606 templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2607 bufTemplateEntriesBefore.push_back(templateEntry);
2614 writesBeforeBindVec.push_back(w);
2616 // Count the number of dynamic descriptors in this set.
2617 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2618 binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2625 // Make zeros have at least one element so &zeros[0] works
2626 vector<deUint32> zeros(de::max(1,numDynamic));
2627 deMemset(&zeros[0], 0, numDynamic * sizeof(deUint32));
2629	// Select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate based on the test configuration
2630 if (m_data.useTemplate)
2632 #ifndef CTS_USES_VULKANSC
2633 VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
2635 VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, // VkStructureType sType;
2636 NULL, // void* pNext;
2637 0, // VkDescriptorUpdateTemplateCreateFlags flags;
2638 0, // uint32_t descriptorUpdateEntryCount;
2639			DE_NULL,												// const VkDescriptorUpdateTemplateEntry*	pDescriptorUpdateEntries;
2640 m_data.pushDescriptor ?
2641 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2642 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, // VkDescriptorUpdateTemplateType templateType;
2643 descriptorSetLayout.get(), // VkDescriptorSetLayout descriptorSetLayout;
2644 bindPoint, // VkPipelineBindPoint pipelineBindPoint;
2645 *pipelineLayout, // VkPipelineLayout pipelineLayout;
2649 void *templateVectorData[] =
2651 imageInfoVec.data(),
2652 bufferInfoVec.data(),
2653 bufferViewVec.data(),
2656 vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] =
2658 &imgTemplateEntriesBefore,
2659 &bufTemplateEntriesBefore,
2660 &texelBufTemplateEntriesBefore,
2663 if (m_data.pushDescriptor)
2665 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2667 if (templateVectorsBefore[i]->size())
2669 templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2670 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2671 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2672 vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout, 0, templateVectorData[i]);
2678 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2680 if (templateVectorsBefore[i]->size())
2682 templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2683 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2684 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2685 vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate, templateVectorData[i]);
2689 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2695 if (m_data.pushDescriptor)
2697 #ifndef CTS_USES_VULKANSC
2698 if (writesBeforeBindVec.size())
2700 vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2706 if (writesBeforeBindVec.size())
2708 vk.updateDescriptorSets(device, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2711 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2716 Move<VkPipeline> pipeline;
2717 Move<VkRenderPass> renderPass;
2718 Move<VkFramebuffer> framebuffer;
2720 de::MovePtr<BufferWithMemory> sbtBuffer;
2722 if (m_data.stage == STAGE_COMPUTE)
2724 const Unique<VkShaderModule> shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2726 pipeline = makeComputePipeline(vk, device, *pipelineLayout, *shader);
2729 #ifndef CTS_USES_VULKANSC
2730 else if (m_data.stage == STAGE_RAYGEN)
2732 const Unique<VkShaderModule> shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2734 const VkPipelineShaderStageCreateInfo shaderCreateInfo =
2736 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2738 (VkPipelineShaderStageCreateFlags)0,
2739 VK_SHADER_STAGE_RAYGEN_BIT_NV, // stage
2742 DE_NULL, // pSpecializationInfo
2745 VkRayTracingShaderGroupCreateInfoNV group =
2747 VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV,
2749 VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV, // type
2751 VK_SHADER_UNUSED_KHR, // closestHitShader
2752 VK_SHADER_UNUSED_KHR, // anyHitShader
2753 VK_SHADER_UNUSED_KHR, // intersectionShader
2756 VkRayTracingPipelineCreateInfoNV pipelineCreateInfo = {
2757 VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV, // sType
2761 &shaderCreateInfo, // pStages
2764 0, // maxRecursionDepth
2765 *pipelineLayout, // layout
2766 (vk::VkPipeline)0, // basePipelineHandle
2767 0u, // basePipelineIndex
2770 pipeline = createRayTracingPipelineNV(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2772 sbtBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2773 vk, device, allocator, makeBufferCreateInfo(rayTracingProperties.shaderGroupHandleSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_RAY_TRACING_BIT_NV), MemoryRequirement::HostVisible));
2775 deUint32 *ptr = (deUint32 *)sbtBuffer->getAllocation().getHostPtr();
2776 invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2778 vk.getRayTracingShaderGroupHandlesKHR(device, *pipeline, 0, 1, rayTracingProperties.shaderGroupHandleSize, ptr);
2783 const VkSubpassDescription subpassDesc =
2785 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags
2786 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
2787 0u, // deUint32 inputAttachmentCount
2788 DE_NULL, // const VkAttachmentReference* pInputAttachments
2789 0u, // deUint32 colorAttachmentCount
2790 DE_NULL, // const VkAttachmentReference* pColorAttachments
2791 DE_NULL, // const VkAttachmentReference* pResolveAttachments
2792 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment
2793 0u, // deUint32 preserveAttachmentCount
2794 DE_NULL // const deUint32* pPreserveAttachments
2797 const VkSubpassDependency subpassDependency =
2799 VK_SUBPASS_EXTERNAL, // deUint32 srcSubpass
2800 0, // deUint32 dstSubpass
2801 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags srcStageMask
2802 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // VkPipelineStageFlags dstStageMask
2803 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask
2804 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, // dstAccessMask
2805 VK_DEPENDENCY_BY_REGION_BIT // VkDependencyFlags dependencyFlags
2808 const VkRenderPassCreateInfo renderPassParams =
2810			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,			// VkStructureType					sType
2811 DE_NULL, // const void* pNext
2812 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
2813 0u, // deUint32 attachmentCount
2814 DE_NULL, // const VkAttachmentDescription* pAttachments
2815 1u, // deUint32 subpassCount
2816 &subpassDesc, // const VkSubpassDescription* pSubpasses
2817 1u, // deUint32 dependencyCount
2818 &subpassDependency // const VkSubpassDependency* pDependencies
2821 renderPass = createRenderPass(vk, device, &renderPassParams);
2823 const vk::VkFramebufferCreateInfo framebufferParams =
2825 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
2827 (vk::VkFramebufferCreateFlags)0,
2828 *renderPass, // renderPass
2829 0u, // attachmentCount
2830 DE_NULL, // pAttachments
2836 framebuffer = createFramebuffer(vk, device, &framebufferParams);
2838 const VkVertexInputBindingDescription vertexInputBindingDescription =
2840 0u, // deUint32 binding
2841 (deUint32)formatBytes, // deUint32 stride
2842 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate
2845 const VkVertexInputAttributeDescription vertexInputAttributeDescription =
2847 0u, // deUint32 location
2848 0u, // deUint32 binding
2849 m_data.format, // VkFormat format
2850 0u // deUint32 offset
2853 deUint32 numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
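		// Only the vertex-attribute-fetch tests bind a vertex attribute (location 0, sourced from the test buffer); all other cases use an empty vertex input state.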
2855 VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo =
2857 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2858 DE_NULL, // const void* pNext;
2859 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
2860 numAttribs, // deUint32 vertexBindingDescriptionCount;
2861 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
2862 numAttribs, // deUint32 vertexAttributeDescriptionCount;
2863 &vertexInputAttributeDescription // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
2866 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo =
2868 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
2869 DE_NULL, // const void* pNext;
2870 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
2871 (m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology topology;
2872 VK_FALSE // VkBool32 primitiveRestartEnable;
2875 const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo =
2877 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
2878 DE_NULL, // const void* pNext;
2879 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
2880 VK_FALSE, // VkBool32 depthClampEnable;
2881 (m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE, // VkBool32 rasterizerDiscardEnable;
2882 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
2883 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
2884 VK_FRONT_FACE_CLOCKWISE, // VkFrontFace frontFace;
2885 VK_FALSE, // VkBool32 depthBiasEnable;
2886 0.0f, // float depthBiasConstantFactor;
2887 0.0f, // float depthBiasClamp;
2888 0.0f, // float depthBiasSlopeFactor;
2889 1.0f // float lineWidth;
2892 const VkPipelineMultisampleStateCreateInfo multisampleStateCreateInfo =
2894 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType
2895 DE_NULL, // const void* pNext
2896 0u, // VkPipelineMultisampleStateCreateFlags flags
2897 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples
2898 VK_FALSE, // VkBool32 sampleShadingEnable
2899 1.0f, // float minSampleShading
2900 DE_NULL, // const VkSampleMask* pSampleMask
2901 VK_FALSE, // VkBool32 alphaToCoverageEnable
2902 VK_FALSE // VkBool32 alphaToOneEnable
2905 VkViewport viewport = makeViewport(DIM, DIM);
2906 VkRect2D scissor = makeRect2D(DIM, DIM);
2908 const VkPipelineViewportStateCreateInfo viewportStateCreateInfo =
2910 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType
2911 DE_NULL, // const void* pNext
2912 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags
2913 1u, // deUint32 viewportCount
2914 &viewport, // const VkViewport* pViewports
2915 1u, // deUint32 scissorCount
2916 &scissor // const VkRect2D* pScissors
2919 Move<VkShaderModule> fs;
2920 Move<VkShaderModule> vs;
2923 if (m_data.stage == STAGE_VERTEX)
2925 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2926			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // unused placeholder for vertex-stage tests
2931 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
2932 fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2936 VkPipelineShaderStageCreateInfo shaderCreateInfo[2] =
2939 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2941 (VkPipelineShaderStageCreateFlags)0,
2942 VK_SHADER_STAGE_VERTEX_BIT, // stage
2945 DE_NULL, // pSpecializationInfo
2948 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2950 (VkPipelineShaderStageCreateFlags)0,
2951 VK_SHADER_STAGE_FRAGMENT_BIT, // stage
2954 DE_NULL, // pSpecializationInfo
2958 VkGraphicsPipelineCreateInfo graphicsPipelineCreateInfo =
2960 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
2961 DE_NULL, // const void* pNext;
2962 (VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags;
2963 numStages, // deUint32 stageCount;
2964 &shaderCreateInfo[0], // const VkPipelineShaderStageCreateInfo* pStages;
2965 &vertexInputStateCreateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
2966 &inputAssemblyStateCreateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
2967 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
2968 &viewportStateCreateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
2969 &rasterizationStateCreateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
2970 &multisampleStateCreateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
2971 DE_NULL, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
2972 DE_NULL, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
2973 DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
2974 pipelineLayout.get(), // VkPipelineLayout layout;
2975 renderPass.get(), // VkRenderPass renderPass;
2976 0u, // deUint32 subpass;
2977 DE_NULL, // VkPipeline basePipelineHandle;
2978 0 // int basePipelineIndex;
2981 #ifndef CTS_USES_VULKANSC
2982 VkPipelineRobustnessCreateInfoEXT pipelineRobustnessInfo;
2983 if (m_data.testPipelineRobustness)
2985 pipelineRobustnessInfo = getPipelineRobustnessInfo(m_data.testRobustness2, m_data.descriptorType);
2987 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2989 graphicsPipelineCreateInfo.pNext = &pipelineRobustnessInfo;
2991 else if (m_data.stage == STAGE_VERTEX)
2993 shaderCreateInfo[0].pNext = &pipelineRobustnessInfo;
2997 shaderCreateInfo[1].pNext = &pipelineRobustnessInfo;
3002 pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
3005 const VkImageMemoryBarrier imageBarrier =
3007 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
3008 DE_NULL, // const void* pNext
3009 0u, // VkAccessFlags srcAccessMask
3010 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
3011 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
3012 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout
3013 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
3014 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
3015 **images[0], // VkImage image
3017 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask
3018 0u, // uint32_t baseMipLevel
3019				1u,							// uint32_t				levelCount
3020				0u,							// uint32_t				baseArrayLayer
3021				1u,							// uint32_t				layerCount
3025 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
3026 (VkDependencyFlags)0,
3027 0, (const VkMemoryBarrier*)DE_NULL,
3028 0, (const VkBufferMemoryBarrier*)DE_NULL,
3031 vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);
3033 if (!formatIsR64(m_data.format))
3035 VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
3036 VkClearValue clearColor = makeClearValueColorU32(0,0,0,0);
3038 vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
3042 const vector<VkBufferImageCopy> bufferImageCopy(1, makeBufferImageCopy(outputImageCreateInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
3043 copyBufferToImage(vk,
3045 *(*bufferOutputImageR64),
3048 VK_IMAGE_ASPECT_COLOR_BIT,
3050 1, **images[0], VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
3053 VkMemoryBarrier memBarrier =
3055 VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
3057 0u, // srcAccessMask
3058 0u, // dstAccessMask
3061 memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
3062 memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
3063 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
3064 0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
3066 if (m_data.stage == STAGE_COMPUTE)
3068 vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
3070 #ifndef CTS_USES_VULKANSC
3071 else if (m_data.stage == STAGE_RAYGEN)
3073 vk.cmdTraceRaysNV(*cmdBuffer,
3083 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer,
3084 makeRect2D(DIM, DIM),
3085 0, DE_NULL, VK_SUBPASS_CONTENTS_INLINE);
3086 // Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
3087 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
3089 VkDeviceSize zeroOffset = 0;
3090 VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
3091 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
3092 vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
3094 if (m_data.stage == STAGE_VERTEX)
3096 vk.cmdDraw(*cmdBuffer, DIM*DIM, 1u, 0u, 0u);
3100 vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
3102 endRenderPass(vk, *cmdBuffer);
3105 memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
3106 memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
3107 vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT,
3108 0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
3110 const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(DIM, DIM, 1u),
3111 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
3112 vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, ©Region);
3114 endCommandBuffer(vk, *cmdBuffer);
3116 submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
3118 void *ptr = copyBuffer->getAllocation().getHostPtr();
3120 invalidateAlloc(vk, device, copyBuffer->getAllocation());
3122 qpTestResult res = QP_TEST_RESULT_PASS;
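	// The test shaders write (1,0,0,1) only when accum stayed zero, i.e. every robust access returned the expected value, so the first component of every output texel must be exactly 1.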
3124 for (deUint32 i = 0; i < DIM*DIM; ++i)
3126 if (formatIsFloat(m_data.format))
3128 if (((float *)ptr)[i * numComponents] != 1.0f)
3130 res = QP_TEST_RESULT_FAIL;
3133 else if (formatIsR64(m_data.format))
3135 if (((deUint64 *)ptr)[i * numComponents] != 1)
3137 res = QP_TEST_RESULT_FAIL;
3142 if (((deUint32 *)ptr)[i * numComponents] != 1)
3144 res = QP_TEST_RESULT_FAIL;
3149 return tcu::TestStatus(res, qpGetTestResultName(res));
3154 static void createTests (tcu::TestCaseGroup* group, bool robustness2, bool pipelineRobustness)
3156 tcu::TestContext& testCtx = group->getTestContext();
3162 const char* description;
3165 TestGroupCase fmtCases[] =
3167 { VK_FORMAT_R32_SINT, "r32i", "" },
3168 { VK_FORMAT_R32_UINT, "r32ui", "" },
3169 { VK_FORMAT_R32_SFLOAT, "r32f", "" },
3170 { VK_FORMAT_R32G32_SINT, "rg32i", "" },
3171 { VK_FORMAT_R32G32_UINT, "rg32ui", "" },
3172 { VK_FORMAT_R32G32_SFLOAT, "rg32f", "" },
3173 { VK_FORMAT_R32G32B32A32_SINT, "rgba32i", "" },
3174 { VK_FORMAT_R32G32B32A32_UINT, "rgba32ui", "" },
3175 { VK_FORMAT_R32G32B32A32_SFLOAT, "rgba32f", "" },
3176 { VK_FORMAT_R64_SINT, "r64i", "" },
3177 { VK_FORMAT_R64_UINT, "r64ui", "" },
3180 TestGroupCase fullDescCases[] =
3182 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, "uniform_buffer", "" },
3183 { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "storage_buffer", "" },
3184 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, "uniform_buffer_dynamic", "" },
3185 { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, "storage_buffer_dynamic", "" },
3186 { VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer", "" },
3187 { VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer", "" },
3188 { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image", "" },
3189 { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image", "" },
3190 { VERTEX_ATTRIBUTE_FETCH, "vertex_attribute_fetch", "" },
3193 TestGroupCase imgDescCases[] =
3195 { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image", "" },
3196 { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image", "" },
3199 TestGroupCase fullLenCases32Bit[] =
3201 { ~0U, "null_descriptor", "" },
3205 { 12, "len_12", "" },
3206 { 16, "len_16", "" },
3207 { 20, "len_20", "" },
3208 { 31, "len_31", "" },
3209 { 32, "len_32", "" },
3210 { 33, "len_33", "" },
3211 { 35, "len_35", "" },
3212 { 36, "len_36", "" },
3213 { 39, "len_39", "" },
3214		{ 40,	"len_40",	"" },
3215 { 252, "len_252", "" },
3216 { 256, "len_256", "" },
3217 { 260, "len_260", "" },
3220 TestGroupCase fullLenCases64Bit[] =
3222 { ~0U, "null_descriptor", "" },
3225 { 16, "len_16", "" },
3226 { 24, "len_24", "" },
3227 { 32, "len_32", "" },
3228 { 40, "len_40", "" },
3229 { 62, "len_62", "" },
3230 { 64, "len_64", "" },
3231 { 66, "len_66", "" },
3232 { 70, "len_70", "" },
3233 { 72, "len_72", "" },
3234 { 78, "len_78", "" },
3235 { 80, "len_80", "" },
3236 { 504, "len_504", "" },
3237 { 512, "len_512", "" },
3238 { 520, "len_520", "" },
3241 TestGroupCase imgLenCases[] =
3246 TestGroupCase viewCases[] =
3248 { VK_IMAGE_VIEW_TYPE_1D, "1d", "" },
3249 { VK_IMAGE_VIEW_TYPE_2D, "2d", "" },
3250 { VK_IMAGE_VIEW_TYPE_3D, "3d", "" },
3251 { VK_IMAGE_VIEW_TYPE_CUBE, "cube", "" },
3252 { VK_IMAGE_VIEW_TYPE_1D_ARRAY, "1d_array", "" },
3253 { VK_IMAGE_VIEW_TYPE_2D_ARRAY, "2d_array", "" },
3254 { VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, "cube_array", "" },
3257 TestGroupCase sampCases[] =
3259 { VK_SAMPLE_COUNT_1_BIT, "samples_1", "" },
3260 { VK_SAMPLE_COUNT_4_BIT, "samples_4", "" },
3263 TestGroupCase stageCases[] =
3265 { STAGE_COMPUTE, "comp", "compute" },
3266 { STAGE_FRAGMENT, "frag", "fragment" },
3267 { STAGE_VERTEX, "vert", "vertex" },
3268 #ifndef CTS_USES_VULKANSC
3269 { STAGE_RAYGEN, "rgen", "raygen" },
3273 TestGroupCase volCases[] =
3275 { 0, "nonvolatile", "" },
3276 { 1, "volatile", "" },
3279 TestGroupCase unrollCases[] =
3281 { 0, "dontunroll", "" },
3282 { 1, "unroll", "" },
3285 TestGroupCase tempCases[] =
3287 { 0, "notemplate", "" },
3288 #ifndef CTS_USES_VULKANSC
3289 { 1, "template", "" },
3293 TestGroupCase pushCases[] =
3296 #ifndef CTS_USES_VULKANSC
3301 TestGroupCase fmtQualCases[] =
3303 { 0, "no_fmt_qual", "" },
3304 { 1, "fmt_qual", "" },
3307 TestGroupCase readOnlyCases[] =
3309 { 0, "readwrite", "" },
3310 { 1, "readonly", "" },
3313 for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
3315 de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name, pushCases[pushNdx].name));
3316 for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
3318 de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name, tempCases[tempNdx].name));
3319 for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
3321 de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name, fmtCases[fmtNdx].name));
3323 // Avoid too much duplication by excluding certain test cases
3324 if (pipelineRobustness &&
3325 !(fmtCases[fmtNdx].count == VK_FORMAT_R32_UINT || fmtCases[fmtNdx].count == VK_FORMAT_R32G32B32A32_SFLOAT || fmtCases[fmtNdx].count == VK_FORMAT_R64_SINT))
3330 int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));
				for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
				{
					de::MovePtr<tcu::TestCaseGroup> unrollGroup(new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name, unrollCases[unrollNdx].name));

					// Avoid too much duplication by excluding certain test cases
					if (unrollNdx > 0 && pipelineRobustness)
						continue;

					for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
					{
						de::MovePtr<tcu::TestCaseGroup> volGroup(new tcu::TestCaseGroup(testCtx, volCases[volNdx].name, volCases[volNdx].name));

						int numDescCases = robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
						TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;
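
						// robustness2 iterates the full descriptor-type list; image robustness only
						// uses the reduced image-oriented subset declared earlier.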
						for (int descNdx = 0; descNdx < numDescCases; descNdx++)
						{
							de::MovePtr<tcu::TestCaseGroup> descGroup(new tcu::TestCaseGroup(testCtx, descCases[descNdx].name, descCases[descNdx].name));

							// Avoid too much duplication by excluding certain test cases
							if (pipelineRobustness &&
								!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
								  descCases[descNdx].count == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
							{
								continue;
							}

							for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
							{
								de::MovePtr<tcu::TestCaseGroup> rwGroup(new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name, readOnlyCases[roNdx].name));

								// readonly cases are just for storage_buffer
								if (readOnlyCases[roNdx].count != 0 &&
									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
									descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
									continue;

								if (pipelineRobustness &&
									readOnlyCases[roNdx].count != 0)
								{
									continue;
								}
								for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
								{
									de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name, fmtQualCases[fmtQualNdx].name));

									// format qualifier is only used for storage image and storage texel buffers
									if (fmtQualCases[fmtQualNdx].count &&
										!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
										continue;

									if (pushCases[pushNdx].count &&
										(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
										continue;

									const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
									int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
									TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
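
									// R64 formats use the 64-bit length table. Both full tables hold the same
									// number of entries, which the DE_LENGTH_OF_ARRAY over the ternary above
									// relies on.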
									for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
									{
										if (lenCases[lenNdx].count != ~0U)
										{
											bool bufferLen = lenCases[lenNdx].count != 0;
											bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
											if (bufferLen != bufferDesc)
												continue;

											// Add template test cases only for null_descriptor cases
											if (tempCases[tempNdx].count)
												continue;
										}

										if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
											((lenCases[lenNdx].count % fmtSize) != 0) &&
											lenCases[lenNdx].count != ~0U)
										{
											continue;
										}

										// Avoid too much duplication by excluding certain test cases
										if (pipelineRobustness && robustness2 &&
											(lenCases[lenNdx].count == 0 || ((lenCases[lenNdx].count & (lenCases[lenNdx].count - 1)) != 0)))
										{
											continue;
										}

										// "volatile" only applies to storage images/buffers
										if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
											continue;

										de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name, lenCases[lenNdx].name));
										for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
										{
											de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name, sampCases[sampNdx].name));

											// Avoid too much duplication by excluding certain test cases
											if (pipelineRobustness && sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
												continue;

											for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
											{
												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
													descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
												{
													// buffer descriptors don't have different dimensionalities. Only test "1D"
													continue;
												}

												if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
													sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
												{
													continue;
												}

												// Avoid too much duplication by excluding certain test cases
												if (pipelineRobustness &&
													!(viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_1D || viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D || viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_2D_ARRAY))
												{
													continue;
												}

												de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name, viewCases[viewNdx].name));
												for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
												{
													Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
													VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
													VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
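
													// allShaderStages/allPipelineStages list every stage the generated pipelines
													// may use; the ray tracing stage is appended below (non-VulkanSC builds only)
													// when the raygen variant is selected.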
#ifndef CTS_USES_VULKANSC
													if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
													{
														allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
														allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;

														if (pipelineRobustness)
															continue;
													}
#endif // CTS_USES_VULKANSC

													if ((lenCases[lenNdx].count == ~0U) && pipelineRobustness)
														continue;

													if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
														currentStage != STAGE_VERTEX)
														continue;

													deUint32 imageDim[3] = {5, 11, 6};
													if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
														viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
														imageDim[1] = imageDim[0];
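
													// Collect the selected value from every axis into a CaseDef; it drives shader
													// generation and resource setup for this combination.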
													CaseDef c =
													{
														(VkFormat)fmtCases[fmtNdx].count,					// VkFormat format;
														currentStage,										// Stage stage;
														allShaderStages,									// VkFlags allShaderStages;
														allPipelineStages,									// VkFlags allPipelineStages;
														(int)descCases[descNdx].count,						// VkDescriptorType descriptorType;
														(VkImageViewType)viewCases[viewNdx].count,			// VkImageViewType viewType;
														(VkSampleCountFlagBits)sampCases[sampNdx].count,	// VkSampleCountFlagBits samples;
														(int)lenCases[lenNdx].count,						// int bufferLen;
														(bool)unrollCases[unrollNdx].count,					// bool unroll;
														(bool)volCases[volNdx].count,						// bool vol;
														(bool)(lenCases[lenNdx].count == ~0U),				// bool nullDescriptor;
														(bool)tempCases[tempNdx].count,						// bool useTemplate;
														(bool)fmtQualCases[fmtQualNdx].count,				// bool formatQualifier;
														(bool)pushCases[pushNdx].count,						// bool pushDescriptor;
														(bool)robustness2,									// bool testRobustness2;
														(bool)pipelineRobustness,							// bool testPipelineRobustness;
														{ imageDim[0], imageDim[1], imageDim[2] },			// deUint32 imageDim[3];
														(bool)(readOnlyCases[roNdx].count == 1),			// bool readOnly;
													};

													viewGroup->addChild(new RobustnessExtsTestCase(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].name, c));
												}
												sampGroup->addChild(viewGroup.release());
											}
											lenGroup->addChild(sampGroup.release());
										}
										fmtQualGroup->addChild(lenGroup.release());
									}
									// Put storage_buffer tests in separate readonly vs readwrite groups. Other types
									// go directly into descGroup
									if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
										descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
										rwGroup->addChild(fmtQualGroup.release());
									} else {
										descGroup->addChild(fmtQualGroup.release());
									}
								}
								if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
									descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
									descGroup->addChild(rwGroup.release());
								}
							}
							volGroup->addChild(descGroup.release());
						}
						unrollGroup->addChild(volGroup.release());
					}
					fmtGroup->addChild(unrollGroup.release());
				}
				tempGroup->addChild(fmtGroup.release());
			}
			pushGroup->addChild(tempGroup.release());
		}
		group->addChild(pushGroup.release());
	}
}

static void createRobustness2Tests (tcu::TestCaseGroup* group)
{
	createTests(group, /*robustness2=*/true, /*pipelineRobustness=*/false);
}

static void createImageRobustnessTests (tcu::TestCaseGroup* group)
{
	createTests(group, /*robustness2=*/false, /*pipelineRobustness=*/false);
}
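
// Pipeline robustness reuses the same generator: the robustness2 and image_robustness variants are
// regenerated with the pipeline-robustness flag set and collected under a single parent group.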
#ifndef CTS_USES_VULKANSC
static void createPipelineRobustnessTests (tcu::TestCaseGroup* group)
{
	tcu::TestContext& testCtx = group->getTestContext();

	tcu::TestCaseGroup *robustness2Group = new tcu::TestCaseGroup(testCtx, "robustness2", "robustness2");
	createTests(robustness2Group, /*robustness2=*/true, /*pipelineRobustness=*/true);
	group->addChild(robustness2Group);

	tcu::TestCaseGroup *imageRobustness2Group = new tcu::TestCaseGroup(testCtx, "image_robustness", "image_robustness");
	createTests(imageRobustness2Group, /*robustness2=*/false, /*pipelineRobustness=*/true);
	group->addChild(imageRobustness2Group);
}
#endif // CTS_USES_VULKANSC
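
// The test cases above share lazily created singleton devices (one per feature combination);
// destroy them when the group is torn down so the custom devices are not leaked across runs.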
static void cleanupGroup (tcu::TestCaseGroup* group)
{
	DE_UNREF(group);

	// Destroy singleton objects.
	Robustness2Int64AtomicsSingleton::destroy();
	ImageRobustnessInt64AtomicsSingleton::destroy();
	ImageRobustnessSingleton::destroy();
	Robustness2Singleton::destroy();
	PipelineRobustnessImageRobustnessSingleton::destroy();
	PipelineRobustnessRobustness2Singleton::destroy();
	PipelineRobustnessImageRobustnessInt64AtomicsSingleton::destroy();
	PipelineRobustnessRobustness2Int64AtomicsSingleton::destroy();
	Robustness2Int64AtomicsScalarSingleton::destroy();
	ImageRobustnessInt64AtomicsScalarSingleton::destroy();
	ImageRobustnessScalarSingleton::destroy();
	Robustness2ScalarSingleton::destroy();
	PipelineRobustnessImageRobustnessScalarSingleton::destroy();
	PipelineRobustnessRobustness2ScalarSingleton::destroy();
	PipelineRobustnessImageRobustnessInt64AtomicsScalarSingleton::destroy();
	PipelineRobustnessRobustness2Int64AtomicsScalarSingleton::destroy();
}

tcu::TestCaseGroup* createRobustness2Tests (tcu::TestContext& testCtx)
{
	return createTestGroup(testCtx, "robustness2", "VK_EXT_robustness2 tests",
							createRobustness2Tests, cleanupGroup);
}

tcu::TestCaseGroup* createImageRobustnessTests (tcu::TestContext& testCtx)
{
	return createTestGroup(testCtx, "image_robustness", "VK_EXT_image_robustness tests",
							createImageRobustnessTests, cleanupGroup);
}

#ifndef CTS_USES_VULKANSC
tcu::TestCaseGroup* createPipelineRobustnessTests (tcu::TestContext& testCtx)
{
	return createTestGroup(testCtx, "pipeline_robustness", "VK_EXT_pipeline_robustness tests",
							createPipelineRobustnessTests, cleanupGroup);