1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2017-2019 The Khronos Group Inc.
6 * Copyright (c) 2018-2020 NVIDIA Corporation
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Vulkan robustness2 tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktRobustnessExtsTests.hpp"
27 #include "vkBufferWithMemory.hpp"
28 #include "vkImageWithMemory.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkQueryUtil.hpp"
31 #include "vkDeviceUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkCmdUtil.hpp"
34 #include "vkTypeUtil.hpp"
35 #include "vkObjUtil.hpp"
36 #include "vkBarrierUtil.hpp"
37 #include "vktRobustnessUtil.hpp"
39 #include "vktTestGroupUtil.hpp"
40 #include "vktTestCase.hpp"
45 #include "deSharedPtr.hpp"
48 #include "tcuVectorType.hpp"
49 #include "tcuTestCase.hpp"
50 #include "tcuTestLog.hpp"
67 enum RobustnessFeatureBits
69 RF_IMG_ROBUSTNESS = (1 ),
70 RF_ROBUSTNESS2 = (1 << 1 ),
71 SIF_INT64ATOMICS = (1 << 2 ),
74 using RobustnessFeatures = deUint32;
76 // Class to wrap a singleton device with the indicated robustness features.
77 template <RobustnessFeatures FEATURES>
80 SingletonDevice (Context& context)
81 : m_context(context), m_instanceWrapper(new CustomInstanceWrapper(context)), m_logicalDevice ()
83 // Note we are already checking the needed features are available in checkSupport().
84 VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
85 VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
86 VkPhysicalDeviceScalarBlockLayoutFeatures scalarBlockLayoutFeatures = initVulkanStructure();
87 VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT shaderImageAtomicInt64Features = initVulkanStructure();
88 VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();
90 features2.pNext = &scalarBlockLayoutFeatures;
92 if (FEATURES & RF_IMG_ROBUSTNESS)
94 DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));
95 imageRobustnessFeatures.pNext = features2.pNext;
96 features2.pNext = &imageRobustnessFeatures;
99 if (FEATURES & RF_ROBUSTNESS2)
101 DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));
102 robustness2Features.pNext = features2.pNext;
103 features2.pNext = &robustness2Features;
106 if (FEATURES & SIF_INT64ATOMICS)
108 DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"));
109 shaderImageAtomicInt64Features.pNext = features2.pNext;
110 features2.pNext = &shaderImageAtomicInt64Features;
113 const VkPhysicalDevice physicalDevice = chooseDevice(m_instanceWrapper->instance.getDriver(), m_instanceWrapper->instance, context.getTestContext().getCommandLine());
114 m_instanceWrapper->instance.getDriver().getPhysicalDeviceFeatures2(physicalDevice, &features2);
115 m_logicalDevice = createRobustBufferAccessDevice(context, m_instanceWrapper->instance, m_instanceWrapper->instance.getDriver(), &features2);
117 #ifndef CTS_USES_VULKANSC
118 m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), m_instanceWrapper->instance, *m_logicalDevice));
120 m_deviceDriver = de::MovePtr<DeviceDriverSC, DeinitDeviceDeleter>(new DeviceDriverSC(context.getPlatformInterface(), m_instanceWrapper->instance, *m_logicalDevice, context.getTestContext().getCommandLine(), context.getResourceInterface(), m_context.getDeviceVulkanSC10Properties()), vk::DeinitDeviceDeleter(context.getResourceInterface().get(), *m_logicalDevice));
121 #endif // CTS_USES_VULKANSC
128 static VkInstance getInstance(Context& context)
130 if (!m_singletonDevice)
131 m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
132 DE_ASSERT(m_singletonDevice);
133 return m_singletonDevice->m_instanceWrapper->instance;
135 static const InstanceInterface& getInstanceInterface(Context& context)
137 if (!m_singletonDevice)
138 m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
139 DE_ASSERT(m_singletonDevice);
140 return m_singletonDevice->m_instanceWrapper->instance.getDriver();
143 static VkDevice getDevice(Context& context)
145 if (!m_singletonDevice)
146 m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
147 DE_ASSERT(m_singletonDevice);
148 return m_singletonDevice->m_logicalDevice.get();
150 static const DeviceInterface& getDeviceInterface(Context& context)
152 if (!m_singletonDevice)
153 m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
154 DE_ASSERT(m_singletonDevice);
155 return *(m_singletonDevice->m_deviceDriver.get());
158 static void destroy()
160 m_singletonDevice.clear();
164 const Context& m_context;
165 std::shared_ptr<CustomInstanceWrapper> m_instanceWrapper;
166 Move<vk::VkDevice> m_logicalDevice;
167 #ifndef CTS_USES_VULKANSC
168 de::MovePtr<vk::DeviceDriver> m_deviceDriver;
170 de::MovePtr<vk::DeviceDriverSC, vk::DeinitDeviceDeleter> m_deviceDriver;
171 #endif // CTS_USES_VULKANSC
173 static SharedPtr<SingletonDevice<FEATURES>> m_singletonDevice;
176 template <RobustnessFeatures FEATURES>
177 SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;
179 constexpr RobustnessFeatures kImageRobustness = RF_IMG_ROBUSTNESS;
180 constexpr RobustnessFeatures kRobustness2 = RF_ROBUSTNESS2;
181 constexpr RobustnessFeatures kShaderImageInt64Atomics = SIF_INT64ATOMICS;
183 using ImageRobustnessSingleton = SingletonDevice<kImageRobustness>;
184 using Robustness2Singleton = SingletonDevice<kRobustness2>;
186 using ImageRobustnessInt64AtomicsSingleton = SingletonDevice<kImageRobustness | kShaderImageInt64Atomics>;
187 using Robustness2Int64AtomicsSingleton = SingletonDevice<kRobustness2 | kShaderImageInt64Atomics>;
189 // Render target / compute grid dimensions
190 static const deUint32 DIM = 8;
192 // treated as a phony VkDescriptorType value
193 #define VERTEX_ATTRIBUTE_FETCH 999
207 VkFlags allShaderStages;
208 VkFlags allPipelineStages;
209 int/*VkDescriptorType*/ descriptorType;
210 VkImageViewType viewType;
211 VkSampleCountFlagBits samples;
217 bool formatQualifier;
219 bool testRobustness2;
220 deUint32 imageDim[3]; // width, height, depth or layers
224 static bool formatIsR64(const VkFormat& f)
228 case VK_FORMAT_R64_SINT:
229 case VK_FORMAT_R64_UINT:
236 // Returns the appropriate singleton device for the given case.
237 VkInstance getInstance(Context& ctx, const CaseDef& caseDef)
239 if (formatIsR64(caseDef.format))
241 if (caseDef.testRobustness2)
242 return Robustness2Int64AtomicsSingleton::getInstance(ctx);
243 return ImageRobustnessInt64AtomicsSingleton::getInstance(ctx);
246 if (caseDef.testRobustness2)
247 return Robustness2Singleton::getInstance(ctx);
248 return ImageRobustnessSingleton::getInstance(ctx);
251 // Returns the appropriate singleton device driver for the given case.
252 const InstanceInterface& getInstanceInterface(Context& ctx, const CaseDef& caseDef)
254 if (formatIsR64(caseDef.format))
256 if (caseDef.testRobustness2)
257 return Robustness2Int64AtomicsSingleton::getInstanceInterface(ctx);
258 return ImageRobustnessInt64AtomicsSingleton::getInstanceInterface(ctx);
261 if (caseDef.testRobustness2)
262 return Robustness2Singleton::getInstanceInterface(ctx);
263 return ImageRobustnessSingleton::getInstanceInterface(ctx);
266 // Returns the appropriate singleton device for the given case.
267 VkDevice getLogicalDevice (Context& ctx, const CaseDef& caseDef)
269 if (formatIsR64(caseDef.format))
271 if (caseDef.testRobustness2)
272 return Robustness2Int64AtomicsSingleton::getDevice(ctx);
273 return ImageRobustnessInt64AtomicsSingleton::getDevice(ctx);
276 if (caseDef.testRobustness2)
277 return Robustness2Singleton::getDevice(ctx);
278 return ImageRobustnessSingleton::getDevice(ctx);
281 // Returns the appropriate singleton device driver for the given case.
282 const DeviceInterface& getDeviceInterface(Context& ctx, const CaseDef& caseDef)
284 if (formatIsR64(caseDef.format))
286 if (caseDef.testRobustness2)
287 return Robustness2Int64AtomicsSingleton::getDeviceInterface(ctx);
288 return ImageRobustnessInt64AtomicsSingleton::getDeviceInterface(ctx);
291 if (caseDef.testRobustness2)
292 return Robustness2Singleton::getDeviceInterface(ctx);
293 return ImageRobustnessSingleton::getDeviceInterface(ctx);
300 vector<VkDescriptorSetLayoutBinding> layoutBindings;
301 vector<deUint8> refData;
305 class RobustnessExtsTestInstance : public TestInstance
308 RobustnessExtsTestInstance (Context& context, const CaseDef& data);
309 ~RobustnessExtsTestInstance (void);
310 tcu::TestStatus iterate (void);
315 RobustnessExtsTestInstance::RobustnessExtsTestInstance (Context& context, const CaseDef& data)
316 : vkt::TestInstance (context)
321 RobustnessExtsTestInstance::~RobustnessExtsTestInstance (void)
325 class RobustnessExtsTestCase : public TestCase
328 RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data);
329 ~RobustnessExtsTestCase (void);
330 virtual void initPrograms (SourceCollections& programCollection) const;
331 virtual TestInstance* createInstance (Context& context) const;
332 virtual void checkSupport (Context& context) const;
338 RobustnessExtsTestCase::RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data)
339 : vkt::TestCase (context, name, desc)
344 RobustnessExtsTestCase::~RobustnessExtsTestCase (void)
348 static bool formatIsFloat(const VkFormat& f)
352 case VK_FORMAT_R32_SFLOAT:
353 case VK_FORMAT_R32G32_SFLOAT:
354 case VK_FORMAT_R32G32B32A32_SFLOAT:
361 static bool formatIsSignedInt(const VkFormat& f)
365 case VK_FORMAT_R32_SINT:
366 case VK_FORMAT_R64_SINT:
367 case VK_FORMAT_R32G32_SINT:
368 case VK_FORMAT_R32G32B32A32_SINT:
375 static bool supportsStores(int descriptorType)
377 switch (descriptorType)
379 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
380 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
381 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
382 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
389 void RobustnessExtsTestCase::checkSupport(Context& context) const
391 const auto& vki = context.getInstanceInterface();
392 const auto physicalDevice = context.getPhysicalDevice();
394 // We need to query feature support using the physical device instead of using the reported context features because robustness2
395 // and image robustness are always disabled in the default device but they may be available.
396 VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
397 VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
398 VkPhysicalDeviceScalarBlockLayoutFeatures scalarLayoutFeatures = initVulkanStructure();
399 VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();
401 context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
403 context.requireDeviceFunctionality("VK_EXT_scalar_block_layout");
404 features2.pNext = &scalarLayoutFeatures;
406 if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
408 imageRobustnessFeatures.pNext = features2.pNext;
409 features2.pNext = &imageRobustnessFeatures;
412 if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
414 robustness2Features.pNext = features2.pNext;
415 features2.pNext = &robustness2Features;
418 vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
420 if (formatIsR64(m_data.format))
422 context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");
424 VkFormatProperties formatProperties;
425 vki.getPhysicalDeviceFormatProperties(physicalDevice, m_data.format, &formatProperties);
427 switch (m_data.descriptorType)
429 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
430 if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
431 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");
433 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
434 if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
435 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
437 case VERTEX_ATTRIBUTE_FETCH:
438 if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
439 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
441 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
442 if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
443 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
445 default: DE_ASSERT(true);
448 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
450 if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
451 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
455 // Check needed properties and features
456 if (!scalarLayoutFeatures.scalarBlockLayout)
457 TCU_THROW(NotSupportedError, "Scalar block layout not supported");
459 if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
460 TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
462 if (m_data.stage == STAGE_FRAGMENT && !features2.features.fragmentStoresAndAtomics)
463 TCU_THROW(NotSupportedError, "Fragment shader stores not supported");
465 if (m_data.stage == STAGE_RAYGEN)
466 context.requireDeviceFunctionality("VK_NV_ray_tracing");
468 switch (m_data.descriptorType)
470 default: DE_ASSERT(0); // Fallthrough
471 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
472 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
473 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
474 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
475 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
476 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
477 case VERTEX_ATTRIBUTE_FETCH:
478 if (m_data.testRobustness2)
480 if (!robustness2Features.robustBufferAccess2)
481 TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
485 // This case is not tested here.
489 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
490 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
491 if (m_data.testRobustness2)
493 if (!robustness2Features.robustImageAccess2)
494 TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
498 if (!imageRobustnessFeatures.robustImageAccess)
499 TCU_THROW(NotSupportedError, "robustImageAccess not supported");
504 if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
505 TCU_THROW(NotSupportedError, "nullDescriptor not supported");
507 // The fill shader for 64-bit multisample image tests uses a storage image.
508 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && formatIsR64(m_data.format) &&
509 !features2.features.shaderStorageImageMultisample)
510 TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");
512 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
513 m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
514 !features2.features.shaderStorageImageMultisample)
515 TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");
517 if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(0, 1, 1, 0)))
518 TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");
520 #ifndef CTS_USES_VULKANSC
521 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
522 !m_data.formatQualifier)
524 const VkFormatProperties3 formatProperties = context.getFormatProperties(m_data.format);
525 if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
526 TCU_THROW(NotSupportedError, "Format does not support reading without format");
527 if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
528 TCU_THROW(NotSupportedError, "Format does not support writing without format");
531 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
532 !m_data.formatQualifier &&
533 (!features2.features.shaderStorageImageReadWithoutFormat || !features2.features.shaderStorageImageWriteWithoutFormat))
534 TCU_THROW(NotSupportedError, "shaderStorageImageReadWithoutFormat or shaderStorageImageWriteWithoutFormat not supported");
535 #endif // CTS_USES_VULKANSC
537 if (m_data.pushDescriptor)
538 context.requireDeviceFunctionality("VK_KHR_push_descriptor");
540 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !features2.features.imageCubeArray)
541 TCU_THROW(NotSupportedError, "Cube array image view type not supported");
543 if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") && !context.getDeviceFeatures().robustBufferAccess)
544 TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");
547 void generateLayout(Layout &layout, const CaseDef &caseDef)
549 vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
550 int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
551 bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);
553 for (deUint32 b = 0; b < layout.layoutBindings.size(); ++b)
555 VkDescriptorSetLayoutBinding &binding = bindings[b];
557 binding.pImmutableSamplers = NULL;
558 binding.stageFlags = caseDef.allShaderStages;
559 binding.descriptorCount = 1;
563 binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
564 else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
565 binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;
568 if (caseDef.nullDescriptor)
571 if (caseDef.bufferLen == 0)
573 // Clear color values for image tests
574 static deUint32 urefData[4] = { 0x12345678, 0x23456789, 0x34567890, 0x45678901 };
575 static deUint64 urefData64[4] = { 0x1234567887654321, 0x234567899, 0x345678909, 0x456789019 };
576 static float frefData[4] = { 123.f, 234.f, 345.f, 456.f };
578 if (formatIsR64(caseDef.format))
580 layout.refData.resize(32);
581 deUint64 *ptr = (deUint64 *)layout.refData.data();
583 for (unsigned int i = 0; i < 4; ++i)
585 ptr[i] = urefData64[i];
590 layout.refData.resize(16);
591 deMemcpy(layout.refData.data(), formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData, sizeof(frefData));
596 layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7: ~3));
597 for (unsigned int i = 0; i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(deUint64) : sizeof(deUint32)); ++i)
599 if (formatIsFloat(caseDef.format))
601 float *f = (float *)layout.refData.data() + i;
602 *f = 2.0f*(float)i + 3.0f;
604 if (formatIsR64(caseDef.format))
606 deUint64 *u = (deUint64 *)layout.refData.data() + i;
611 int *u = (int *)layout.refData.data() + i;
618 static string genFetch(const CaseDef &caseDef, int numComponents, const string& vecType, const string& coord, const string& lod)
621 // Fetch from the descriptor.
622 switch (caseDef.descriptorType)
624 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
625 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
626 s << vecType << "(ubo0_1.val[" << coord << "]";
627 for (int i = numComponents; i < 4; ++i) s << ", 0";
630 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
631 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
632 s << vecType << "(ssbo0_1.val[" << coord << "]";
633 for (int i = numComponents; i < 4; ++i) s << ", 0";
636 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
637 s << "texelFetch(texbo0_1, " << coord << ")";
639 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
640 s << "imageLoad(image0_1, " << coord << ")";
642 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
643 if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
644 s << "texelFetch(texture0_1, " << coord << ")";
646 s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
648 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
649 s << "imageLoad(image0_1, " << coord << ")";
651 case VERTEX_ATTRIBUTE_FETCH:
654 default: DE_ASSERT(0);
659 static const int storeValue = 123;
661 // Get the value stored by genStore.
662 static string getStoreValue(int descriptorType, int numComponents, const string& vecType, const string& bufType)
665 switch (descriptorType)
667 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
668 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
669 s << vecType << "(" << bufType << "(" << storeValue << ")";
670 for (int i = numComponents; i < 4; ++i) s << ", 0";
673 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
674 s << vecType << "(" << storeValue << ")";
676 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
677 s << vecType << "(" << storeValue << ")";
679 default: DE_ASSERT(0);
684 static string genStore(int descriptorType, const string& vecType, const string& bufType, const string& coord)
687 // Store to the descriptor.
688 switch (descriptorType)
690 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
691 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
692 s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
694 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
695 s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
697 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
698 s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
700 default: DE_ASSERT(0);
705 static string genAtomic(int descriptorType, const string& bufType, const string& coord)
708 // Store to the descriptor. The value doesn't matter, since we only test out of bounds coordinates.
709 switch (descriptorType)
711 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
712 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
713 s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
715 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
716 s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
718 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
719 s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
721 default: DE_ASSERT(0);
726 static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
728 const char* orderPart;
729 const char* typePart;
731 switch (format.order)
733 case tcu::TextureFormat::R: orderPart = "r"; break;
734 case tcu::TextureFormat::RG: orderPart = "rg"; break;
735 case tcu::TextureFormat::RGB: orderPart = "rgb"; break;
736 case tcu::TextureFormat::RGBA: orderPart = "rgba"; break;
739 DE_FATAL("Impossible");
745 case tcu::TextureFormat::FLOAT: typePart = "32f"; break;
746 case tcu::TextureFormat::HALF_FLOAT: typePart = "16f"; break;
748 case tcu::TextureFormat::UNSIGNED_INT64: typePart = "64ui"; break;
749 case tcu::TextureFormat::UNSIGNED_INT32: typePart = "32ui"; break;
750 case tcu::TextureFormat::UNSIGNED_INT16: typePart = "16ui"; break;
751 case tcu::TextureFormat::UNSIGNED_INT8: typePart = "8ui"; break;
753 case tcu::TextureFormat::SIGNED_INT64: typePart = "64i"; break;
754 case tcu::TextureFormat::SIGNED_INT32: typePart = "32i"; break;
755 case tcu::TextureFormat::SIGNED_INT16: typePart = "16i"; break;
756 case tcu::TextureFormat::SIGNED_INT8: typePart = "8i"; break;
758 case tcu::TextureFormat::UNORM_INT16: typePart = "16"; break;
759 case tcu::TextureFormat::UNORM_INT8: typePart = "8"; break;
761 case tcu::TextureFormat::SNORM_INT16: typePart = "16_snorm"; break;
762 case tcu::TextureFormat::SNORM_INT8: typePart = "8_snorm"; break;
765 DE_FATAL("Impossible");
769 return std::string() + orderPart + typePart;
772 string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
777 if (samples != VK_SAMPLE_COUNT_1_BIT)
780 string coord = "ivec" + to_string(numCoords) + "(";
782 for (int i = 0; i < numCoords; ++i)
788 if (i < numCoords - 1)
793 // Append sample coordinate
794 if (samples != VK_SAMPLE_COUNT_1_BIT)
797 if (dim == numCoords)
805 // Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
806 string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
808 // dim can be 3 for cube_array. Reuse the number of layers in that case.
809 dim = std::min(dim, 2);
812 return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";
814 string coord = "vec" + to_string(numCoords) + "(";
816 for (int i = 0; i < numCoords; ++i)
822 if (i < numNormalizedCoords)
823 coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
824 if (i < numCoords - 1)
831 void RobustnessExtsTestCase::initPrograms (SourceCollections& programCollection) const
833 VkFormat format = m_data.format;
836 generateLayout(layout, m_data);
838 if (layout.layoutBindings.size() > 1 &&
839 layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
841 if (format == VK_FORMAT_R64_SINT)
842 format = VK_FORMAT_R32G32_SINT;
844 if (format == VK_FORMAT_R64_UINT)
845 format = VK_FORMAT_R32G32_UINT;
848 std::stringstream decls, checks;
850 const string r64 = formatIsR64(format) ? "64" : "";
851 const string i64Type = formatIsR64(format) ? "64_t" : "";
852 const string vecType = formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
853 const string qLevelType = vecType == "vec4" ? "float" : ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) : ("uint" + i64Type);
855 decls << "uvec4 abs(uvec4 x) { return x; }\n";
856 if (formatIsR64(format))
857 decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
858 decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";
861 const int componetsSize = (formatIsR64(format) ? 8 : 4);
862 int refDataNumElements = deIntRoundToPow2(((int)layout.refData.size() / componetsSize), 4);
863 // Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
864 // robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
865 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
866 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
868 refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
870 if (m_data.nullDescriptor)
871 refDataNumElements = 4;
873 if (formatIsFloat(format))
875 decls << "float refData[" << refDataNumElements << "] = {";
877 for (i = 0; i < (int)layout.refData.size() / 4; ++i)
881 decls << ((const float *)layout.refData.data())[i];
883 while (i < refDataNumElements)
891 else if (formatIsR64(format))
893 decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
895 for (i = 0; i < (int)layout.refData.size() / 8; ++i)
899 decls << ((const deUint64 *)layout.refData.data())[i] << "l";
901 while (i < refDataNumElements)
911 decls << "int" << " refData[" << refDataNumElements << "] = {";
913 for (i = 0; i < (int)layout.refData.size() / 4; ++i)
917 decls << ((const int *)layout.refData.data())[i];
919 while (i < refDataNumElements)
929 decls << vecType << " zzzz = " << vecType << "(0);\n";
930 decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
931 decls << vecType << " expectedIB;\n";
933 string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
934 string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
935 string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));
937 string imageDim = "";
938 int numCoords, numNormalizedCoords;
939 bool layered = false;
940 switch (m_data.viewType)
942 default: DE_ASSERT(0); // Fallthrough
943 case VK_IMAGE_VIEW_TYPE_1D: imageDim = "1D"; numCoords = 1; numNormalizedCoords = 1; break;
944 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: imageDim = "1DArray"; numCoords = 2; numNormalizedCoords = 1; layered = true; break;
945 case VK_IMAGE_VIEW_TYPE_2D: imageDim = "2D"; numCoords = 2; numNormalizedCoords = 2; break;
946 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: imageDim = "2DArray"; numCoords = 3; numNormalizedCoords = 2; layered = true; break;
947 case VK_IMAGE_VIEW_TYPE_3D: imageDim = "3D"; numCoords = 3; numNormalizedCoords = 3; break;
948 case VK_IMAGE_VIEW_TYPE_CUBE: imageDim = "Cube"; numCoords = 3; numNormalizedCoords = 3; break;
949 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: imageDim = "CubeArray"; numCoords = 4; numNormalizedCoords = 3; layered = true; break;
951 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
953 switch (m_data.viewType)
955 default: DE_ASSERT(0); // Fallthrough
956 case VK_IMAGE_VIEW_TYPE_2D: imageDim = "2DMS"; break;
957 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: imageDim = "2DMSArray"; break;
961 bool dataDependsOnLayer = (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) && !m_data.nullDescriptor;
963 // Special case imageLoad(imageCubeArray, ...) which uses ivec3
964 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
965 m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
970 int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
972 if (numComponents == 1)
973 bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
975 bufType = imgprefix + "vec" + std::to_string(numComponents);
977 // For UBO's, which have a declared size in the shader, don't access outside that size.
978 bool declaredSize = false;
979 switch (m_data.descriptorType) {
980 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
981 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
988 checks << " int inboundcoords, clampedLayer;\n";
989 checks << " " << vecType << " expectedIB2;\n";
993 checks << " [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
995 checks << " [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
1000 checks << " [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
1002 checks << " [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";
1005 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1006 checks << " int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
1008 checks << " int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";
1010 decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";
1012 const char *vol = m_data.vol ? "volatile " : "";
1013 const char *ro = m_data.readOnly ? "readonly " : "";
1015 // Construct the declaration for the binding
1016 switch (m_data.descriptorType)
1018 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1019 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1020 decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";
1022 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1023 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1024 decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
1025 decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
1027 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1030 case VK_FORMAT_R64_SINT:
1031 decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
1033 case VK_FORMAT_R64_UINT:
1034 decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
1037 decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";
1040 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1041 decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "imageBuffer image0_1;\n";
1043 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1044 decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image" << imageDim << " image0_1;\n";
1046 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1049 case VK_FORMAT_R64_SINT:
1050 decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1; \n";
1052 case VK_FORMAT_R64_UINT:
1053 decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1; \n";
1056 decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";
1060 case VERTEX_ATTRIBUTE_FETCH:
1061 if (formatIsR64(format))
1063 decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t")) << " attr;\n";
1067 decls << "layout(location = 0) in " << vecType << " attr;\n";
1070 default: DE_ASSERT(0);
1076 switch (m_data.descriptorType)
1078 default: DE_ASSERT(0); // Fallthrough
1079 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1080 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1081 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1082 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1083 expectedOOB = "zzzz";
1086 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1087 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1088 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1089 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1090 case VERTEX_ATTRIBUTE_FETCH:
1091 if (numComponents == 1)
1093 expectedOOB = "zzzo";
1095 else if (numComponents == 2)
1097 expectedOOB = "zzzo";
1101 expectedOOB = "zzzz";
1108 switch (m_data.descriptorType)
1110 default: DE_ASSERT(0); // Fallthrough
1111 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1112 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1113 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1114 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1115 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1116 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1117 case VERTEX_ATTRIBUTE_FETCH:
1120 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1121 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1126 if (m_data.nullDescriptor)
1128 checks << " expectedIB = zzzz;\n";
1129 checks << " inboundcoords = 0;\n";
1130 checks << " int paddedinboundcoords = 0;\n";
1131 // Vertex attribute fetch still gets format conversion applied
1132 if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
1133 expectedOOB = "zzzz";
1137 checks << " expectedIB.x = refData[" << idx << "];\n";
1138 if (numComponents > 1)
1140 checks << " expectedIB.y = refData[" << idx << "+1];\n";
1144 checks << " expectedIB.y = 0;\n";
1146 if (numComponents > 2)
1148 checks << " expectedIB.z = refData[" << idx << "+2];\n";
1149 checks << " expectedIB.w = refData[" << idx << "+3];\n";
1153 checks << " expectedIB.z = 0;\n";
1154 checks << " expectedIB.w = " << defaultw << ";\n";
1157 switch (m_data.descriptorType)
1159 default: DE_ASSERT(0); // Fallthrough
1160 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1161 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1162 // UBOs can either strictly bounds check against inboundcoords, or can
1163 // return the contents from memory for the range padded up to paddedinboundcoords.
1164 checks << " int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
1166 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1167 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1168 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1169 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1170 case VERTEX_ATTRIBUTE_FETCH:
1171 checks << " inboundcoords = " << layout.refData.size() / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32)) / numComponents << ";\n";
1173 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1174 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1175 // set per-component below
1180 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
1181 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
1182 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1183 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1186 for (int i = 0; i < numCoords; ++i)
1188 // Treat i==3 coord (cube array layer) like i == 2
1189 deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
1190 if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1191 checks << " inboundcoords = " << coordDim << ";\n";
1193 string coord = genCoord("c", numCoords, m_data.samples, i);
1194 string inboundcoords =
1195 m_data.nullDescriptor ? "0" :
1196 (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ? to_string(m_data.samples) : "inboundcoords";
1198 checks << " if (c < 0 || c >= " << inboundcoords << ") " << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
1199 if (m_data.formatQualifier &&
1200 (format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
1202 checks << " if (c < 0 || c >= " << inboundcoords << ") " << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
1207 for (int i = 0; i < numCoords; ++i)
1209 // Treat i==3 coord (cube array layer) like i == 2
1210 deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
1211 if (!m_data.nullDescriptor)
1213 switch (m_data.descriptorType)
1217 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1218 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1219 checks << " inboundcoords = " << coordDim << ";\n";
1224 string coord = genCoord("c", numCoords, m_data.samples, i);
1226 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1228 if (formatIsR64(format))
1230 checks << " temp.x = attr;\n";
1231 checks << " temp.y = 0l;\n";
1232 checks << " temp.z = 0l;\n";
1233 checks << " temp.w = 0l;\n";
1234 checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else temp -= zzzz;\n";
1238 checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1239 checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else temp -= " << expectedOOB << ";\n";
1241 // Accumulate any incorrect values.
1242 checks << " accum += abs(temp);\n";
1244 // Skip texelFetch testing for cube(array) - texelFetch doesn't support it
1245 if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
1246 !(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1247 (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
1249 checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1251 checks << " expectedIB2 = expectedIB;\n";
1253 // Expected data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1254 if (dataDependsOnLayer && i == numNormalizedCoords)
1255 checks << " if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";
1257 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
1259 if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1261 checks << " if (temp == zzzz) temp = " << vecType << "(0);\n";
1262 if (m_data.formatQualifier && numComponents < 4)
1263 checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";
1264 checks << " else temp = " << vecType << "(1);\n";
1267 // multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
1268 checks << " if (c >= 0 && c < " << m_data.samples << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";
1272 // Storage buffers may be split into per-component loads. Generate a second
1273 // expected out of bounds value where some subset of the components are
1274 // actually in-bounds. If both loads and stores are split into per-component
1275 // accesses, then the result value can be a mix of storeValue and zero.
1276 string expectedOOB2 = expectedOOB;
1277 string expectedOOB3 = expectedOOB;
1278 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1279 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1280 !m_data.nullDescriptor)
1282 int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
1283 int mod = (int)((len / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32))) % numComponents);
1284 string sstoreValue = de::toString(storeValue);
1290 expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
1291 expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
1294 expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
1295 expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
1298 expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
1299 expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";
1304 // Entirely in-bounds.
1305 checks << " if (c >= 0 && c < inboundcoords) {\n"
1306 " if (temp == expectedIB2) temp = " << vecType << "(0); else temp = " << vecType << "(1);\n"
1309 // normal out-of-bounds value
1310 if (m_data.testRobustness2)
1311 checks << " else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
1313 // image_robustness relaxes alpha which is allowed to be zero or one
1314 checks << " else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";
1316 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1317 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1319 checks << " else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = " << vecType << "(0);\n";
1322 // null descriptor loads with image format layout qualifier that doesn't include alpha may return alpha=1
1323 if (m_data.nullDescriptor && m_data.formatQualifier &&
1324 (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
1326 checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";
1328 // non-volatile value replaced with stored value
1329 if (supportsStores(m_data.descriptorType) && !m_data.vol)
1330 checks << " else if (temp == " << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType) << ") temp = " << vecType << "(0);\n";
1332 // value straddling the boundary, returning a partial vector
1333 if (expectedOOB2 != expectedOOB)
1334 checks << " else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType << "(0);\n";
1335 if (expectedOOB3 != expectedOOB)
1336 checks << " else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType << "(0);\n";
1339 checks << " else temp = " << vecType << "(1);\n";
1341 // Accumulate any incorrect values.
1342 checks << " accum += abs(temp);\n";
1344 // Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
1345 if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER && m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1347 // Fetch from an out of bounds mip level. Expect this to always return the OOB value.
1348 string coord0 = genCoord("0", numCoords, m_data.samples, i);
1349 checks << " if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c") << "; else temp = " << vecType << "(0);\n";
1350 checks << " if (c != 0) temp -= " << expectedOOB << ";\n";
1351 checks << " accum += abs(temp);\n";
1354 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1355 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1357 string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);
1359 checks << " expectedIB2 = expectedIB;\n";
1361 // Data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1362 if (dataDependsOnLayer && i == numNormalizedCoords)
1364 checks << " clampedLayer = clamp(c, 0, " << coordDim-1 << ");\n";
1365 checks << " expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";
1368 stringstream normexpected;
1369 // Cubemap fetches are always in-bounds. Layer coordinate is clamped, so is always in-bounds.
1370 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
1371 m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
1372 (layered && i == numCoords-1))
1373 normexpected << " temp -= expectedIB2;\n";
1376 normexpected << " if (c >= 0 && c < inboundcoords)\n";
1377 normexpected << " temp -= expectedIB2;\n";
1378 normexpected << " else\n";
1379 if (m_data.testRobustness2)
1380 normexpected << " temp -= " << expectedOOB << ";\n";
1382 // image_robustness relaxes alpha which is allowed to be zero or one
1383 normexpected << " temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";
1386 checks << " temp = texture(texture0_1, " << coordNorm << ");\n";
1387 checks << normexpected.str();
1388 checks << " accum += abs(temp);\n";
1389 checks << " temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
1390 checks << normexpected.str();
1391 checks << " accum += abs(temp);\n";
1392 checks << " temp = textureGrad(texture0_1, " << coordNorm << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
1393 checks << normexpected.str();
1394 checks << " accum += abs(temp);\n";
1396 if (m_data.nullDescriptor)
1398 const char *sizeswiz;
1399 switch (m_data.viewType)
1401 default: DE_ASSERT(0); // Fallthrough
1402 case VK_IMAGE_VIEW_TYPE_1D: sizeswiz = ".xxxx"; break;
1403 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: sizeswiz = ".xyxx"; break;
1404 case VK_IMAGE_VIEW_TYPE_2D: sizeswiz = ".xyxx"; break;
1405 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: sizeswiz = ".xyzx"; break;
1406 case VK_IMAGE_VIEW_TYPE_3D: sizeswiz = ".xyzx"; break;
1407 case VK_IMAGE_VIEW_TYPE_CUBE: sizeswiz = ".xyxx"; break;
1408 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: sizeswiz = ".xyzx"; break;
1410 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1412 if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1414 checks << " temp = textureSize(texture0_1, 0)" << sizeswiz <<";\n";
1415 checks << " accum += abs(temp);\n";
1417 // checking textureSize with clearly out of range LOD values
1418 checks << " temp = textureSize(texture0_1, " << -i << ")" << sizeswiz <<";\n";
1419 checks << " accum += abs(temp);\n";
1420 checks << " temp = textureSize(texture0_1, " << (std::numeric_limits<deInt32>::max() - i) << ")" << sizeswiz <<";\n";
1421 checks << " accum += abs(temp);\n";
1425 checks << " temp = textureSize(texture0_1)" << sizeswiz <<";\n";
1426 checks << " accum += abs(temp);\n";
1427 checks << " temp = textureSamples(texture0_1).xxxx;\n";
1428 checks << " accum += abs(temp);\n";
1431 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1433 if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1435 checks << " temp = imageSize(image0_1)" << sizeswiz <<";\n";
1436 checks << " accum += abs(temp);\n";
1440 checks << " temp = imageSize(image0_1)" << sizeswiz <<";\n";
1441 checks << " accum += abs(temp);\n";
1442 checks << " temp = imageSamples(image0_1).xxxx;\n";
1443 checks << " accum += abs(temp);\n";
1446 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1447 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1449 // expect zero for runtime-sized array .length()
1450 checks << " temp = " << vecType << "(ssbo0_1.val.length());\n";
1451 checks << " accum += abs(temp);\n";
1452 checks << " temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
1453 checks << " accum += abs(temp);\n";
1459 // outside the coordinates loop because we only need to call it once
1460 if (m_data.nullDescriptor &&
1461 m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1462 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1464 checks << " temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
1465 checks << " temp = " << vecType << "(temp_ql);\n";
1466 checks << " accum += abs(temp);\n";
1468 if (m_data.stage == STAGE_FRAGMENT)
1470 // as here we only want to check that textureQueryLod returns 0 when
1471 // texture0_1 is null, we don't need to use the actual texture coordinates
1472 // (and modify the vertex shader below to do so). Any coordinates are fine.
1473 // gl_FragCoord has been selected "randomly", instead of selecting 0 for example.
1474 std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1475 checks << " vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1476 checks << " temp_ql = " << qLevelType << "(ceil(abs(lod.x) + abs(lod.y)));\n";
1477 checks << " temp = " << vecType << "(temp_ql);\n";
1478 checks << " accum += abs(temp);\n";
1483 const bool is64BitFormat = formatIsR64(m_data.format);
1484 std::string support = "#version 460 core\n"
1485 "#extension GL_EXT_nonuniform_qualifier : enable\n"
1486 "#extension GL_EXT_scalar_block_layout : enable\n"
1487 "#extension GL_EXT_samplerless_texture_functions : enable\n"
1488 "#extension GL_EXT_control_flow_attributes : enable\n"
1489 "#extension GL_EXT_shader_image_load_formatted : enable\n";
1490 std::string SupportR64 = "#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
1491 "#extension GL_EXT_shader_image_int64 : require\n";
1493 support += SupportR64;
1494 if (m_data.stage == STAGE_RAYGEN)
1495 support += "#extension GL_NV_ray_tracing : require\n";
1497 std::string code = " " + vecType + " accum = " + vecType + "(0);\n"
1498 " " + vecType + " temp;\n"
1499 " " + qLevelType + " temp_ql;\n" +
1501 " " + vecType + " color = (accum != " + vecType + "(0)) ? " + vecType + "(0,0,0,0) : " + vecType + "(1,0,0,1);\n";
1503 switch (m_data.stage)
1505 default: DE_ASSERT(0); // Fallthrough
1508 std::stringstream css;
1511 "layout(local_size_x = 1, local_size_y = 1) in;\n"
1515 " imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1518 programCollection.glslSources.add("test") << glu::ComputeSource(css.str())
1519 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1524 std::stringstream css;
1530 " imageStore(image0_0, ivec2(gl_LaunchIDNV.xy), color);\n"
1533 programCollection.glslSources.add("test") << glu::RaygenSource(css.str())
1534 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1539 std::stringstream vss;
1545 " imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM << "), color);\n"
1546 " gl_PointSize = 1.0f;\n"
1547 " gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1550 programCollection.glslSources.add("test") << glu::VertexSource(vss.str())
1551 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1554 case STAGE_FRAGMENT:
1556 std::stringstream vss;
1558 "#version 450 core\n"
1561 // full-viewport quad
1562 " gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * float(gl_VertexIndex&1), 1);\n"
1565 programCollection.glslSources.add("vert") << glu::VertexSource(vss.str())
1566 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1568 std::stringstream fss;
1574 " imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1577 programCollection.glslSources.add("test") << glu::FragmentSource(fss.str())
1578 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1583 // The 64-bit conditions below are redundant. Can we support the below shader for other than 64-bit formats?
1584 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
1586 const std::string ivecCords = (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1587 std::stringstream fillShader;
1593 "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1594 "layout (" + getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1595 << string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image" << imageDim << +" u_resultImage;\n"
1597 "layout(std430, binding = 1) buffer inputBuffer\n"
1599 " int" << (is64BitFormat ? "64_t" : "") << " data[];\n"
1604 " int gx = int(gl_GlobalInvocationID.x);\n"
1605 " int gy = int(gl_GlobalInvocationID.y);\n"
1606 " int gz = int(gl_GlobalInvocationID.z);\n"
1607 " uint index = gx + (gy * gl_NumWorkGroups.x) + (gz *gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
1609 for(int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1611 fillShader << " imageStore(u_resultImage, " << ivecCords << ", " << ndx << ", i64vec4(inBuffer.data[index]));\n";
1614 fillShader << "}\n";
1616 programCollection.glslSources.add("fillShader") << glu::ComputeSource(fillShader.str())
1617 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1622 VkImageType imageViewTypeToImageType (VkImageViewType type)
1626 case VK_IMAGE_VIEW_TYPE_1D:
1627 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return VK_IMAGE_TYPE_1D;
1628 case VK_IMAGE_VIEW_TYPE_2D:
1629 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1630 case VK_IMAGE_VIEW_TYPE_CUBE:
1631 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return VK_IMAGE_TYPE_2D;
1632 case VK_IMAGE_VIEW_TYPE_3D: return VK_IMAGE_TYPE_3D;
1637 return VK_IMAGE_TYPE_2D;
1640 TestInstance* RobustnessExtsTestCase::createInstance (Context& context) const
1642 return new RobustnessExtsTestInstance(context, m_data);
1645 tcu::TestStatus RobustnessExtsTestInstance::iterate (void)
1647 const VkInstance instance = getInstance(m_context, m_data);
1648 const InstanceInterface& vki = getInstanceInterface(m_context, m_data);
1649 const VkDevice device = getLogicalDevice(m_context, m_data);
1650 const vk::DeviceInterface& vk = getDeviceInterface(m_context, m_data);
1651 const VkPhysicalDevice physicalDevice = chooseDevice(vki, instance, m_context.getTestContext().getCommandLine());
1652 SimpleAllocator allocator (vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1655 generateLayout(layout, m_data);
1657 // Get needed properties.
1658 VkPhysicalDeviceProperties2 properties;
1659 deMemset(&properties, 0, sizeof(properties));
1660 properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
1661 void** pNextTail = &properties.pNext;
1663 #ifndef CTS_USES_VULKANSC
1664 VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties;
1665 deMemset(&rayTracingProperties, 0, sizeof(rayTracingProperties));
1666 rayTracingProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV;
1669 VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties;
1670 deMemset(&robustness2Properties, 0, sizeof(robustness2Properties));
1671 robustness2Properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT;
1673 #ifndef CTS_USES_VULKANSC
1674 if (m_context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
1676 *pNextTail = &rayTracingProperties;
1677 pNextTail = &rayTracingProperties.pNext;
1681 if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
1683 *pNextTail = &robustness2Properties;
1684 pNextTail = &robustness2Properties.pNext;
1687 vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
1689 if (m_data.testRobustness2)
1691 if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
1692 robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
1693 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
1695 if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
1696 robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
1697 !deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
1698 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
1701 VkPipelineBindPoint bindPoint;
1703 switch (m_data.stage)
1706 bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1708 #ifndef CTS_USES_VULKANSC
1710 bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1714 bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1718 Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
1719 Move<vk::VkDescriptorPool> descriptorPool;
1720 Move<vk::VkDescriptorSet> descriptorSet;
1722 int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
1723 int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
1725 vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
1727 VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1729 #ifndef CTS_USES_VULKANSC
1730 VkDescriptorSetLayoutCreateFlags layoutCreateFlags = m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
1732 VkDescriptorSetLayoutCreateFlags layoutCreateFlags = 0;
1735 // Create a layout and allocate a descriptor set for it.
1737 const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo =
1739 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1743 (deUint32)bindings.size(),
1744 bindings.empty() ? DE_NULL : bindings.data()
1747 descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
1749 vk::DescriptorPoolBuilder poolBuilder;
1750 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
1751 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
1752 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
1753 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
1754 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
1755 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
1756 poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
1757 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
1759 descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
1761 const void *pNext = DE_NULL;
1763 if (!m_data.pushDescriptor)
1764 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
1766 de::MovePtr<BufferWithMemory> buffer;
1768 deUint8 *bufferPtr = DE_NULL;
1769 if (!m_data.nullDescriptor)
1771 // Create a buffer to hold data for all descriptors.
1772 VkDeviceSize size = de::max(
1773 (VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1),
1776 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1777 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1779 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
1781 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1782 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1784 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
1786 else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1788 size = m_data.bufferLen;
1791 buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1792 vk, device, allocator, makeBufferCreateInfo(size,
1793 VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
1794 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
1795 VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
1796 VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
1797 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT),
1798 MemoryRequirement::HostVisible));
1799 bufferPtr = (deUint8 *)buffer->getAllocation().getHostPtr();
1801 deMemset(bufferPtr, 0x3f, (size_t)size);
1803 deMemset(bufferPtr, 0, m_data.bufferLen);
1804 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1805 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1807 deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
1809 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1810 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1812 deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
1816 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1818 Move<VkDescriptorSetLayout> descriptorSetLayoutR64;
1819 Move<VkDescriptorPool> descriptorPoolR64;
1820 Move<VkDescriptorSet> descriptorSetFillImage;
1821 Move<VkShaderModule> shaderModuleFillImage;
1822 Move<VkPipelineLayout> pipelineLayoutFillImage;
1823 Move<VkPipeline> pipelineFillImage;
1825 Move<VkCommandPool> cmdPool = createCommandPool(vk, device, 0, queueFamilyIndex);
1826 Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1829 vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
1831 const VkImageSubresourceRange barrierRange =
1833 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1834 0u, // deUint32 baseMipLevel;
1835 VK_REMAINING_MIP_LEVELS, // deUint32 levelCount;
1836 0u, // deUint32 baseArrayLayer;
1837 VK_REMAINING_ARRAY_LAYERS // deUint32 layerCount;
1840 VkImageMemoryBarrier preImageBarrier =
1842 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
1843 DE_NULL, // const void* pNext
1844 0u, // VkAccessFlags srcAccessMask
1845 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
1846 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
1847 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout
1848 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
1849 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
1850 DE_NULL, // VkImage image
1851 barrierRange, // VkImageSubresourceRange subresourceRange;
1854 VkImageMemoryBarrier postImageBarrier =
1856 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1857 DE_NULL, // const void* pNext;
1858 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1859 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1860 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1861 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout;
1862 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1863 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1864 DE_NULL, // VkImage image;
1865 barrierRange, // VkImageSubresourceRange subresourceRange;
1868 vk::VkClearColorValue clearValue;
1869 clearValue.uint32[0] = 0u;
1870 clearValue.uint32[1] = 0u;
1871 clearValue.uint32[2] = 0u;
1872 clearValue.uint32[3] = 0u;
1874 beginCommandBuffer(vk, *cmdBuffer, 0u);
1876 typedef vk::Unique<vk::VkBufferView> BufferViewHandleUp;
1877 typedef de::SharedPtr<BufferViewHandleUp> BufferViewHandleSp;
1878 typedef de::SharedPtr<ImageWithMemory> ImageWithMemorySp;
1879 typedef de::SharedPtr<Unique<VkImageView> > VkImageViewSp;
1880 typedef de::MovePtr<BufferWithMemory> BufferWithMemoryMp;
1882 vector<BufferViewHandleSp> bufferViews(1);
1884 VkImageCreateFlags mutableFormatFlag = 0;
1885 // The 64-bit image tests use a view format which differs from the image.
1886 if (formatIsR64(m_data.format))
1887 mutableFormatFlag = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
1888 VkImageCreateFlags imageCreateFlags = mutableFormatFlag;
1889 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1890 imageCreateFlags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
1892 const bool featureSampledImage = ((getPhysicalDeviceFormatProperties(vki,
1894 m_data.format).optimalTilingFeatures &
1895 VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
1897 const VkImageUsageFlags usageSampledImage = (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
1899 const VkImageCreateInfo outputImageCreateInfo =
1901 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1902 DE_NULL, // const void* pNext;
1903 mutableFormatFlag, // VkImageCreateFlags flags;
1904 VK_IMAGE_TYPE_2D, // VkImageType imageType;
1905 m_data.format, // VkFormat format;
1907 DIM, // deUint32 width;
1908 DIM, // deUint32 height;
1909 1u // deUint32 depth;
1910 }, // VkExtent3D extent;
1911 1u, // deUint32 mipLevels;
1912 1u, // deUint32 arrayLayers;
1913 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1914 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1915 VK_IMAGE_USAGE_STORAGE_BIT
1917 | VK_IMAGE_USAGE_TRANSFER_SRC_BIT
1918 | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
1919 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1920 0u, // deUint32 queueFamilyIndexCount;
1921 DE_NULL, // const deUint32* pQueueFamilyIndices;
1922 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
1925 deUint32 width = m_data.imageDim[0];
1926 deUint32 height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] : 1;
1927 deUint32 depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
1928 deUint32 layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
1929 m_data.viewType != VK_IMAGE_VIEW_TYPE_1D &&
1930 m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
1931 m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
1933 const VkImageUsageFlags usageImage = (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT : (VkImageUsageFlagBits)0);
1935 const VkImageCreateInfo imageCreateInfo =
1937 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1938 DE_NULL, // const void* pNext;
1939 imageCreateFlags, // VkImageCreateFlags flags;
1940 imageViewTypeToImageType(m_data.viewType), // VkImageType imageType;
1941 m_data.format, // VkFormat format;
1943 width, // deUint32 width;
1944 height, // deUint32 height;
1945 depth // deUint32 depth;
1946 }, // VkExtent3D extent;
1947 1u, // deUint32 mipLevels;
1948 layers, // deUint32 arrayLayers;
1949 m_data.samples, // VkSampleCountFlagBits samples;
1950 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1953 | VK_IMAGE_USAGE_TRANSFER_SRC_BIT
1954 | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
1955 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1956 0u, // deUint32 queueFamilyIndexCount;
1957 DE_NULL, // const deUint32* pQueueFamilyIndices;
1958 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
1961 VkImageViewCreateInfo imageViewCreateInfo =
1963 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1964 DE_NULL, // const void* pNext;
1965 (VkImageViewCreateFlags)0u, // VkImageViewCreateFlags flags;
1966 DE_NULL, // VkImage image;
1967 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
1968 m_data.format, // VkFormat format;
1970 VK_COMPONENT_SWIZZLE_IDENTITY,
1971 VK_COMPONENT_SWIZZLE_IDENTITY,
1972 VK_COMPONENT_SWIZZLE_IDENTITY,
1973 VK_COMPONENT_SWIZZLE_IDENTITY
1974 }, // VkComponentMapping components;
1976 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1977 0u, // deUint32 baseMipLevel;
1978 VK_REMAINING_MIP_LEVELS, // deUint32 levelCount;
1979 0u, // deUint32 baseArrayLayer;
1980 VK_REMAINING_ARRAY_LAYERS // deUint32 layerCount;
1981 } // VkImageSubresourceRange subresourceRange;
1984 vector<ImageWithMemorySp> images(2);
1985 vector<VkImageViewSp> imageViews(2);
1987 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1989 deUint32 *ptr = (deUint32 *)bufferPtr;
1990 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
1993 BufferWithMemoryMp bufferImageR64;
1994 BufferWithMemoryMp bufferOutputImageR64;
1995 const VkDeviceSize sizeOutputR64 = 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height * outputImageCreateInfo.extent.depth;
1996 const VkDeviceSize sizeOneLayers = 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
1997 const VkDeviceSize sizeImageR64 = sizeOneLayers * layers;
1999 if (formatIsR64(m_data.format))
2001 bufferOutputImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2002 vk, device, allocator,
2003 makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
2004 MemoryRequirement::HostVisible));
2006 deUint64* bufferUint64Ptr = (deUint64 *)bufferOutputImageR64->getAllocation().getHostPtr();
2008 for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
2010 bufferUint64Ptr[ndx] = 0;
2012 flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
2014 bufferImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2015 vk, device, allocator,
2016 makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
2017 MemoryRequirement::HostVisible));
2019 for (deUint32 layerNdx = 0; layerNdx < layers; ++layerNdx)
2021 bufferUint64Ptr = (deUint64 *)bufferImageR64->getAllocation().getHostPtr();
2022 bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
2024 for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
2026 bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? layerNdx : 0);
2029 flushAlloc(vk, device, bufferImageR64->getAllocation());
2032 for (size_t b = 0; b < bindings.size(); ++b)
2034 VkDescriptorSetLayoutBinding &binding = bindings[b];
2036 if (binding.descriptorCount == 0)
2038 if (b == 1 && m_data.nullDescriptor)
2041 DE_ASSERT(binding.descriptorCount == 1);
2042 switch (binding.descriptorType)
2044 default: DE_ASSERT(0); // Fallthrough
2045 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2046 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2047 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2048 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2050 deUint32 *ptr = (deUint32 *)bufferPtr;
2051 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2054 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2055 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2057 deUint32 *ptr = (deUint32 *)bufferPtr;
2058 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2060 const vk::VkBufferViewCreateInfo viewCreateInfo =
2062 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
2064 (vk::VkBufferViewCreateFlags)0,
2066 m_data.format, // format
2067 (vk::VkDeviceSize)0, // offset
2068 (vk::VkDeviceSize)m_data.bufferLen // range
2070 vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
2071 bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2074 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2075 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2077 if (bindings.size() > 1 &&
2078 bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2080 if (m_data.format == VK_FORMAT_R64_SINT)
2081 imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2083 if (m_data.format == VK_FORMAT_R64_UINT)
2084 imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2089 images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2090 imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2094 images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2095 imageViewCreateInfo.viewType = m_data.viewType;
2097 imageViewCreateInfo.image = **images[b];
2098 imageViews[b] = VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2100 VkImage img = **images[b];
2101 const VkBuffer& bufferR64= ((b == 0) ? *(*bufferOutputImageR64) : *(*(bufferImageR64)));
2102 const VkImageCreateInfo& imageInfo = ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2103 const deUint32 clearLayers = b == 0 ? 1 : layers;
2105 if (!formatIsR64(m_data.format))
2107 preImageBarrier.image = img;
2110 if (formatIsFloat(m_data.format))
2112 deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2114 else if (formatIsSignedInt(m_data.format))
2116 deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2120 deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2123 postImageBarrier.image = img;
2125 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2127 for (unsigned int i = 0; i < clearLayers; ++i)
2129 const VkImageSubresourceRange clearRange =
2131 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2132 0u, // deUint32 baseMipLevel;
2133 VK_REMAINING_MIP_LEVELS, // deUint32 levelCount;
2134 i, // deUint32 baseArrayLayer;
2135 1 // deUint32 layerCount;
2138 vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
2140 // Use same data for all faces for cube(array), otherwise make value a function of the layer
2141 if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2143 if (formatIsFloat(m_data.format))
2144 clearValue.float32[0] += 1;
2145 else if (formatIsSignedInt(m_data.format))
2146 clearValue.int32[0] += 1;
2148 clearValue.uint32[0] += 1;
2151 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2155 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2157 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2158 const VkImageMemoryBarrier imageBarrierPre = makeImageMemoryBarrier(0,
2159 VK_ACCESS_SHADER_WRITE_BIT,
2160 VK_IMAGE_LAYOUT_UNDEFINED,
2161 VK_IMAGE_LAYOUT_GENERAL,
2164 const VkImageMemoryBarrier imageBarrierPost = makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
2165 VK_ACCESS_SHADER_READ_BIT,
2166 VK_IMAGE_LAYOUT_GENERAL,
2167 VK_IMAGE_LAYOUT_GENERAL,
2171 descriptorSetLayoutR64 =
2172 DescriptorSetLayoutBuilder()
2173 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2174 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2178 DescriptorPoolBuilder()
2179 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2180 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,1)
2181 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2183 descriptorSetFillImage = makeDescriptorSet(vk,
2186 *descriptorSetLayoutR64);
2188 shaderModuleFillImage = createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2189 pipelineLayoutFillImage = makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2190 pipelineFillImage = makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
2192 const VkDescriptorImageInfo descResultImageInfo = makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2193 const VkDescriptorBufferInfo descResultBufferInfo = makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2195 DescriptorSetUpdateBuilder()
2196 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2197 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2198 .update(vk, device);
2200 vk.cmdPipelineBarrier(*cmdBuffer,
2201 VK_PIPELINE_STAGE_HOST_BIT,
2202 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2203 (VkDependencyFlags)0,
2204 0, (const VkMemoryBarrier*)DE_NULL,
2205 0, (const VkBufferMemoryBarrier*)DE_NULL,
2206 1, &imageBarrierPre);
2208 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2209 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u, 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2211 vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2213 vk.cmdPipelineBarrier(*cmdBuffer,
2214 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2215 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2216 (VkDependencyFlags)0,
2217 0, (const VkMemoryBarrier*)DE_NULL,
2218 0, (const VkBufferMemoryBarrier*)DE_NULL,
2219 1, &imageBarrierPost);
2223 VkDeviceSize size = ((b == 0) ? sizeOutputR64 : sizeImageR64);
2224 const vector<VkBufferImageCopy> bufferImageCopy (1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, clearLayers)));
2226 copyBufferToImage(vk,
2231 VK_IMAGE_ASPECT_COLOR_BIT,
2233 clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2241 const VkSamplerCreateInfo samplerParams =
2243 VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
2244 DE_NULL, // const void* pNext;
2245 0, // VkSamplerCreateFlags flags;
2246 VK_FILTER_NEAREST, // VkFilter magFilter:
2247 VK_FILTER_NEAREST, // VkFilter minFilter;
2248 VK_SAMPLER_MIPMAP_MODE_NEAREST, // VkSamplerMipmapMode mipmapMode;
2249 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeU;
2250 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeV;
2251 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeW;
2252 0.0f, // float mipLodBias;
2253 VK_FALSE, // VkBool32 anistoropyEnable;
2254 1.0f, // float maxAnisotropy;
2255 VK_FALSE, // VkBool32 compareEnable;
2256 VK_COMPARE_OP_ALWAYS, // VkCompareOp compareOp;
2257 0.0f, // float minLod;
2258 0.0f, // float maxLod;
2259 formatIsFloat(m_data.format) ?
2260 VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2261 VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, // VkBorderColor borderColor;
2262 VK_FALSE // VkBool32 unnormalizedCoordinates;
2265 Move<VkSampler> sampler (createSampler(vk, device, &samplerParams));
2267 // Flush modified memory.
2268 if (!m_data.nullDescriptor)
2269 flushAlloc(vk, device, buffer->getAllocation());
2271 const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
2273 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
2275 (VkPipelineLayoutCreateFlags)0,
2276 1u, // setLayoutCount
2277 &descriptorSetLayout.get(), // pSetLayouts
2278 0u, // pushConstantRangeCount
2279 DE_NULL, // pPushConstantRanges
2282 Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2284 de::MovePtr<BufferWithMemory> copyBuffer;
2285 copyBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2286 vk, device, allocator, makeBufferCreateInfo(DIM*DIM*16, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2289 vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2290 vector<VkDescriptorImageInfo> imageInfoVec(2);
2291 vector<VkBufferView> bufferViewVec(2);
2292 vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2296 #ifndef CTS_USES_VULKANSC
2297 vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore,
2298 bufTemplateEntriesBefore,
2299 texelBufTemplateEntriesBefore;
2302 for (size_t b = 0; b < bindings.size(); ++b)
2304 VkDescriptorSetLayoutBinding &binding = bindings[b];
2305 // Construct the declaration for the binding
2306 if (binding.descriptorCount > 0)
2309 switch (binding.descriptorType)
2311 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2312 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2314 if (b == 1 && m_data.nullDescriptor)
2315 imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2317 imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2319 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2320 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2321 if (b == 1 && m_data.nullDescriptor)
2322 bufferViewVec[vecIndex] = DE_NULL;
2324 bufferViewVec[vecIndex] = **bufferViews[0];
2327 // Other descriptor types.
2328 if (b == 1 && m_data.nullDescriptor)
2329 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2331 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2335 VkWriteDescriptorSet w =
2337 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // sType
2339 m_data.pushDescriptor ? DE_NULL : *descriptorSet, // dstSet
2340 (deUint32)b, // binding
2341 0, // dstArrayElement
2342 1u, // descriptorCount
2343 binding.descriptorType, // descriptorType
2344 &imageInfoVec[vecIndex], // pImageInfo
2345 &bufferInfoVec[vecIndex], // pBufferInfo
2346 &bufferViewVec[vecIndex], // pTexelBufferView
2349 #ifndef CTS_USES_VULKANSC
2350 VkDescriptorUpdateTemplateEntry templateEntry =
2352 (deUint32)b, // uint32_t dstBinding;
2353 0, // uint32_t dstArrayElement;
2354 1u, // uint32_t descriptorCount;
2355 binding.descriptorType, // VkDescriptorType descriptorType;
2356 0, // size_t offset;
2357 0, // size_t stride;
2360 switch (binding.descriptorType)
2362 default: DE_ASSERT(0); // Fallthrough
2363 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2364 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2365 templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2366 imgTemplateEntriesBefore.push_back(templateEntry);
2368 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2369 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2370 templateEntry.offset = vecIndex * sizeof(VkBufferView);
2371 texelBufTemplateEntriesBefore.push_back(templateEntry);
2373 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2374 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2375 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2376 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2377 templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2378 bufTemplateEntriesBefore.push_back(templateEntry);
2385 writesBeforeBindVec.push_back(w);
2387 // Count the number of dynamic descriptors in this set.
2388 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2389 binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2396 // Make zeros have at least one element so &zeros[0] works
2397 vector<deUint32> zeros(de::max(1,numDynamic));
2398 deMemset(&zeros[0], 0, numDynamic * sizeof(deUint32));
2400 // Randomly select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate
2401 if (m_data.useTemplate)
2403 #ifndef CTS_USES_VULKANSC
2404 VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
2406 VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, // VkStructureType sType;
2407 NULL, // void* pNext;
2408 0, // VkDescriptorUpdateTemplateCreateFlags flags;
2409 0, // uint32_t descriptorUpdateEntryCount;
2410 DE_NULL, // uint32_t descriptorUpdateEntryCount;
2411 m_data.pushDescriptor ?
2412 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2413 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, // VkDescriptorUpdateTemplateType templateType;
2414 descriptorSetLayout.get(), // VkDescriptorSetLayout descriptorSetLayout;
2415 bindPoint, // VkPipelineBindPoint pipelineBindPoint;
2416 *pipelineLayout, // VkPipelineLayout pipelineLayout;
2420 void *templateVectorData[] =
2422 imageInfoVec.data(),
2423 bufferInfoVec.data(),
2424 bufferViewVec.data(),
2427 vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] =
2429 &imgTemplateEntriesBefore,
2430 &bufTemplateEntriesBefore,
2431 &texelBufTemplateEntriesBefore,
2434 if (m_data.pushDescriptor)
2436 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2438 if (templateVectorsBefore[i]->size())
2440 templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2441 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2442 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2443 vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout, 0, templateVectorData[i]);
2449 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2451 if (templateVectorsBefore[i]->size())
2453 templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2454 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2455 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2456 vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate, templateVectorData[i]);
2460 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2466 if (m_data.pushDescriptor)
2468 #ifndef CTS_USES_VULKANSC
2469 if (writesBeforeBindVec.size())
2471 vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2477 if (writesBeforeBindVec.size())
2479 vk.updateDescriptorSets(device, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2482 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2487 Move<VkPipeline> pipeline;
2488 Move<VkRenderPass> renderPass;
2489 Move<VkFramebuffer> framebuffer;
2491 de::MovePtr<BufferWithMemory> sbtBuffer;
2493 if (m_data.stage == STAGE_COMPUTE)
2495 const Unique<VkShaderModule> shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2497 pipeline = makeComputePipeline(vk, device, *pipelineLayout, *shader);
2500 #ifndef CTS_USES_VULKANSC
2501 else if (m_data.stage == STAGE_RAYGEN)
2503 const Unique<VkShaderModule> shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2505 const VkPipelineShaderStageCreateInfo shaderCreateInfo =
2507 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2509 (VkPipelineShaderStageCreateFlags)0,
2510 VK_SHADER_STAGE_RAYGEN_BIT_NV, // stage
2513 DE_NULL, // pSpecializationInfo
2516 VkRayTracingShaderGroupCreateInfoNV group =
2518 VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV,
2520 VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV, // type
2522 VK_SHADER_UNUSED_NV, // closestHitShader
2523 VK_SHADER_UNUSED_NV, // anyHitShader
2524 VK_SHADER_UNUSED_NV, // intersectionShader
2527 VkRayTracingPipelineCreateInfoNV pipelineCreateInfo = {
2528 VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV, // sType
2532 &shaderCreateInfo, // pStages
2535 0, // maxRecursionDepth
2536 *pipelineLayout, // layout
2537 (vk::VkPipeline)0, // basePipelineHandle
2538 0u, // basePipelineIndex
2541 pipeline = createRayTracingPipelineNV(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2543 sbtBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2544 vk, device, allocator, makeBufferCreateInfo(rayTracingProperties.shaderGroupHandleSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_RAY_TRACING_BIT_NV), MemoryRequirement::HostVisible));
2546 deUint32 *ptr = (deUint32 *)sbtBuffer->getAllocation().getHostPtr();
2547 invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2549 vk.getRayTracingShaderGroupHandlesNV(device, *pipeline, 0, 1, rayTracingProperties.shaderGroupHandleSize, ptr);
2554 const VkSubpassDescription subpassDesc =
2556 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags
2557 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
2558 0u, // deUint32 inputAttachmentCount
2559 DE_NULL, // const VkAttachmentReference* pInputAttachments
2560 0u, // deUint32 colorAttachmentCount
2561 DE_NULL, // const VkAttachmentReference* pColorAttachments
2562 DE_NULL, // const VkAttachmentReference* pResolveAttachments
2563 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment
2564 0u, // deUint32 preserveAttachmentCount
2565 DE_NULL // const deUint32* pPreserveAttachments
2568 const VkSubpassDependency subpassDependency =
2570 VK_SUBPASS_EXTERNAL, // deUint32 srcSubpass
2571 0, // deUint32 dstSubpass
2572 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags srcStageMask
2573 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // VkPipelineStageFlags dstStageMask
2574 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask
2575 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, // dstAccessMask
2576 VK_DEPENDENCY_BY_REGION_BIT // VkDependencyFlags dependencyFlags
2579 const VkRenderPassCreateInfo renderPassParams =
2581 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureTypei sType
2582 DE_NULL, // const void* pNext
2583 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
2584 0u, // deUint32 attachmentCount
2585 DE_NULL, // const VkAttachmentDescription* pAttachments
2586 1u, // deUint32 subpassCount
2587 &subpassDesc, // const VkSubpassDescription* pSubpasses
2588 1u, // deUint32 dependencyCount
2589 &subpassDependency // const VkSubpassDependency* pDependencies
2592 renderPass = createRenderPass(vk, device, &renderPassParams);
2594 const vk::VkFramebufferCreateInfo framebufferParams =
2596 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
2598 (vk::VkFramebufferCreateFlags)0,
2599 *renderPass, // renderPass
2600 0u, // attachmentCount
2601 DE_NULL, // pAttachments
2607 framebuffer = createFramebuffer(vk, device, &framebufferParams);
2609 const VkVertexInputBindingDescription vertexInputBindingDescription =
2611 0u, // deUint32 binding
2612 (deUint32)formatBytes, // deUint32 stride
2613 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate
2616 const VkVertexInputAttributeDescription vertexInputAttributeDescription =
2618 0u, // deUint32 location
2619 0u, // deUint32 binding
2620 m_data.format, // VkFormat format
2621 0u // deUint32 offset
2624 deUint32 numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
2626 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo =
2628 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2629 DE_NULL, // const void* pNext;
2630 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
2631 numAttribs, // deUint32 vertexBindingDescriptionCount;
2632 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
2633 numAttribs, // deUint32 vertexAttributeDescriptionCount;
2634 &vertexInputAttributeDescription // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
2637 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo =
2639 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
2640 DE_NULL, // const void* pNext;
2641 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
2642 (m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology topology;
2643 VK_FALSE // VkBool32 primitiveRestartEnable;
2646 const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo =
2648 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
2649 DE_NULL, // const void* pNext;
2650 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
2651 VK_FALSE, // VkBool32 depthClampEnable;
2652 (m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE, // VkBool32 rasterizerDiscardEnable;
2653 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
2654 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
2655 VK_FRONT_FACE_CLOCKWISE, // VkFrontFace frontFace;
2656 VK_FALSE, // VkBool32 depthBiasEnable;
2657 0.0f, // float depthBiasConstantFactor;
2658 0.0f, // float depthBiasClamp;
2659 0.0f, // float depthBiasSlopeFactor;
2660 1.0f // float lineWidth;
2663 const VkPipelineMultisampleStateCreateInfo multisampleStateCreateInfo =
2665 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType
2666 DE_NULL, // const void* pNext
2667 0u, // VkPipelineMultisampleStateCreateFlags flags
2668 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples
2669 VK_FALSE, // VkBool32 sampleShadingEnable
2670 1.0f, // float minSampleShading
2671 DE_NULL, // const VkSampleMask* pSampleMask
2672 VK_FALSE, // VkBool32 alphaToCoverageEnable
2673 VK_FALSE // VkBool32 alphaToOneEnable
2676 VkViewport viewport = makeViewport(DIM, DIM);
2677 VkRect2D scissor = makeRect2D(DIM, DIM);
2679 const VkPipelineViewportStateCreateInfo viewportStateCreateInfo =
2681 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType
2682 DE_NULL, // const void* pNext
2683 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags
2684 1u, // deUint32 viewportCount
2685 &viewport, // const VkViewport* pViewports
2686 1u, // deUint32 scissorCount
2687 &scissor // const VkRect2D* pScissors
2690 Move<VkShaderModule> fs;
2691 Move<VkShaderModule> vs;
2694 if (m_data.stage == STAGE_VERTEX)
2696 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2697 fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // bogus
2702 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
2703 fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2707 const VkPipelineShaderStageCreateInfo shaderCreateInfo[2] =
2710 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2712 (VkPipelineShaderStageCreateFlags)0,
2713 VK_SHADER_STAGE_VERTEX_BIT, // stage
2716 DE_NULL, // pSpecializationInfo
2719 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2721 (VkPipelineShaderStageCreateFlags)0,
2722 VK_SHADER_STAGE_FRAGMENT_BIT, // stage
2725 DE_NULL, // pSpecializationInfo
2729 const VkGraphicsPipelineCreateInfo graphicsPipelineCreateInfo =
2731 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
2732 DE_NULL, // const void* pNext;
2733 (VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags;
2734 numStages, // deUint32 stageCount;
2735 &shaderCreateInfo[0], // const VkPipelineShaderStageCreateInfo* pStages;
2736 &vertexInputStateCreateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
2737 &inputAssemblyStateCreateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
2738 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
2739 &viewportStateCreateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
2740 &rasterizationStateCreateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
2741 &multisampleStateCreateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
2742 DE_NULL, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
2743 DE_NULL, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
2744 DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
2745 pipelineLayout.get(), // VkPipelineLayout layout;
2746 renderPass.get(), // VkRenderPass renderPass;
2747 0u, // deUint32 subpass;
2748 DE_NULL, // VkPipeline basePipelineHandle;
2749 0 // int basePipelineIndex;
2752 pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
2755 const VkImageMemoryBarrier imageBarrier =
2757 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
2758 DE_NULL, // const void* pNext
2759 0u, // VkAccessFlags srcAccessMask
2760 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
2761 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
2762 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout
2763 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
2764 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
2765 **images[0], // VkImage image
2767 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask
2768 0u, // uint32_t baseMipLevel
2769 1u, // uint32_t mipLevels,
2770 0u, // uint32_t baseArray
2771 1u, // uint32_t arraySize
2775 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
2776 (VkDependencyFlags)0,
2777 0, (const VkMemoryBarrier*)DE_NULL,
2778 0, (const VkBufferMemoryBarrier*)DE_NULL,
2781 vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);
2783 if (!formatIsR64(m_data.format))
2785 VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
2786 VkClearValue clearColor = makeClearValueColorU32(0,0,0,0);
2788 vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
2792 const vector<VkBufferImageCopy> bufferImageCopy(1, makeBufferImageCopy(outputImageCreateInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
2793 copyBufferToImage(vk,
2795 *(*bufferOutputImageR64),
2798 VK_IMAGE_ASPECT_COLOR_BIT,
2800 1, **images[0], VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2803 VkMemoryBarrier memBarrier =
2805 VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
2807 0u, // srcAccessMask
2808 0u, // dstAccessMask
2811 memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
2812 memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
2813 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
2814 0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
2816 if (m_data.stage == STAGE_COMPUTE)
2818 vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
2820 #ifndef CTS_USES_VULKANSC
2821 else if (m_data.stage == STAGE_RAYGEN)
2823 vk.cmdTraceRaysNV(*cmdBuffer,
2833 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer,
2834 makeRect2D(DIM, DIM),
2835 0, DE_NULL, VK_SUBPASS_CONTENTS_INLINE);
2836 // Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
2837 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2839 VkDeviceSize zeroOffset = 0;
2840 VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
2841 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
2842 vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
2844 if (m_data.stage == STAGE_VERTEX)
2846 vk.cmdDraw(*cmdBuffer, DIM*DIM, 1u, 0u, 0u);
2850 vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
2852 endRenderPass(vk, *cmdBuffer);
2855 memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
2856 memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
2857 vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT,
2858 0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
2860 const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(DIM, DIM, 1u),
2861 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
2862 vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, ©Region);
2864 endCommandBuffer(vk, *cmdBuffer);
2866 submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
2868 void *ptr = copyBuffer->getAllocation().getHostPtr();
2870 invalidateAlloc(vk, device, copyBuffer->getAllocation());
2872 qpTestResult res = QP_TEST_RESULT_PASS;
2874 for (deUint32 i = 0; i < DIM*DIM; ++i)
2876 if (formatIsFloat(m_data.format))
2878 if (((float *)ptr)[i * numComponents] != 1.0f)
2880 res = QP_TEST_RESULT_FAIL;
2883 else if (formatIsR64(m_data.format))
2885 if (((deUint64 *)ptr)[i * numComponents] != 1)
2887 res = QP_TEST_RESULT_FAIL;
2892 if (((deUint32 *)ptr)[i * numComponents] != 1)
2894 res = QP_TEST_RESULT_FAIL;
2899 return tcu::TestStatus(res, qpGetTestResultName(res));
2904 static void createTests (tcu::TestCaseGroup* group, bool robustness2)
2906 tcu::TestContext& testCtx = group->getTestContext();
2912 const char* description;
2915 TestGroupCase fmtCases[] =
2917 { VK_FORMAT_R32_SINT, "r32i", "" },
2918 { VK_FORMAT_R32_UINT, "r32ui", "" },
2919 { VK_FORMAT_R32_SFLOAT, "r32f", "" },
2920 { VK_FORMAT_R32G32_SINT, "rg32i", "" },
2921 { VK_FORMAT_R32G32_UINT, "rg32ui", "" },
2922 { VK_FORMAT_R32G32_SFLOAT, "rg32f", "" },
2923 { VK_FORMAT_R32G32B32A32_SINT, "rgba32i", "" },
2924 { VK_FORMAT_R32G32B32A32_UINT, "rgba32ui", "" },
2925 { VK_FORMAT_R32G32B32A32_SFLOAT, "rgba32f", "" },
2926 { VK_FORMAT_R64_SINT, "r64i", "" },
2927 { VK_FORMAT_R64_UINT, "r64ui", "" },
2930 TestGroupCase fullDescCases[] =
2932 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, "uniform_buffer", "" },
2933 { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "storage_buffer", "" },
2934 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, "uniform_buffer_dynamic", "" },
2935 { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, "storage_buffer_dynamic", "" },
2936 { VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer", "" },
2937 { VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer", "" },
2938 { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image", "" },
2939 { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image", "" },
2940 { VERTEX_ATTRIBUTE_FETCH, "vertex_attribute_fetch", "" },
2943 TestGroupCase imgDescCases[] =
2945 { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image", "" },
2946 { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image", "" },
2949 TestGroupCase fullLenCases32Bit[] =
2951 { ~0U, "null_descriptor", "" },
2955 { 12, "len_12", "" },
2956 { 16, "len_16", "" },
2957 { 20, "len_20", "" },
2958 { 31, "len_31", "" },
2959 { 32, "len_32", "" },
2960 { 33, "len_33", "" },
2961 { 35, "len_35", "" },
2962 { 36, "len_36", "" },
2963 { 39, "len_39", "" },
2964 { 40, "len_41", "" },
2965 { 252, "len_252", "" },
2966 { 256, "len_256", "" },
2967 { 260, "len_260", "" },
2970 TestGroupCase fullLenCases64Bit[] =
2972 { ~0U, "null_descriptor", "" },
2975 { 16, "len_16", "" },
2976 { 24, "len_24", "" },
2977 { 32, "len_32", "" },
2978 { 40, "len_40", "" },
2979 { 62, "len_62", "" },
2980 { 64, "len_64", "" },
2981 { 66, "len_66", "" },
2982 { 70, "len_70", "" },
2983 { 72, "len_72", "" },
2984 { 78, "len_78", "" },
2985 { 80, "len_80", "" },
2986 { 504, "len_504", "" },
2987 { 512, "len_512", "" },
2988 { 520, "len_520", "" },
2991 TestGroupCase imgLenCases[] =
2996 TestGroupCase viewCases[] =
2998 { VK_IMAGE_VIEW_TYPE_1D, "1d", "" },
2999 { VK_IMAGE_VIEW_TYPE_2D, "2d", "" },
3000 { VK_IMAGE_VIEW_TYPE_3D, "3d", "" },
3001 { VK_IMAGE_VIEW_TYPE_CUBE, "cube", "" },
3002 { VK_IMAGE_VIEW_TYPE_1D_ARRAY, "1d_array", "" },
3003 { VK_IMAGE_VIEW_TYPE_2D_ARRAY, "2d_array", "" },
3004 { VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, "cube_array", "" },
3007 TestGroupCase sampCases[] =
3009 { VK_SAMPLE_COUNT_1_BIT, "samples_1", "" },
3010 { VK_SAMPLE_COUNT_4_BIT, "samples_4", "" },
3013 TestGroupCase stageCases[] =
3015 { STAGE_COMPUTE, "comp", "compute" },
3016 { STAGE_FRAGMENT, "frag", "fragment" },
3017 { STAGE_VERTEX, "vert", "vertex" },
3018 #ifndef CTS_USES_VULKANSC
3019 { STAGE_RAYGEN, "rgen", "raygen" },
3023 TestGroupCase volCases[] =
3025 { 0, "nonvolatile", "" },
3026 { 1, "volatile", "" },
3029 TestGroupCase unrollCases[] =
3031 { 0, "dontunroll", "" },
3032 { 1, "unroll", "" },
3035 TestGroupCase tempCases[] =
3037 { 0, "notemplate", "" },
3038 #ifndef CTS_USES_VULKANSC
3039 { 1, "template", "" },
3043 TestGroupCase pushCases[] =
3046 #ifndef CTS_USES_VULKANSC
3051 TestGroupCase fmtQualCases[] =
3053 { 0, "no_fmt_qual", "" },
3054 { 1, "fmt_qual", "" },
3057 TestGroupCase readOnlyCases[] =
3059 { 0, "readwrite", "" },
3060 { 1, "readonly", "" },
3063 for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
3065 de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name, pushCases[pushNdx].name));
3066 for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
3068 de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name, tempCases[tempNdx].name));
3069 for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
3071 de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name, fmtCases[fmtNdx].name));
3073 int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));
3075 for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
3077 de::MovePtr<tcu::TestCaseGroup> unrollGroup(new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name, unrollCases[unrollNdx].name));
3078 for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
3080 de::MovePtr<tcu::TestCaseGroup> volGroup(new tcu::TestCaseGroup(testCtx, volCases[volNdx].name, volCases[volNdx].name));
3082 int numDescCases = robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
3083 TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;
3085 for (int descNdx = 0; descNdx < numDescCases; descNdx++)
3087 de::MovePtr<tcu::TestCaseGroup> descGroup(new tcu::TestCaseGroup(testCtx, descCases[descNdx].name, descCases[descNdx].name));
3089 for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
3091 de::MovePtr<tcu::TestCaseGroup> rwGroup(new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name, readOnlyCases[roNdx].name));
3093 // readonly cases are just for storage_buffer
3094 if (readOnlyCases[roNdx].count != 0 &&
3095 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
3096 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
3099 for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
3101 de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name, fmtQualCases[fmtQualNdx].name));
3103 // format qualifier is only used for storage image and storage texel buffers
3104 if (fmtQualCases[fmtQualNdx].count &&
3105 !(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
3108 if (pushCases[pushNdx].count &&
3109 (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3112 const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
3113 int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
3114 TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
3116 for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
3118 if (lenCases[lenNdx].count != ~0U)
3120 bool bufferLen = lenCases[lenNdx].count != 0;
3121 bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
3122 if (bufferLen != bufferDesc)
3125 // Add template tests cases only for null_descriptor cases
3126 if (tempCases[tempNdx].count)
3130 if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
3131 ((lenCases[lenNdx].count % fmtSize) != 0) &&
3132 lenCases[lenNdx].count != ~0U)
3137 // "volatile" only applies to storage images/buffers
3138 if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
3141 de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name, lenCases[lenNdx].name));
3142 for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
3144 de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name, sampCases[sampNdx].name));
3145 for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
3147 if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
3148 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
3149 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
3151 // buffer descriptors don't have different dimensionalities. Only test "1D"
3155 if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
3156 sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3161 de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name, viewCases[viewNdx].name));
3162 for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
3164 Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
3165 VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
3166 VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
3167 #ifndef CTS_USES_VULKANSC
3168 if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
3170 allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
3171 allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;
3173 #endif // CTS_USES_VULKANSC
3175 if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
3176 currentStage != STAGE_VERTEX)
3179 deUint32 imageDim[3] = {5, 11, 6};
3180 if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
3181 viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
3182 imageDim[1] = imageDim[0];
3186 (VkFormat)fmtCases[fmtNdx].count, // VkFormat format;
3187 currentStage, // Stage stage;
3188 allShaderStages, // VkFlags allShaderStages;
3189 allPipelineStages, // VkFlags allPipelineStages;
3190 (int)descCases[descNdx].count, // VkDescriptorType descriptorType;
3191 (VkImageViewType)viewCases[viewNdx].count, // VkImageViewType viewType;
3192 (VkSampleCountFlagBits)sampCases[sampNdx].count, // VkSampleCountFlagBits samples;
3193 (int)lenCases[lenNdx].count, // int bufferLen;
3194 (bool)unrollCases[unrollNdx].count, // bool unroll;
3195 (bool)volCases[volNdx].count, // bool vol;
3196 (bool)(lenCases[lenNdx].count == ~0U), // bool nullDescriptor
3197 (bool)tempCases[tempNdx].count, // bool useTemplate
3198 (bool)fmtQualCases[fmtQualNdx].count, // bool formatQualifier
3199 (bool)pushCases[pushNdx].count, // bool pushDescriptor;
3200 (bool)robustness2, // bool testRobustness2;
3201 { imageDim[0], imageDim[1], imageDim[2] }, // deUint32 imageDim[3];
3202 (bool)(readOnlyCases[roNdx].count == 1), // bool readOnly;
3205 viewGroup->addChild(new RobustnessExtsTestCase(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].name, c));
3207 sampGroup->addChild(viewGroup.release());
3209 lenGroup->addChild(sampGroup.release());
3211 fmtQualGroup->addChild(lenGroup.release());
3213 // Put storage_buffer tests in separate readonly vs readwrite groups. Other types
3214 // go directly into descGroup
3215 if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3216 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3217 rwGroup->addChild(fmtQualGroup.release());
3219 descGroup->addChild(fmtQualGroup.release());
3222 if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3223 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3224 descGroup->addChild(rwGroup.release());
3227 volGroup->addChild(descGroup.release());
3229 unrollGroup->addChild(volGroup.release());
3231 fmtGroup->addChild(unrollGroup.release());
3233 tempGroup->addChild(fmtGroup.release());
3235 pushGroup->addChild(tempGroup.release());
3237 group->addChild(pushGroup.release());
3241 static void createRobustness2Tests (tcu::TestCaseGroup* group)
3243 createTests(group, /*robustness2=*/true);
3246 static void createImageRobustnessTests (tcu::TestCaseGroup* group)
3248 createTests(group, /*robustness2=*/false);
3251 static void cleanupGroup (tcu::TestCaseGroup* group)
3254 // Destroy singleton objects.
3255 Robustness2Int64AtomicsSingleton::destroy();
3256 ImageRobustnessInt64AtomicsSingleton::destroy();
3257 ImageRobustnessSingleton::destroy();
3258 Robustness2Singleton::destroy();
3261 tcu::TestCaseGroup* createRobustness2Tests (tcu::TestContext& testCtx)
3263 return createTestGroup(testCtx, "robustness2", "VK_EXT_robustness2 tests",
3264 createRobustness2Tests, cleanupGroup);
3267 tcu::TestCaseGroup* createImageRobustnessTests (tcu::TestContext& testCtx)
3269 return createTestGroup(testCtx, "image_robustness", "VK_EXT_image_robustness tests",
3270 createImageRobustnessTests, cleanupGroup);