1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2017-2019 The Khronos Group Inc.
6 * Copyright (c) 2018-2020 NVIDIA Corporation
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Vulkan robustness2 tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktRobustnessExtsTests.hpp"
27 #include "vkBufferWithMemory.hpp"
28 #include "vkImageWithMemory.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkQueryUtil.hpp"
31 #include "vkBuilderUtil.hpp"
32 #include "vkCmdUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkObjUtil.hpp"
35 #include "vkBarrierUtil.hpp"
36 #include "vktRobustnessUtil.hpp"
38 #include "vktTestGroupUtil.hpp"
39 #include "vktTestCase.hpp"
44 #include "deSharedPtr.hpp"
47 #include "tcuVectorType.hpp"
48 #include "tcuTestCase.hpp"
49 #include "tcuTestLog.hpp"
66 enum RobustnessFeatureBits
68 RF_IMG_ROBUSTNESS = (1 << 0 ),
69 RF_ROBUSTNESS2 = (1 << 1 ),
70 SIF_INT64ATOMICS = (1 << 2 ),
73 using RobustnessFeatures = deUint32;
75 // Class to wrap a singleton device with the indicated robustness features.
76 template <RobustnessFeatures FEATURES>
79 SingletonDevice (Context& context)
82 // Note we are already checking the needed features are available in checkSupport().
83 VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
84 VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
85 VkPhysicalDeviceScalarBlockLayoutFeatures scalarBlockLayoutFeatures = initVulkanStructure();
86 VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT shaderImageAtomicInt64Features = initVulkanStructure();
87 VkPhysicalDeviceFeatures2 features2 = initVulkanStructure();
89 features2.pNext = &scalarBlockLayoutFeatures;
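// Scalar block layout is chained unconditionally; the generated shaders declare their buffer blocks with layout(scalar, ...).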
91 if (FEATURES & RF_IMG_ROBUSTNESS)
93 DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));
94 imageRobustnessFeatures.pNext = features2.pNext;
95 features2.pNext = &imageRobustnessFeatures;
98 if (FEATURES & RF_ROBUSTNESS2)
100 DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));
101 robustness2Features.pNext = features2.pNext;
102 features2.pNext = &robustness2Features;
105 if (FEATURES & SIF_INT64ATOMICS)
107 DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"));
108 shaderImageAtomicInt64Features.pNext = features2.pNext;
109 features2.pNext = &shaderImageAtomicInt64Features;
112 context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
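// Create the test device with robust buffer access enabled plus whichever of the chained features the physical device reports as supported.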
113 m_logicalDevice = createRobustBufferAccessDevice(context, &features2);
117 static VkDevice getDevice(Context& context)
119 if (!m_singletonDevice)
120 m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
121 DE_ASSERT(m_singletonDevice);
122 return m_singletonDevice->m_logicalDevice.get();
125 static void destroy()
127 m_singletonDevice.clear();
131 Move<vk::VkDevice> m_logicalDevice;
132 static SharedPtr<SingletonDevice<FEATURES>> m_singletonDevice;
135 template <RobustnessFeatures FEATURES>
136 SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;
138 constexpr RobustnessFeatures kImageRobustness = RF_IMG_ROBUSTNESS;
139 constexpr RobustnessFeatures kRobustness2 = RF_ROBUSTNESS2;
140 constexpr RobustnessFeatures kShaderImageInt64Atomics = SIF_INT64ATOMICS;
142 using ImageRobustnessSingleton = SingletonDevice<kImageRobustness>;
143 using Robustness2Singleton = SingletonDevice<kRobustness2>;
145 using ImageRobustnessInt64AtomicsSingleton = SingletonDevice<kImageRobustness | kShaderImageInt64Atomics>;
146 using Robustness2Int64AtomicsSingleton = SingletonDevice<kRobustness2 | kShaderImageInt64Atomics>;
148 // Render target / compute grid dimensions
149 static const deUint32 DIM = 8;
151 // treated as a phony VkDescriptorType value
152 #define VERTEX_ATTRIBUTE_FETCH 999
166 VkFlags allShaderStages;
167 VkFlags allPipelineStages;
168 int/*VkDescriptorType*/ descriptorType;
169 VkImageViewType viewType;
170 VkSampleCountFlagBits samples;
176 bool formatQualifier;
178 bool testRobustness2;
179 deUint32 imageDim[3]; // width, height, depth or layers
183 static bool formatIsR64(const VkFormat& f)
187 case VK_FORMAT_R64_SINT:
188 case VK_FORMAT_R64_UINT:
195 // Returns the appropriate singleton device for the given case.
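// R64 formats additionally require VK_EXT_shader_image_atomic_int64, so they use the Int64Atomics singletons.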
196 VkDevice getLogicalDevice (Context& ctx, const CaseDef& caseDef)
198 if (formatIsR64(caseDef.format))
200 if (caseDef.testRobustness2)
201 return Robustness2Int64AtomicsSingleton::getDevice(ctx);
202 return ImageRobustnessInt64AtomicsSingleton::getDevice(ctx);
205 if (caseDef.testRobustness2)
206 return Robustness2Singleton::getDevice(ctx);
207 return ImageRobustnessSingleton::getDevice(ctx);
213 vector<VkDescriptorSetLayoutBinding> layoutBindings;
214 vector<deUint8> refData;
218 class RobustnessExtsTestInstance : public TestInstance
221 RobustnessExtsTestInstance (Context& context, const CaseDef& data);
222 ~RobustnessExtsTestInstance (void);
223 tcu::TestStatus iterate (void);
228 RobustnessExtsTestInstance::RobustnessExtsTestInstance (Context& context, const CaseDef& data)
229 : vkt::TestInstance (context)
234 RobustnessExtsTestInstance::~RobustnessExtsTestInstance (void)
238 class RobustnessExtsTestCase : public TestCase
241 RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data);
242 ~RobustnessExtsTestCase (void);
243 virtual void initPrograms (SourceCollections& programCollection) const;
244 virtual TestInstance* createInstance (Context& context) const;
245 virtual void checkSupport (Context& context) const;
251 RobustnessExtsTestCase::RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data)
252 : vkt::TestCase (context, name, desc)
257 RobustnessExtsTestCase::~RobustnessExtsTestCase (void)
261 static bool formatIsFloat(const VkFormat& f)
265 case VK_FORMAT_R32_SFLOAT:
266 case VK_FORMAT_R32G32_SFLOAT:
267 case VK_FORMAT_R32G32B32A32_SFLOAT:
274 static bool formatIsSignedInt(const VkFormat& f)
278 case VK_FORMAT_R32_SINT:
279 case VK_FORMAT_R64_SINT:
280 case VK_FORMAT_R32G32_SINT:
281 case VK_FORMAT_R32G32B32A32_SINT:
288 static bool supportsStores(int descriptorType)
290 switch (descriptorType)
292 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
293 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
294 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
295 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
302 Move<VkPipeline> makeComputePipeline (const DeviceInterface& vk,
303 const VkDevice device,
304 const VkPipelineLayout pipelineLayout,
305 const VkShaderModule shaderModule)
307 const VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
309 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
310 DE_NULL, // const void* pNext;
311 (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags;
312 VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlagBits stage;
313 shaderModule, // VkShaderModule module;
314 "main", // const char* pName;
315 DE_NULL, // const VkSpecializationInfo* pSpecializationInfo;
318 const VkComputePipelineCreateInfo pipelineCreateInfo =
320 VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO, // VkStructureType sType;
321 DE_NULL, // const void* pNext;
322 0u, // VkPipelineCreateFlags flags;
323 pipelineShaderStageParams, // VkPipelineShaderStageCreateInfo stage;
324 pipelineLayout, // VkPipelineLayout layout;
325 (vk::VkPipeline)0, // VkPipeline basePipelineHandle;
326 0, // deInt32 basePipelineIndex;
329 return createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo);
332 void RobustnessExtsTestCase::checkSupport(Context& context) const
334 const auto& vki = context.getInstanceInterface();
335 const auto physicalDevice = context.getPhysicalDevice();
337 // We need to query feature support using the physical device instead of using the reported context features because robustness2
338 // and image robustness are always disabled in the default device, even when the physical device supports them.
339 VkPhysicalDeviceRobustness2FeaturesEXT robustness2Features = initVulkanStructure();
340 VkPhysicalDeviceImageRobustnessFeaturesEXT imageRobustnessFeatures = initVulkanStructure();
341 VkPhysicalDeviceScalarBlockLayoutFeatures scalarLayoutFeatures = initVulkanStructure();
342 VkPhysicalDeviceFeatures2KHR features2 = initVulkanStructure();
344 context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");
346 context.requireDeviceFunctionality("VK_EXT_scalar_block_layout");
347 features2.pNext = &scalarLayoutFeatures;
349 if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
351 imageRobustnessFeatures.pNext = features2.pNext;
352 features2.pNext = &imageRobustnessFeatures;
355 if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
357 robustness2Features.pNext = features2.pNext;
358 features2.pNext = &robustness2Features;
361 vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
363 if (formatIsR64(m_data.format))
365 context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");
367 VkFormatProperties formatProperties;
368 vki.getPhysicalDeviceFormatProperties(context.getPhysicalDevice(), m_data.format, &formatProperties);
370 switch (m_data.descriptorType)
372 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
373 if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
374 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");
376 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
377 if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
378 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
380 case VERTEX_ATTRIBUTE_FETCH:
381 if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
382 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
384 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
385 if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
386 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
388 default: DE_ASSERT(true);
391 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
393 if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
394 TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
398 // Check needed properties and features
399 if (!scalarLayoutFeatures.scalarBlockLayout)
400 TCU_THROW(NotSupportedError, "Scalar block layout not supported");
402 if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
403 TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");
405 if (m_data.stage == STAGE_FRAGMENT && !features2.features.fragmentStoresAndAtomics)
406 TCU_THROW(NotSupportedError, "Fragment shader stores not supported");
408 if (m_data.stage == STAGE_RAYGEN)
409 context.requireDeviceFunctionality("VK_NV_ray_tracing");
411 switch (m_data.descriptorType)
413 default: DE_ASSERT(0); // Fallthrough
414 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
415 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
416 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
417 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
418 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
419 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
420 case VERTEX_ATTRIBUTE_FETCH:
421 if (m_data.testRobustness2)
423 if (!robustness2Features.robustBufferAccess2)
424 TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
428 // This case is not tested here.
432 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
433 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
434 if (m_data.testRobustness2)
436 if (!robustness2Features.robustImageAccess2)
437 TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
441 if (!imageRobustnessFeatures.robustImageAccess)
442 TCU_THROW(NotSupportedError, "robustImageAccess not supported");
447 if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
448 TCU_THROW(NotSupportedError, "nullDescriptor not supported");
450 // The fill shader for 64-bit multisample image tests uses a storage image.
451 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && formatIsR64(m_data.format) &&
452 !features2.features.shaderStorageImageMultisample)
453 TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");
455 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
456 m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
457 !features2.features.shaderStorageImageMultisample)
458 TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");
460 if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(1, 1, 0)))
461 TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");
463 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
464 !m_data.formatQualifier &&
465 (!features2.features.shaderStorageImageReadWithoutFormat || !features2.features.shaderStorageImageWriteWithoutFormat))
466 TCU_THROW(NotSupportedError, "shaderStorageImageReadWithoutFormat or shaderStorageImageWriteWithoutFormat not supported");
468 if (m_data.pushDescriptor)
469 context.requireDeviceFunctionality("VK_KHR_push_descriptor");
471 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && !features2.features.imageCubeArray)
472 TCU_THROW(NotSupportedError, "Cube array image view type not supported");
474 if (context.isDeviceFunctionalitySupported("VK_KHR_portability_subset") && !context.getDeviceFeatures().robustBufferAccess)
475 TCU_THROW(NotSupportedError, "VK_KHR_portability_subset: robustBufferAccess not supported by this implementation");
478 void generateLayout(Layout &layout, const CaseDef &caseDef)
480 vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
481 int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
482 bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);
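// Binding 0 is the output storage image; binding 1, when present, holds the descriptor under test. Vertex attribute fetch needs no test descriptor, hence the single binding.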
484 for (deUint32 b = 0; b < layout.layoutBindings.size(); ++b)
486 VkDescriptorSetLayoutBinding &binding = bindings[b];
488 binding.pImmutableSamplers = NULL;
489 binding.stageFlags = caseDef.allShaderStages;
490 binding.descriptorCount = 1;
494 binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
495 else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
496 binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;
499 if (caseDef.nullDescriptor)
502 if (caseDef.bufferLen == 0)
504 // Clear color values for image tests
505 static deUint32 urefData[4] = { 0x12345678, 0x23456789, 0x34567890, 0x45678901 };
506 static deUint64 urefData64[4] = { 0x1234567887654321, 0x234567899, 0x345678909, 0x456789019 };
507 static float frefData[4] = { 123.f, 234.f, 345.f, 456.f };
509 if (formatIsR64(caseDef.format))
511 layout.refData.resize(32);
512 deUint64 *ptr = (deUint64 *)layout.refData.data();
514 for (unsigned int i = 0; i < 4; ++i)
516 ptr[i] = urefData64[i];
521 layout.refData.resize(16);
522 deMemcpy(layout.refData.data(), formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData, sizeof(frefData));
527 layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7: ~3));
528 for (unsigned int i = 0; i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(deUint64) : sizeof(deUint32)); ++i)
530 if (formatIsFloat(caseDef.format))
532 float *f = (float *)layout.refData.data() + i;
533 *f = 2.0f*(float)i + 3.0f;
535 if (formatIsR64(caseDef.format))
537 deUint64 *u = (deUint64 *)layout.refData.data() + i;
542 int *u = (int *)layout.refData.data() + i;
549 static string genFetch(const CaseDef &caseDef, int numComponents, const string& vecType, const string& coord, const string& lod)
552 // Fetch from the descriptor.
553 switch (caseDef.descriptorType)
555 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
556 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
557 s << vecType << "(ubo0_1.val[" << coord << "]";
558 for (int i = numComponents; i < 4; ++i) s << ", 0";
561 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
562 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
563 s << vecType << "(ssbo0_1.val[" << coord << "]";
564 for (int i = numComponents; i < 4; ++i) s << ", 0";
567 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
568 s << "texelFetch(texbo0_1, " << coord << ")";
570 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
571 s << "imageLoad(image0_1, " << coord << ")";
573 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
574 if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
575 s << "texelFetch(texture0_1, " << coord << ")";
577 s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
579 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
580 s << "imageLoad(image0_1, " << coord << ")";
582 case VERTEX_ATTRIBUTE_FETCH:
585 default: DE_ASSERT(0);
590 static const int storeValue = 123;
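// storeValue is an arbitrary marker written by genStore; for writable, non-volatile descriptors the checks below also accept it as a valid result.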
592 // Get the value stored by genStore.
593 static string getStoreValue(int descriptorType, int numComponents, const string& vecType, const string& bufType)
596 switch (descriptorType)
598 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
599 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
600 s << vecType << "(" << bufType << "(" << storeValue << ")";
601 for (int i = numComponents; i < 4; ++i) s << ", 0";
604 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
605 s << vecType << "(" << storeValue << ")";
607 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
608 s << vecType << "(" << storeValue << ")";
610 default: DE_ASSERT(0);
615 static string genStore(int descriptorType, const string& vecType, const string& bufType, const string& coord)
618 // Store to the descriptor.
619 switch (descriptorType)
621 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
622 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
623 s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
625 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
626 s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
628 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
629 s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
631 default: DE_ASSERT(0);
636 static string genAtomic(int descriptorType, const string& bufType, const string& coord)
639 // Perform an atomic operation on the descriptor. The value doesn't matter, since we only test out-of-bounds coordinates.
640 switch (descriptorType)
642 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
643 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
644 s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
646 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
647 s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
649 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
650 s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
652 default: DE_ASSERT(0);
657 static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
659 const char* orderPart;
660 const char* typePart;
662 switch (format.order)
664 case tcu::TextureFormat::R: orderPart = "r"; break;
665 case tcu::TextureFormat::RG: orderPart = "rg"; break;
666 case tcu::TextureFormat::RGB: orderPart = "rgb"; break;
667 case tcu::TextureFormat::RGBA: orderPart = "rgba"; break;
670 DE_FATAL("Impossible");
676 case tcu::TextureFormat::FLOAT: typePart = "32f"; break;
677 case tcu::TextureFormat::HALF_FLOAT: typePart = "16f"; break;
679 case tcu::TextureFormat::UNSIGNED_INT64: typePart = "64ui"; break;
680 case tcu::TextureFormat::UNSIGNED_INT32: typePart = "32ui"; break;
681 case tcu::TextureFormat::UNSIGNED_INT16: typePart = "16ui"; break;
682 case tcu::TextureFormat::UNSIGNED_INT8: typePart = "8ui"; break;
684 case tcu::TextureFormat::SIGNED_INT64: typePart = "64i"; break;
685 case tcu::TextureFormat::SIGNED_INT32: typePart = "32i"; break;
686 case tcu::TextureFormat::SIGNED_INT16: typePart = "16i"; break;
687 case tcu::TextureFormat::SIGNED_INT8: typePart = "8i"; break;
689 case tcu::TextureFormat::UNORM_INT16: typePart = "16"; break;
690 case tcu::TextureFormat::UNORM_INT8: typePart = "8"; break;
692 case tcu::TextureFormat::SNORM_INT16: typePart = "16_snorm"; break;
693 case tcu::TextureFormat::SNORM_INT8: typePart = "8_snorm"; break;
696 DE_FATAL("Impossible");
700 return std::string() + orderPart + typePart;
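// genCoord builds an integer coordinate of the requested dimensionality; roughly, the component selected by 'dim' carries the expression in 'c', the remaining components are 0, and the sample index is appended for multisample accesses.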
703 string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
708 if (samples != VK_SAMPLE_COUNT_1_BIT)
711 string coord = "ivec" + to_string(numCoords) + "(";
713 for (int i = 0; i < numCoords; ++i)
719 if (i < numCoords - 1)
724 // Append sample coordinate
725 if (samples != VK_SAMPLE_COUNT_1_BIT)
728 if (dim == numCoords)
736 // Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
737 string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
740 return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";
742 string coord = "vec" + to_string(numCoords) + "(";
744 for (int i = 0; i < numCoords; ++i)
750 if (i < numNormalizedCoords)
751 coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
752 if (i < numCoords - 1)
759 void RobustnessExtsTestCase::initPrograms (SourceCollections& programCollection) const
761 VkFormat format = m_data.format;
764 generateLayout(layout, m_data);
766 if (layout.layoutBindings.size() > 1 &&
767 layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
769 if (format == VK_FORMAT_R64_SINT)
770 format = VK_FORMAT_R32G32_SINT;
772 if (format == VK_FORMAT_R64_UINT)
773 format = VK_FORMAT_R32G32_UINT;
776 std::stringstream decls, checks;
778 const string r64 = formatIsR64(format) ? "64" : "";
779 const string i64Type = formatIsR64(format) ? "64_t" : "";
780 const string vecType = formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
781 const string qLevelType = vecType == "vec4" ? "float" : ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) : ("uint" + i64Type);
783 decls << "uvec4 abs(uvec4 x) { return x; }\n";
784 if (formatIsR64(format))
785 decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
786 decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";
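// smod() yields a non-negative remainder so that negative loop counters still map to valid refData indices.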
789 const int componentSize = (formatIsR64(format) ? 8 : 4);
790 int refDataNumElements = deIntRoundToPow2(((int)layout.refData.size() / componentSize), 4);
791 // Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
792 // robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
793 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
794 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
796 refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
798 if (m_data.nullDescriptor)
799 refDataNumElements = 4;
801 if (formatIsFloat(format))
803 decls << "float refData[" << refDataNumElements << "] = {";
805 for (i = 0; i < (int)layout.refData.size() / 4; ++i)
809 decls << ((const float *)layout.refData.data())[i];
811 while (i < refDataNumElements)
819 else if (formatIsR64(format))
821 decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
823 for (i = 0; i < (int)layout.refData.size() / 8; ++i)
827 decls << ((const deUint64 *)layout.refData.data())[i] << "l";
829 while (i < refDataNumElements)
839 decls << "int" << " refData[" << refDataNumElements << "] = {";
841 for (i = 0; i < (int)layout.refData.size() / 4; ++i)
845 decls << ((const int *)layout.refData.data())[i];
847 while (i < refDataNumElements)
857 decls << vecType << " zzzz = " << vecType << "(0);\n";
858 decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
859 decls << vecType << " expectedIB;\n";
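// zzzz and zzzo are the result patterns a robust out-of-bounds read is permitted to return: all zeros, or zeros with alpha forced to one.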
861 string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
862 string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
863 string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));
865 string imageDim = "";
866 int numCoords, numNormalizedCoords;
867 bool layered = false;
868 switch (m_data.viewType)
870 default: DE_ASSERT(0); // Fallthrough
871 case VK_IMAGE_VIEW_TYPE_1D: imageDim = "1D"; numCoords = 1; numNormalizedCoords = 1; break;
872 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: imageDim = "1DArray"; numCoords = 2; numNormalizedCoords = 1; layered = true; break;
873 case VK_IMAGE_VIEW_TYPE_2D: imageDim = "2D"; numCoords = 2; numNormalizedCoords = 2; break;
874 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: imageDim = "2DArray"; numCoords = 3; numNormalizedCoords = 2; layered = true; break;
875 case VK_IMAGE_VIEW_TYPE_3D: imageDim = "3D"; numCoords = 3; numNormalizedCoords = 3; break;
876 case VK_IMAGE_VIEW_TYPE_CUBE: imageDim = "Cube"; numCoords = 3; numNormalizedCoords = 3; break;
877 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: imageDim = "CubeArray"; numCoords = 4; numNormalizedCoords = 3; layered = true; break;
879 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
881 switch (m_data.viewType)
883 default: DE_ASSERT(0); // Fallthrough
884 case VK_IMAGE_VIEW_TYPE_2D: imageDim = "2DMS"; break;
885 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: imageDim = "2DMSArray"; break;
889 bool dataDependsOnLayer = (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) && !m_data.nullDescriptor;
891 // Special case imageLoad(imageCubeArray, ...) which uses ivec3
892 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
893 m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
898 int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
900 if (numComponents == 1)
901 bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
903 bufType = imgprefix + "vec" + std::to_string(numComponents);
905 // For UBOs, which have a declared size in the shader, don't access outside that size.
906 bool declaredSize = false;
907 switch (m_data.descriptorType) {
908 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
909 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
916 checks << " int inboundcoords, clampedLayer;\n";
917 checks << " " << vecType << " expectedIB2;\n";
921 checks << " [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
923 checks << " [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
928 checks << " [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
930 checks << " [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";
933 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
934 checks << " int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
936 checks << " int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";
938 decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";
940 const char *vol = m_data.vol ? "volatile " : "";
941 const char *ro = m_data.readOnly ? "readonly " : "";
943 // Construct the declaration for the binding
944 switch (m_data.descriptorType)
946 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
947 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
948 decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";
950 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
951 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
952 decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
953 decls << "layout(scalar, set = 0, binding = 1) " << vol << ro << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
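// The _pad variant redeclares the same buffer behind a leading vec4, presumably so the runtime-sized .length() check below also covers a nonzero member offset.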
955 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
958 case VK_FORMAT_R64_SINT:
959 decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
961 case VK_FORMAT_R64_UINT:
962 decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
965 decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";
968 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
969 decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "imageBuffer image0_1;\n";
971 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
972 decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image" << imageDim << " image0_1;\n";
974 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
977 case VK_FORMAT_R64_SINT:
978 decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1; \n";
980 case VK_FORMAT_R64_UINT:
981 decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1; \n";
984 decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";
988 case VERTEX_ATTRIBUTE_FETCH:
989 if (formatIsR64(format))
991 decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t")) << " attr;\n";
995 decls << "layout(location = 0) in " << vecType << " attr;\n";
998 default: DE_ASSERT(0);
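// For illustration only: a storage buffer case with VK_FORMAT_R32_UINT (one component,
// no volatile/readonly qualifiers) would emit a declaration roughly like
//   layout(scalar, set = 0, binding = 1) buffer sbodef0_1 { uint val[]; } ssbo0_1;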
1004 switch (m_data.descriptorType)
1006 default: DE_ASSERT(0); // Fallthrough
1007 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1008 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1009 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1010 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1011 expectedOOB = "zzzz";
1014 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1015 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1016 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1017 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1018 case VERTEX_ATTRIBUTE_FETCH:
1019 if (numComponents == 1)
1021 expectedOOB = "zzzo";
1023 else if (numComponents == 2)
1025 expectedOOB = "zzzo";
1029 expectedOOB = "zzzz";
1036 switch (m_data.descriptorType)
1038 default: DE_ASSERT(0); // Fallthrough
1039 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1040 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1041 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1042 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1043 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1044 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1045 case VERTEX_ATTRIBUTE_FETCH:
1048 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1049 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1054 if (m_data.nullDescriptor)
1056 checks << " expectedIB = zzzz;\n";
1057 checks << " inboundcoords = 0;\n";
1058 checks << " int paddedinboundcoords = 0;\n";
1059 // Vertex attribute fetch still gets format conversion applied
1060 if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
1061 expectedOOB = "zzzz";
1065 checks << " expectedIB.x = refData[" << idx << "];\n";
1066 if (numComponents > 1)
1068 checks << " expectedIB.y = refData[" << idx << "+1];\n";
1072 checks << " expectedIB.y = 0;\n";
1074 if (numComponents > 2)
1076 checks << " expectedIB.z = refData[" << idx << "+2];\n";
1077 checks << " expectedIB.w = refData[" << idx << "+3];\n";
1081 checks << " expectedIB.z = 0;\n";
1082 checks << " expectedIB.w = " << defaultw << ";\n";
1085 switch (m_data.descriptorType)
1087 default: DE_ASSERT(0); // Fallthrough
1088 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1089 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1090 // UBOs can either strictly bounds check against inboundcoords, or can
1091 // return the contents from memory for the range padded up to paddedinboundcoords.
1092 checks << " int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
1094 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1095 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1096 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1097 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1098 case VERTEX_ATTRIBUTE_FETCH:
1099 checks << " inboundcoords = " << layout.refData.size() / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32)) / numComponents << ";\n";
1101 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1102 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1103 // set per-component below
1108 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
1109 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
1110 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1111 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1114 for (int i = 0; i < numCoords; ++i)
1116 // Treat i==3 coord (cube array layer) like i == 2
1117 deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
1118 if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1119 checks << " inboundcoords = " << coordDim << ";\n";
1121 string coord = genCoord("c", numCoords, m_data.samples, i);
1122 string inboundcoords =
1123 m_data.nullDescriptor ? "0" :
1124 (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ? to_string(m_data.samples) : "inboundcoords";
1126 checks << " if (c < 0 || c >= " << inboundcoords << ") " << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
1127 if (m_data.formatQualifier &&
1128 (format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
1130 checks << " if (c < 0 || c >= " << inboundcoords << ") " << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
1135 for (int i = 0; i < numCoords; ++i)
1137 // Treat i==3 coord (cube array layer) like i == 2
1138 deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
1139 if (!m_data.nullDescriptor)
1141 switch (m_data.descriptorType)
1145 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
1146 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
1147 checks << " inboundcoords = " << coordDim << ";\n";
1152 string coord = genCoord("c", numCoords, m_data.samples, i);
1154 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1156 if (formatIsR64(format))
1158 checks << " temp.x = attr;\n";
1159 checks << " temp.y = 0l;\n";
1160 checks << " temp.z = 0l;\n";
1161 checks << " temp.w = 0l;\n";
1162 checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else temp -= zzzz;\n";
1166 checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1167 checks << " if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else temp -= " << expectedOOB << ";\n";
1169 // Accumulate any incorrect values.
1170 checks << " accum += abs(temp);\n";
1172 // Skip texelFetch testing for cube(array) - texelFetch doesn't support it
1173 if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
1174 !(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1175 (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
1177 checks << " temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
1179 checks << " expectedIB2 = expectedIB;\n";
1181 // Expected data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1182 if (dataDependsOnLayer && i == numNormalizedCoords)
1183 checks << " if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";
1185 if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
1187 if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1189 checks << " if (temp == zzzz) temp = " << vecType << "(0);\n";
1190 if (m_data.formatQualifier && numComponents < 4)
1191 checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";
1192 checks << " else temp = " << vecType << "(1);\n";
1195 // multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
1196 checks << " if (c >= 0 && c < " << m_data.samples << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";
1200 // Storage buffers may be split into per-component loads. Generate a second
1201 // expected out of bounds value where some subset of the components are
1202 // actually in-bounds. If both loads and stores are split into per-component
1203 // accesses, then the result value can be a mix of storeValue and zero.
1204 string expectedOOB2 = expectedOOB;
1205 string expectedOOB3 = expectedOOB;
1206 if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1207 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
1208 !m_data.nullDescriptor)
1210 int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
1211 int mod = (int)((len / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32))) % numComponents);
1212 string sstoreValue = de::toString(storeValue);
1218 expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
1219 expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
1222 expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
1223 expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
1226 expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
1227 expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";
1232 // Entirely in-bounds.
1233 checks << " if (c >= 0 && c < inboundcoords) {\n"
1234 " if (temp == expectedIB2) temp = " << vecType << "(0); else temp = " << vecType << "(1);\n"
1237 // normal out-of-bounds value
1238 if (m_data.testRobustness2)
1239 checks << " else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
1241 // image_robustness relaxes the requirement on alpha, which is allowed to be either zero or one
1242 checks << " else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";
1244 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1245 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1247 checks << " else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = " << vecType << "(0);\n";
1250 // null descriptor loads with image format layout qualifier that doesn't include alpha may return alpha=1
1251 if (m_data.nullDescriptor && m_data.formatQualifier &&
1252 (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
1254 checks << " else if (temp == zzzo) temp = " << vecType << "(0);\n";
1256 // non-volatile value replaced with stored value
1257 if (supportsStores(m_data.descriptorType) && !m_data.vol)
1258 checks << " else if (temp == " << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType) << ") temp = " << vecType << "(0);\n";
1260 // value straddling the boundary, returning a partial vector
1261 if (expectedOOB2 != expectedOOB)
1262 checks << " else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType << "(0);\n";
1263 if (expectedOOB3 != expectedOOB)
1264 checks << " else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType << "(0);\n";
1267 checks << " else temp = " << vecType << "(1);\n";
1269 // Accumulate any incorrect values.
1270 checks << " accum += abs(temp);\n";
1272 // Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
1273 if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER && m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1275 // Fetch from an out of bounds mip level. Expect this to always return the OOB value.
1276 string coord0 = genCoord("0", numCoords, m_data.samples, i);
1277 checks << " if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c") << "; else temp = " << vecType << "(0);\n";
1278 checks << " if (c != 0) temp -= " << expectedOOB << ";\n";
1279 checks << " accum += abs(temp);\n";
1282 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1283 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1285 string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);
1287 checks << " expectedIB2 = expectedIB;\n";
1289 // Data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
1290 if (dataDependsOnLayer && i == numNormalizedCoords)
1292 checks << " clampedLayer = clamp(c, 0, " << coordDim-1 << ");\n";
1293 checks << " expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";
1296 stringstream normexpected;
1297 // Cubemap fetches are always in-bounds. The layer coordinate is clamped, so it is always in-bounds.
1298 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
1299 m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
1300 (layered && i == numCoords-1))
1301 normexpected << " temp -= expectedIB2;\n";
1304 normexpected << " if (c >= 0 && c < inboundcoords)\n";
1305 normexpected << " temp -= expectedIB2;\n";
1306 normexpected << " else\n";
1307 if (m_data.testRobustness2)
1308 normexpected << " temp -= " << expectedOOB << ";\n";
1310 // image_robustness relaxes the requirement on alpha, which is allowed to be either zero or one
1311 normexpected << " temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";
1314 checks << " temp = texture(texture0_1, " << coordNorm << ");\n";
1315 checks << normexpected.str();
1316 checks << " accum += abs(temp);\n";
1317 checks << " temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
1318 checks << normexpected.str();
1319 checks << " accum += abs(temp);\n";
1320 checks << " temp = textureGrad(texture0_1, " << coordNorm << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
1321 checks << normexpected.str();
1322 checks << " accum += abs(temp);\n";
1324 if (m_data.nullDescriptor)
1326 const char *sizeswiz;
1327 switch (m_data.viewType)
1329 default: DE_ASSERT(0); // Fallthrough
1330 case VK_IMAGE_VIEW_TYPE_1D: sizeswiz = ".xxxx"; break;
1331 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: sizeswiz = ".xyxx"; break;
1332 case VK_IMAGE_VIEW_TYPE_2D: sizeswiz = ".xyxx"; break;
1333 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: sizeswiz = ".xyzx"; break;
1334 case VK_IMAGE_VIEW_TYPE_3D: sizeswiz = ".xyzx"; break;
1335 case VK_IMAGE_VIEW_TYPE_CUBE: sizeswiz = ".xyxx"; break;
1336 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: sizeswiz = ".xyzx"; break;
1338 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
1340 if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1342 checks << " temp = textureSize(texture0_1, 0)" << sizeswiz <<";\n";
1343 checks << " accum += abs(temp);\n";
1345 // checking textureSize with clearly out of range LOD values
1346 checks << " temp = textureSize(texture0_1, " << -i << ")" << sizeswiz <<";\n";
1347 checks << " accum += abs(temp);\n";
1348 checks << " temp = textureSize(texture0_1, " << (std::numeric_limits<deInt32>::max() - i) << ")" << sizeswiz <<";\n";
1349 checks << " accum += abs(temp);\n";
1353 checks << " temp = textureSize(texture0_1)" << sizeswiz <<";\n";
1354 checks << " accum += abs(temp);\n";
1355 checks << " temp = textureSamples(texture0_1).xxxx;\n";
1356 checks << " accum += abs(temp);\n";
1359 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
1361 if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1363 checks << " temp = imageSize(image0_1)" << sizeswiz <<";\n";
1364 checks << " accum += abs(temp);\n";
1368 checks << " temp = imageSize(image0_1)" << sizeswiz <<";\n";
1369 checks << " accum += abs(temp);\n";
1370 checks << " temp = imageSamples(image0_1).xxxx;\n";
1371 checks << " accum += abs(temp);\n";
1374 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1375 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1377 // expect zero for runtime-sized array .length()
1378 checks << " temp = " << vecType << "(ssbo0_1.val.length());\n";
1379 checks << " accum += abs(temp);\n";
1380 checks << " temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
1381 checks << " accum += abs(temp);\n";
1387 // Outside the coordinate loop because we only need to call it once
1388 if (m_data.nullDescriptor &&
1389 m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1390 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1392 checks << " temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
1393 checks << " temp = " << vecType << "(temp_ql);\n";
1394 checks << " accum += abs(temp);\n";
1397 const bool is64BitFormat = formatIsR64(m_data.format);
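// 64-bit formats need the explicit int64 arithmetic and 64-bit image extensions in every generated shader.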
1398 std::string SupportR64 = (is64BitFormat ?
1399 std::string("#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
1400 "#extension GL_EXT_shader_image_int64 : require\n") :
1403 switch (m_data.stage)
1405 default: DE_ASSERT(0); // Fallthrough
1408 std::stringstream css;
1410 "#version 450 core\n"
1411 "#extension GL_EXT_nonuniform_qualifier : enable\n"
1412 "#extension GL_EXT_scalar_block_layout : enable\n"
1413 "#extension GL_EXT_samplerless_texture_functions : enable\n"
1414 "#extension GL_EXT_control_flow_attributes : enable\n"
1415 "#extension GL_EXT_shader_image_load_formatted : enable\n"
1418 "layout(local_size_x = 1, local_size_y = 1) in;\n"
1421 " " << vecType << " accum = " << vecType << "(0);\n"
1422 " " << vecType << " temp;\n"
1423 " " << qLevelType << " temp_ql;\n"
1425 " " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1426 " imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
1429 programCollection.glslSources.add("test") << glu::ComputeSource(css.str())
1430 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1435 std::stringstream css;
1437 "#version 460 core\n"
1438 "#extension GL_EXT_samplerless_texture_functions : enable\n"
1439 "#extension GL_EXT_scalar_block_layout : enable\n"
1440 "#extension GL_EXT_nonuniform_qualifier : enable\n"
1441 "#extension GL_EXT_control_flow_attributes : enable\n"
1442 "#extension GL_NV_ray_tracing : require\n"
1443 "#extension GL_EXT_shader_image_load_formatted : enable\n"
1448 " " << vecType << " accum = " << vecType << "(0);\n"
1449 " " << vecType << " temp;\n"
1450 " " << qLevelType << " temp_ql;\n"
1452 " " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1453 " imageStore(image0_0, ivec2(gl_LaunchIDNV.xy), color);\n"
1456 programCollection.glslSources.add("test") << glu::RaygenSource(css.str())
1457 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1462 std::stringstream vss;
1464 "#version 450 core\n"
1465 "#extension GL_EXT_samplerless_texture_functions : enable\n"
1466 "#extension GL_EXT_scalar_block_layout : enable\n"
1467 "#extension GL_EXT_nonuniform_qualifier : enable\n"
1468 "#extension GL_EXT_control_flow_attributes : enable\n"
1469 "#extension GL_EXT_shader_image_load_formatted : enable\n"
1474 " " << vecType << " accum = " << vecType << "(0);\n"
1475 " " << vecType << " temp;\n"
1476 " " << qLevelType << " temp_ql;\n"
1478 " " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1479 " imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM << "), color);\n"
1480 " gl_PointSize = 1.0f;\n"
1481 " gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1484 programCollection.glslSources.add("test") << glu::VertexSource(vss.str())
1485 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1488 case STAGE_FRAGMENT:
1490 if (m_data.nullDescriptor &&
1491 m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1492 m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1494 // Here we only want to check that textureQueryLod returns 0 when
1495 // texture0_1 is null, so we don't need real texture coordinates
1496 // (which would also require changes to the vertex shader below). Any coordinates are fine;
1497 // gl_FragCoord was chosen arbitrarily rather than, say, a constant 0.
1498 std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1499 checks << " vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1500 checks << " temp_ql = " << qLevelType <<
1501 "(ceil(abs(lod.x) + abs(lod.y)));\n";
1502 checks << " temp = " << vecType << "(temp_ql);\n";
1503 checks << " accum += abs(temp);\n";
1506 std::stringstream vss;
1508 "#version 450 core\n"
1511 // full-viewport quad
1512 " gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * float(gl_VertexIndex&1), 1);\n"
1515 programCollection.glslSources.add("vert") << glu::VertexSource(vss.str())
1516 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1518 std::stringstream fss;
1520 "#version 450 core\n"
1521 "#extension GL_EXT_samplerless_texture_functions : enable\n"
1522 "#extension GL_EXT_scalar_block_layout : enable\n"
1523 "#extension GL_EXT_nonuniform_qualifier : enable\n"
1524 "#extension GL_EXT_control_flow_attributes : enable\n"
1525 "#extension GL_EXT_shader_image_load_formatted : enable\n"
1530 " " << vecType << " accum = " << vecType << "(0);\n"
1531 " " << vecType << " temp;\n"
1532 " " << qLevelType << " temp_ql;\n"
1534 " " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1535 " imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1538 programCollection.glslSources.add("test") << glu::FragmentSource(fss.str())
1539 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1544 // The 64-bit conditions below are redundant. TODO: could the fill shader below also be used for non-64-bit formats?
1545 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
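// Emit a separate "fillShader" compute shader that writes every sample of the multisample 64-bit image from a buffer of 64-bit values.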
1547 const std::string ivecCords = (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1548 std::stringstream fillShader;
1554 "layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1555 "layout (" + getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1556 << string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image" << imageDim << +" u_resultImage;\n"
1558 "layout(std430, binding = 1) buffer inputBuffer\n"
1560 " int" << (is64BitFormat ? "64_t" : "") << " data[];\n"
1565 " int gx = int(gl_GlobalInvocationID.x);\n"
1566 " int gy = int(gl_GlobalInvocationID.y);\n"
1567 " int gz = int(gl_GlobalInvocationID.z);\n"
1568 " uint index = gx + (gy * gl_NumWorkGroups.x) + (gz * gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
1570 for(int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1572 fillShader << " imageStore(u_resultImage, " << ivecCords << ", " << ndx << ", i64vec4(inBuffer.data[index]));\n";
1575 fillShader << "}\n";
1577 programCollection.glslSources.add("fillShader") << glu::ComputeSource(fillShader.str())
1578 << vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1583 VkImageType imageViewTypeToImageType (VkImageViewType type)
1587 case VK_IMAGE_VIEW_TYPE_1D:
1588 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return VK_IMAGE_TYPE_1D;
1589 case VK_IMAGE_VIEW_TYPE_2D:
1590 case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1591 case VK_IMAGE_VIEW_TYPE_CUBE:
1592 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return VK_IMAGE_TYPE_2D;
1593 case VK_IMAGE_VIEW_TYPE_3D: return VK_IMAGE_TYPE_3D;
1598 return VK_IMAGE_TYPE_2D;
1601 TestInstance* RobustnessExtsTestCase::createInstance (Context& context) const
1603 return new RobustnessExtsTestInstance(context, m_data);
1606 tcu::TestStatus RobustnessExtsTestInstance::iterate (void)
1608 const InstanceInterface& vki = m_context.getInstanceInterface();
1609 const VkDevice device = getLogicalDevice(m_context, m_data);
1610 const DeviceDriver vk (m_context.getPlatformInterface(), m_context.getInstance(), device);
1611 const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
1612 SimpleAllocator allocator (vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1615 generateLayout(layout, m_data);
1617 // Get needed properties.
1618 VkPhysicalDeviceProperties2 properties;
1619 deMemset(&properties, 0, sizeof(properties));
1620 properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
1621 void** pNextTail = &properties.pNext;
1623 VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties;
1624 deMemset(&rayTracingProperties, 0, sizeof(rayTracingProperties));
1625 rayTracingProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV;
1627 VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties;
1628 deMemset(&robustness2Properties, 0, sizeof(robustness2Properties));
1629 robustness2Properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT;
1631 if (m_context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
1633 *pNextTail = &rayTracingProperties;
1634 pNextTail = &rayTracingProperties.pNext;
1637 if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
1639 *pNextTail = &robustness2Properties;
1640 pNextTail = &robustness2Properties.pNext;
1643 vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
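// Sanity-check the reported robustness2 alignment properties against the limits required by the extension.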
1645 if (m_data.testRobustness2)
1647 if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
1648 robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
1649 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
1651 if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
1652 robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
1653 !deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
1654 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
1657 VkPipelineBindPoint bindPoint;
1659 switch (m_data.stage)
1662 bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1665 bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1668 bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1672 Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
1673 Move<vk::VkDescriptorPool> descriptorPool;
1674 Move<vk::VkDescriptorSet> descriptorSet;
1676 int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
1677 int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
1679 vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
1681 VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1682 VkDescriptorSetLayoutCreateFlags layoutCreateFlags = m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
1684 // Create a layout and allocate a descriptor set for it.
1686 const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo =
1688 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1692 (deUint32)bindings.size(),
1693 bindings.empty() ? DE_NULL : bindings.data()
1696 descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
1698 vk::DescriptorPoolBuilder poolBuilder;
1699 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
1700 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
1701 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
1702 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
1703 poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
1704 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
1705 poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
1706 poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
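// One descriptor of each type is enough for any single case; storage images need two because binding 0 (the output image) is always a storage image.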
1708 descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
1710 const void *pNext = DE_NULL;
1712 if (!m_data.pushDescriptor)
1713 descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
1715 de::MovePtr<BufferWithMemory> buffer;
1717 deUint8 *bufferPtr = DE_NULL;
1718 if (!m_data.nullDescriptor)
1720 // Create a buffer to hold data for all descriptors.
1721 VkDeviceSize size = de::max(
1722 (VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1),
1725 VkBufferUsageFlags usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
1726 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1727 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1729 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
1730 usage |= VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
1732 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1733 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1735 size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
1736 usage |= VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
1738 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1740 usage |= VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1742 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER)
1744 usage |= VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
1746 else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1748 size = m_data.bufferLen;
1751 buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1752 vk, device, allocator, makeBufferCreateInfo(size, usage), MemoryRequirement::HostVisible));
1753 bufferPtr = (deUint8 *)buffer->getAllocation().getHostPtr();
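// Fill the whole allocation with a 0x3f marker pattern, then zero the logically valid range.
// For uniform/storage buffers the zeroed range is rounded up to the robust access alignment,
// since accesses inside that rounded range may legally return actual buffer contents.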
1755 deMemset(bufferPtr, 0x3f, (size_t)size);
1757 deMemset(bufferPtr, 0, m_data.bufferLen);
1758 if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1759 m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1761 deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
1763 else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1764 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1766 deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
1770 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1772 Move<VkDescriptorSetLayout> descriptorSetLayoutR64;
1773 Move<VkDescriptorPool> descriptorPoolR64;
1774 Move<VkDescriptorSet> descriptorSetFillImage;
1775 Move<VkShaderModule> shaderModuleFillImage;
1776 Move<VkPipelineLayout> pipelineLayoutFillImage;
1777 Move<VkPipeline> pipelineFillImage;
1779 Move<VkCommandPool> cmdPool = createCommandPool(vk, device, 0, queueFamilyIndex);
1780 Move<VkCommandBuffer> cmdBuffer = allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1783 vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
1785 const VkImageSubresourceRange barrierRange =
1787 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1788 0u, // deUint32 baseMipLevel;
1789 VK_REMAINING_MIP_LEVELS, // deUint32 levelCount;
1790 0u, // deUint32 baseArrayLayer;
1791 VK_REMAINING_ARRAY_LAYERS // deUint32 layerCount;
1794 VkImageMemoryBarrier preImageBarrier =
1796 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
1797 DE_NULL, // const void* pNext
1798 0u, // VkAccessFlags srcAccessMask
1799 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
1800 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
1801 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout
1802 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
1803 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
1804 DE_NULL, // VkImage image
1805 barrierRange, // VkImageSubresourceRange subresourceRange;
1808 VkImageMemoryBarrier postImageBarrier =
1810 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1811 DE_NULL, // const void* pNext;
1812 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1813 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1814 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1815 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout;
1816 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1817 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1818 DE_NULL, // VkImage image;
1819 barrierRange, // VkImageSubresourceRange subresourceRange;
1822 vk::VkClearColorValue clearValue;
1823 clearValue.uint32[0] = 0u;
1824 clearValue.uint32[1] = 0u;
1825 clearValue.uint32[2] = 0u;
1826 clearValue.uint32[3] = 0u;
1828 beginCommandBuffer(vk, *cmdBuffer, 0u);
1830 typedef vk::Unique<vk::VkBufferView> BufferViewHandleUp;
1831 typedef de::SharedPtr<BufferViewHandleUp> BufferViewHandleSp;
1832 typedef de::SharedPtr<ImageWithMemory> ImageWithMemorySp;
1833 typedef de::SharedPtr<Unique<VkImageView> > VkImageViewSp;
1834 typedef de::MovePtr<BufferWithMemory> BufferWithMemoryMp;
1836 vector<BufferViewHandleSp> bufferViews(1);
1838 VkImageCreateFlags mutableFormatFlag = 0;
1839 // The 64-bit image tests use a view format which differs from the image.
1840 if (formatIsR64(m_data.format))
1841 mutableFormatFlag = VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT;
1842 VkImageCreateFlags imageCreateFlags = mutableFormatFlag;
1843 if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1844 imageCreateFlags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
1846 const bool featureSampledImage = ((getPhysicalDeviceFormatProperties(m_context.getInstanceInterface(),
1847 m_context.getPhysicalDevice(),
1848 m_data.format).optimalTilingFeatures &
1849 VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
1851 const VkImageUsageFlags usageSampledImage = (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
1853 const VkImageCreateInfo outputImageCreateInfo =
1855 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1856 DE_NULL, // const void* pNext;
1857 mutableFormatFlag, // VkImageCreateFlags flags;
1858 VK_IMAGE_TYPE_2D, // VkImageType imageType;
1859 m_data.format, // VkFormat format;
1861 DIM, // deUint32 width;
1862 DIM, // deUint32 height;
1863 1u // deUint32 depth;
1864 }, // VkExtent3D extent;
1865 1u, // deUint32 mipLevels;
1866 1u, // deUint32 arrayLayers;
1867 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1868 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1869 VK_IMAGE_USAGE_STORAGE_BIT
1871 | VK_IMAGE_USAGE_TRANSFER_SRC_BIT
1872 | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
1873 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1874 0u, // deUint32 queueFamilyIndexCount;
1875 DE_NULL, // const deUint32* pQueueFamilyIndices;
1876 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
1879 deUint32 width = m_data.imageDim[0];
1880 deUint32 height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] : 1;
1881 deUint32 depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
1882 deUint32 layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
1883 m_data.viewType != VK_IMAGE_VIEW_TYPE_1D &&
1884 m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
1885 m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
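// imageCreateInfo below consumes these view-type-dependent dimensions: 3D views carry the third
// dimension in depth, array and cube views carry it in arrayLayers, and 1D views collapse height to 1.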
1887 const VkImageUsageFlags usageImage = (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT : (VkImageUsageFlagBits)0);
1889 const VkImageCreateInfo imageCreateInfo =
1891 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1892 DE_NULL, // const void* pNext;
1893 imageCreateFlags, // VkImageCreateFlags flags;
1894 imageViewTypeToImageType(m_data.viewType), // VkImageType imageType;
1895 m_data.format, // VkFormat format;
1897 width, // deUint32 width;
1898 height, // deUint32 height;
1899 depth // deUint32 depth;
1900 }, // VkExtent3D extent;
1901 1u, // deUint32 mipLevels;
1902 layers, // deUint32 arrayLayers;
1903 m_data.samples, // VkSampleCountFlagBits samples;
1904 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1907 | VK_IMAGE_USAGE_TRANSFER_SRC_BIT
1908 | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
1909 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1910 0u, // deUint32 queueFamilyIndexCount;
1911 DE_NULL, // const deUint32* pQueueFamilyIndices;
1912 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
1915 VkImageViewCreateInfo imageViewCreateInfo =
1917 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
1918 DE_NULL, // const void* pNext;
1919 (VkImageViewCreateFlags)0u, // VkImageViewCreateFlags flags;
1920 DE_NULL, // VkImage image;
1921 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
1922 m_data.format, // VkFormat format;
1924 VK_COMPONENT_SWIZZLE_IDENTITY,
1925 VK_COMPONENT_SWIZZLE_IDENTITY,
1926 VK_COMPONENT_SWIZZLE_IDENTITY,
1927 VK_COMPONENT_SWIZZLE_IDENTITY
1928 }, // VkComponentMapping components;
1930 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1931 0u, // deUint32 baseMipLevel;
1932 VK_REMAINING_MIP_LEVELS, // deUint32 levelCount;
1933 0u, // deUint32 baseArrayLayer;
1934 VK_REMAINING_ARRAY_LAYERS // deUint32 layerCount;
1935 } // VkImageSubresourceRange subresourceRange;
1938 vector<ImageWithMemorySp> images(2);
1939 vector<VkImageViewSp> imageViews(2);
1941 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1943 deUint32 *ptr = (deUint32 *)bufferPtr;
1944 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
1947 BufferWithMemoryMp bufferImageR64;
1948 BufferWithMemoryMp bufferOutputImageR64;
1949 const VkDeviceSize sizeOutputR64 = 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height * outputImageCreateInfo.extent.depth;
1950 const VkDeviceSize sizeOneLayers = 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
1951 const VkDeviceSize sizeImageR64 = sizeOneLayers * layers;
1953 if (formatIsR64(m_data.format))
1955 bufferOutputImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1956 vk, device, allocator,
1957 makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
1958 MemoryRequirement::HostVisible));
1960 deUint64* bufferUint64Ptr = (deUint64 *)bufferOutputImageR64->getAllocation().getHostPtr();
1962 for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
1964 bufferUint64Ptr[ndx] = 0;
1966 flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
1968 bufferImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1969 vk, device, allocator,
1970 makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
1971 MemoryRequirement::HostVisible));
1973 for (deUint32 layerNdx = 0; layerNdx < layers; ++layerNdx)
1975 bufferUint64Ptr = (deUint64 *)bufferImageR64->getAllocation().getHostPtr();
1976 bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
1978 for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
1980 bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? layerNdx : 0);
1983 flushAlloc(vk, device, bufferImageR64->getAllocation());
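// For R64 formats the initial texel data comes from these host-filled staging buffers (uploaded
// below with a buffer-to-image copy or a fill compute shader), since VkClearColorValue cannot
// express a full 64-bit texel.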
1986 for (size_t b = 0; b < bindings.size(); ++b)
1988 VkDescriptorSetLayoutBinding &binding = bindings[b];
1990 if (binding.descriptorCount == 0)
1992 if (b == 1 && m_data.nullDescriptor)
1995 DE_ASSERT(binding.descriptorCount == 1);
1996 switch (binding.descriptorType)
1998 default: DE_ASSERT(0); // Fallthrough
1999 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2000 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2001 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2002 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2004 deUint32 *ptr = (deUint32 *)bufferPtr;
2005 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2008 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2009 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2011 deUint32 *ptr = (deUint32 *)bufferPtr;
2012 deMemcpy(ptr, layout.refData.data(), layout.refData.size());
2014 const vk::VkBufferViewCreateInfo viewCreateInfo =
2016 vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
2018 (vk::VkBufferViewCreateFlags)0,
2020 m_data.format, // format
2021 (vk::VkDeviceSize)0, // offset
2022 (vk::VkDeviceSize)m_data.bufferLen // range
2024 vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
2025 bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2028 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2029 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2031 if (bindings.size() > 1 &&
2032 bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2034 if (m_data.format == VK_FORMAT_R64_SINT)
2035 imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2037 if (m_data.format == VK_FORMAT_R64_UINT)
2038 imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2043 images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2044 imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2048 images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2049 imageViewCreateInfo.viewType = m_data.viewType;
2051 imageViewCreateInfo.image = **images[b];
2052 imageViews[b] = VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2054 VkImage img = **images[b];
2055 const VkBuffer& bufferR64 = ((b == 0) ? *(*bufferOutputImageR64) : *(*(bufferImageR64)));
2056 const VkImageCreateInfo& imageInfo = ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2057 const deUint32 clearLayers = b == 0 ? 1 : layers;
2059 if (!formatIsR64(m_data.format))
2061 preImageBarrier.image = img;
2064 if (formatIsFloat(m_data.format))
2066 deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2068 else if (formatIsSignedInt(m_data.format))
2070 deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2074 deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2077 postImageBarrier.image = img;
2079 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2081 for (unsigned int i = 0; i < clearLayers; ++i)
2083 const VkImageSubresourceRange clearRange =
2085 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2086 0u, // deUint32 baseMipLevel;
2087 VK_REMAINING_MIP_LEVELS, // deUint32 levelCount;
2088 i, // deUint32 baseArrayLayer;
2089 1 // deUint32 layerCount;
2092 vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
2094 // Use same data for all faces for cube(array), otherwise make value a function of the layer
2095 if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2097 if (formatIsFloat(m_data.format))
2098 clearValue.float32[0] += 1;
2099 else if (formatIsSignedInt(m_data.format))
2100 clearValue.int32[0] += 1;
2102 clearValue.uint32[0] += 1;
2105 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2109 if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2111 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2112 const VkImageMemoryBarrier imageBarrierPre = makeImageMemoryBarrier(0,
2113 VK_ACCESS_SHADER_WRITE_BIT,
2114 VK_IMAGE_LAYOUT_UNDEFINED,
2115 VK_IMAGE_LAYOUT_GENERAL,
2118 const VkImageMemoryBarrier imageBarrierPost = makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
2119 VK_ACCESS_SHADER_READ_BIT,
2120 VK_IMAGE_LAYOUT_GENERAL,
2121 VK_IMAGE_LAYOUT_GENERAL,
2125 descriptorSetLayoutR64 =
2126 DescriptorSetLayoutBuilder()
2127 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2128 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2132 DescriptorPoolBuilder()
2133 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2134 .addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,1)
2135 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2137 descriptorSetFillImage = makeDescriptorSet(vk,
2140 *descriptorSetLayoutR64);
2142 shaderModuleFillImage = createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2143 pipelineLayoutFillImage = makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2144 pipelineFillImage = makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
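// This compute pipeline ("fillShader") writes the 64-bit reference values from the staging buffer
// into the multisampled storage image, which cannot be initialized with a buffer-to-image copy.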
2146 const VkDescriptorImageInfo descResultImageInfo = makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2147 const VkDescriptorBufferInfo descResultBufferInfo = makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2149 DescriptorSetUpdateBuilder()
2150 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2151 .writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2152 .update(vk, device);
2154 vk.cmdPipelineBarrier(*cmdBuffer,
2155 VK_PIPELINE_STAGE_HOST_BIT,
2156 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2157 (VkDependencyFlags)0,
2158 0, (const VkMemoryBarrier*)DE_NULL,
2159 0, (const VkBufferMemoryBarrier*)DE_NULL,
2160 1, &imageBarrierPre);
2162 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2163 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u, 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2165 vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2167 vk.cmdPipelineBarrier(*cmdBuffer,
2168 VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2169 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2170 (VkDependencyFlags)0,
2171 0, (const VkMemoryBarrier*)DE_NULL,
2172 0, (const VkBufferMemoryBarrier*)DE_NULL,
2173 1, &imageBarrierPost);
2177 VkDeviceSize size = ((b == 0) ? sizeOutputR64 : sizeImageR64);
2178 const vector<VkBufferImageCopy> bufferImageCopy (1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, clearLayers)));
2180 copyBufferToImage(vk,
2185 VK_IMAGE_ASPECT_COLOR_BIT,
2187 clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2195 const VkSamplerCreateInfo samplerParams =
2197 VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO, // VkStructureType sType;
2198 DE_NULL, // const void* pNext;
2199 0, // VkSamplerCreateFlags flags;
2200 VK_FILTER_NEAREST, // VkFilter magFilter;
2201 VK_FILTER_NEAREST, // VkFilter minFilter;
2202 VK_SAMPLER_MIPMAP_MODE_NEAREST, // VkSamplerMipmapMode mipmapMode;
2203 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeU;
2204 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeV;
2205 VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER, // VkSamplerAddressMode addressModeW;
2206 0.0f, // float mipLodBias;
2207 VK_FALSE, // VkBool32 anisotropyEnable;
2208 1.0f, // float maxAnisotropy;
2209 VK_FALSE, // VkBool32 compareEnable;
2210 VK_COMPARE_OP_ALWAYS, // VkCompareOp compareOp;
2211 0.0f, // float minLod;
2212 0.0f, // float maxLod;
2213 formatIsFloat(m_data.format) ?
2214 VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2215 VK_BORDER_COLOR_INT_TRANSPARENT_BLACK, // VkBorderColor borderColor;
2216 VK_FALSE // VkBool32 unnormalizedCoordinates;
2219 Move<VkSampler> sampler (createSampler(vk, device, &samplerParams));
2221 // Flush modified memory.
2222 if (!m_data.nullDescriptor)
2223 flushAlloc(vk, device, buffer->getAllocation());
2225 const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
2227 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
2229 (VkPipelineLayoutCreateFlags)0,
2230 1u, // setLayoutCount
2231 &descriptorSetLayout.get(), // pSetLayouts
2232 0u, // pushConstantRangeCount
2233 DE_NULL, // pPushConstantRanges
2236 Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2238 de::MovePtr<BufferWithMemory> copyBuffer;
2239 copyBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2240 vk, device, allocator, makeBufferCreateInfo(DIM*DIM*16, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
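// Host-visible readback buffer: the DIMxDIM output image (at most 16 bytes per texel) is copied
// here at the end of the command buffer and verified on the host.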
2243 vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2244 vector<VkDescriptorImageInfo> imageInfoVec(2);
2245 vector<VkBufferView> bufferViewVec(2);
2246 vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2250 vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore,
2251 bufTemplateEntriesBefore,
2252 texelBufTemplateEntriesBefore;
2254 for (size_t b = 0; b < bindings.size(); ++b)
2256 VkDescriptorSetLayoutBinding &binding = bindings[b];
2257 // Build the descriptor write and update-template entry for this binding
2258 if (binding.descriptorCount > 0)
2261 switch (binding.descriptorType)
2263 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2264 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2266 if (b == 1 && m_data.nullDescriptor)
2267 imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2269 imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2271 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2272 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2273 if (b == 1 && m_data.nullDescriptor)
2274 bufferViewVec[vecIndex] = DE_NULL;
2276 bufferViewVec[vecIndex] = **bufferViews[0];
2279 // Other descriptor types.
2280 if (b == 1 && m_data.nullDescriptor)
2281 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2283 bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2287 VkWriteDescriptorSet w =
2289 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET, // sType
2291 m_data.pushDescriptor ? DE_NULL : *descriptorSet, // dstSet
2292 (deUint32)b, // binding
2293 0, // dstArrayElement
2294 1u, // descriptorCount
2295 binding.descriptorType, // descriptorType
2296 &imageInfoVec[vecIndex], // pImageInfo
2297 &bufferInfoVec[vecIndex], // pBufferInfo
2298 &bufferViewVec[vecIndex], // pTexelBufferView
2301 VkDescriptorUpdateTemplateEntry templateEntry =
2303 (deUint32)b, // uint32_t dstBinding;
2304 0, // uint32_t dstArrayElement;
2305 1u, // uint32_t descriptorCount;
2306 binding.descriptorType, // VkDescriptorType descriptorType;
2307 0, // size_t offset;
2308 0, // size_t stride;
2311 switch (binding.descriptorType)
2313 default: DE_ASSERT(0); // Fallthrough
2314 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2315 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2316 templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2317 imgTemplateEntriesBefore.push_back(templateEntry);
2319 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2320 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2321 templateEntry.offset = vecIndex * sizeof(VkBufferView);
2322 texelBufTemplateEntriesBefore.push_back(templateEntry);
2324 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2325 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2326 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2327 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2328 templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2329 bufTemplateEntriesBefore.push_back(templateEntry);
2335 writesBeforeBindVec.push_back(w);
2337 // Count the number of dynamic descriptors in this set.
2338 if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2339 binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2346 // Make zeros have at least one element so &zeros[0] works
2347 vector<deUint32> zeros(de::max(1,numDynamic));
2348 deMemset(&zeros[0], 0, numDynamic * sizeof(deUint32));
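// Dynamic uniform/storage buffer descriptors are always bound with zero dynamic offsets.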
2350 // Update descriptors with either vkUpdateDescriptorSetWithTemplate or vkUpdateDescriptorSets, depending on the test's useTemplate flag
2351 if (m_data.useTemplate)
2353 VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
2355 VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO, // VkStructureType sType;
2356 NULL, // const void* pNext;
2357 0, // VkDescriptorUpdateTemplateCreateFlags flags;
2358 0, // uint32_t descriptorUpdateEntryCount;
2359 DE_NULL, // const VkDescriptorUpdateTemplateEntry* pDescriptorUpdateEntries;
2360 m_data.pushDescriptor ?
2361 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2362 VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET, // VkDescriptorUpdateTemplateType templateType;
2363 descriptorSetLayout.get(), // VkDescriptorSetLayout descriptorSetLayout;
2364 bindPoint, // VkPipelineBindPoint pipelineBindPoint;
2365 *pipelineLayout, // VkPipelineLayout pipelineLayout;
2369 void *templateVectorData[] =
2371 imageInfoVec.data(),
2372 bufferInfoVec.data(),
2373 bufferViewVec.data(),
2376 vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] =
2378 &imgTemplateEntriesBefore,
2379 &bufTemplateEntriesBefore,
2380 &texelBufTemplateEntriesBefore,
2383 if (m_data.pushDescriptor)
2385 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2387 if (templateVectorsBefore[i]->size())
2389 templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2390 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2391 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2392 vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout, 0, templateVectorData[i]);
2398 for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2400 if (templateVectorsBefore[i]->size())
2402 templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2403 templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2404 Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2405 vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate, templateVectorData[i]);
2409 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2414 if (m_data.pushDescriptor)
2416 if (writesBeforeBindVec.size())
2418 vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2423 if (writesBeforeBindVec.size())
2425 vk.updateDescriptorSets(device, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2428 vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2433 Move<VkPipeline> pipeline;
2434 Move<VkRenderPass> renderPass;
2435 Move<VkFramebuffer> framebuffer;
2437 de::MovePtr<BufferWithMemory> sbtBuffer;
2439 if (m_data.stage == STAGE_COMPUTE)
2441 const Unique<VkShaderModule> shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2443 pipeline = makeComputePipeline(vk, device, *pipelineLayout, *shader);
2446 else if (m_data.stage == STAGE_RAYGEN)
2448 const Unique<VkShaderModule> shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2450 const VkPipelineShaderStageCreateInfo shaderCreateInfo =
2452 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2454 (VkPipelineShaderStageCreateFlags)0,
2455 VK_SHADER_STAGE_RAYGEN_BIT_NV, // stage
2458 DE_NULL, // pSpecializationInfo
2461 VkRayTracingShaderGroupCreateInfoNV group =
2463 VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV,
2465 VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV, // type
2467 VK_SHADER_UNUSED_NV, // closestHitShader
2468 VK_SHADER_UNUSED_NV, // anyHitShader
2469 VK_SHADER_UNUSED_NV, // intersectionShader
2472 VkRayTracingPipelineCreateInfoNV pipelineCreateInfo = {
2473 VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV, // sType
2477 &shaderCreateInfo, // pStages
2480 0, // maxRecursionDepth
2481 *pipelineLayout, // layout
2482 (vk::VkPipeline)0, // basePipelineHandle
2483 0u, // basePipelineIndex
2486 pipeline = createRayTracingPipelineNV(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2488 sbtBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2489 vk, device, allocator, makeBufferCreateInfo(rayTracingProperties.shaderGroupHandleSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_RAY_TRACING_BIT_NV), MemoryRequirement::HostVisible));
2491 deUint32 *ptr = (deUint32 *)sbtBuffer->getAllocation().getHostPtr();
2492 invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2494 vk.getRayTracingShaderGroupHandlesNV(device, *pipeline, 0, 1, rayTracingProperties.shaderGroupHandleSize, ptr);
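// The raygen group handle written here makes this host-visible buffer the shader binding table
// consumed by vkCmdTraceRaysNV below.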
2498 const VkSubpassDescription subpassDesc =
2500 (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags
2501 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint
2502 0u, // deUint32 inputAttachmentCount
2503 DE_NULL, // const VkAttachmentReference* pInputAttachments
2504 0u, // deUint32 colorAttachmentCount
2505 DE_NULL, // const VkAttachmentReference* pColorAttachments
2506 DE_NULL, // const VkAttachmentReference* pResolveAttachments
2507 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment
2508 0u, // deUint32 preserveAttachmentCount
2509 DE_NULL // const deUint32* pPreserveAttachments
2512 const VkSubpassDependency subpassDependency =
2514 VK_SUBPASS_EXTERNAL, // deUint32 srcSubpass
2515 0, // deUint32 dstSubpass
2516 VK_PIPELINE_STAGE_TRANSFER_BIT, // VkPipelineStageFlags srcStageMask
2517 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, // VkPipelineStageFlags dstStageMask
2518 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask
2519 VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT, // dstAccessMask
2520 VK_DEPENDENCY_BY_REGION_BIT // VkDependencyFlags dependencyFlags
2523 const VkRenderPassCreateInfo renderPassParams =
2525 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType
2526 DE_NULL, // const void* pNext
2527 (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
2528 0u, // deUint32 attachmentCount
2529 DE_NULL, // const VkAttachmentDescription* pAttachments
2530 1u, // deUint32 subpassCount
2531 &subpassDesc, // const VkSubpassDescription* pSubpasses
2532 1u, // deUint32 dependencyCount
2533 &subpassDependency // const VkSubpassDependency* pDependencies
2536 renderPass = createRenderPass(vk, device, &renderPassParams);
2538 const vk::VkFramebufferCreateInfo framebufferParams =
2540 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
2542 (vk::VkFramebufferCreateFlags)0,
2543 *renderPass, // renderPass
2544 0u, // attachmentCount
2545 DE_NULL, // pAttachments
2551 framebuffer = createFramebuffer(vk, device, &framebufferParams);
2553 const VkVertexInputBindingDescription vertexInputBindingDescription =
2555 0u, // deUint32 binding
2556 (deUint32)formatBytes, // deUint32 stride
2557 VK_VERTEX_INPUT_RATE_VERTEX, // VkVertexInputRate inputRate
2560 const VkVertexInputAttributeDescription vertexInputAttributeDescription =
2562 0u, // deUint32 location
2563 0u, // deUint32 binding
2564 m_data.format, // VkFormat format
2565 0u // deUint32 offset
2568 deUint32 numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
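// Only the vertex_attribute_fetch cases declare a vertex binding/attribute; all other cases draw
// without any vertex input.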
2570 const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo =
2572 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2573 DE_NULL, // const void* pNext;
2574 (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags;
2575 numAttribs, // deUint32 vertexBindingDescriptionCount;
2576 &vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
2577 numAttribs, // deUint32 vertexAttributeDescriptionCount;
2578 &vertexInputAttributeDescription // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
2581 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo =
2583 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
2584 DE_NULL, // const void* pNext;
2585 (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags;
2586 (m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology topology;
2587 VK_FALSE // VkBool32 primitiveRestartEnable;
2590 const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo =
2592 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
2593 DE_NULL, // const void* pNext;
2594 (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags;
2595 VK_FALSE, // VkBool32 depthClampEnable;
2596 (m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE, // VkBool32 rasterizerDiscardEnable;
2597 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
2598 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
2599 VK_FRONT_FACE_CLOCKWISE, // VkFrontFace frontFace;
2600 VK_FALSE, // VkBool32 depthBiasEnable;
2601 0.0f, // float depthBiasConstantFactor;
2602 0.0f, // float depthBiasClamp;
2603 0.0f, // float depthBiasSlopeFactor;
2604 1.0f // float lineWidth;
2607 const VkPipelineMultisampleStateCreateInfo multisampleStateCreateInfo =
2609 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType
2610 DE_NULL, // const void* pNext
2611 0u, // VkPipelineMultisampleStateCreateFlags flags
2612 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples
2613 VK_FALSE, // VkBool32 sampleShadingEnable
2614 1.0f, // float minSampleShading
2615 DE_NULL, // const VkSampleMask* pSampleMask
2616 VK_FALSE, // VkBool32 alphaToCoverageEnable
2617 VK_FALSE // VkBool32 alphaToOneEnable
2620 VkViewport viewport = makeViewport(DIM, DIM);
2621 VkRect2D scissor = makeRect2D(DIM, DIM);
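// Render at DIMxDIM so each fragment (or point, for the vertex stage) maps to one texel of the
// output image.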
2623 const VkPipelineViewportStateCreateInfo viewportStateCreateInfo =
2625 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType
2626 DE_NULL, // const void* pNext
2627 (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags
2628 1u, // deUint32 viewportCount
2629 &viewport, // const VkViewport* pViewports
2630 1u, // deUint32 scissorCount
2631 &scissor // const VkRect2D* pScissors
2634 Move<VkShaderModule> fs;
2635 Move<VkShaderModule> vs;
2638 if (m_data.stage == STAGE_VERTEX)
2640 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2641 fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // bogus
2646 vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
2647 fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2651 const VkPipelineShaderStageCreateInfo shaderCreateInfo[2] =
2654 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2656 (VkPipelineShaderStageCreateFlags)0,
2657 VK_SHADER_STAGE_VERTEX_BIT, // stage
2660 DE_NULL, // pSpecializationInfo
2663 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2665 (VkPipelineShaderStageCreateFlags)0,
2666 VK_SHADER_STAGE_FRAGMENT_BIT, // stage
2669 DE_NULL, // pSpecializationInfo
2673 const VkGraphicsPipelineCreateInfo graphicsPipelineCreateInfo =
2675 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
2676 DE_NULL, // const void* pNext;
2677 (VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags;
2678 numStages, // deUint32 stageCount;
2679 &shaderCreateInfo[0], // const VkPipelineShaderStageCreateInfo* pStages;
2680 &vertexInputStateCreateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
2681 &inputAssemblyStateCreateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
2682 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
2683 &viewportStateCreateInfo, // const VkPipelineViewportStateCreateInfo* pViewportState;
2684 &rasterizationStateCreateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
2685 &multisampleStateCreateInfo, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
2686 DE_NULL, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
2687 DE_NULL, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
2688 DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
2689 pipelineLayout.get(), // VkPipelineLayout layout;
2690 renderPass.get(), // VkRenderPass renderPass;
2691 0u, // deUint32 subpass;
2692 DE_NULL, // VkPipeline basePipelineHandle;
2693 0 // int basePipelineIndex;
2696 pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
2699 const VkImageMemoryBarrier imageBarrier =
2701 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType
2702 DE_NULL, // const void* pNext
2703 0u, // VkAccessFlags srcAccessMask
2704 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask
2705 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout
2706 VK_IMAGE_LAYOUT_GENERAL, // VkImageLayout newLayout
2707 VK_QUEUE_FAMILY_IGNORED, // uint32_t srcQueueFamilyIndex
2708 VK_QUEUE_FAMILY_IGNORED, // uint32_t dstQueueFamilyIndex
2709 **images[0], // VkImage image
2711 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask
2712 0u, // uint32_t baseMipLevel
2713 1u, // uint32_t levelCount
2714 0u, // uint32_t baseArrayLayer
2715 1u, // uint32_t layerCount
2719 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
2720 (VkDependencyFlags)0,
2721 0, (const VkMemoryBarrier*)DE_NULL,
2722 0, (const VkBufferMemoryBarrier*)DE_NULL,
2725 vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);
2727 if (!formatIsR64(m_data.format))
2729 VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
2730 VkClearValue clearColor = makeClearValueColorU32(0,0,0,0);
2732 vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
2736 const vector<VkBufferImageCopy> bufferImageCopy(1, makeBufferImageCopy(outputImageCreateInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
2737 copyBufferToImage(vk,
2739 *(*bufferOutputImageR64),
2742 VK_IMAGE_ASPECT_COLOR_BIT,
2744 1, **images[0], VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2747 VkMemoryBarrier memBarrier =
2749 VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
2751 0u, // srcAccessMask
2752 0u, // dstAccessMask
2755 memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
2756 memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
2757 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
2758 0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
2760 if (m_data.stage == STAGE_COMPUTE)
2762 vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
2764 else if (m_data.stage == STAGE_RAYGEN)
2766 vk.cmdTraceRaysNV(*cmdBuffer,
2775 beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer,
2776 makeRect2D(DIM, DIM),
2777 0, DE_NULL, VK_SUBPASS_CONTENTS_INLINE);
2778 // Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
2779 if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
2781 VkDeviceSize zeroOffset = 0;
2782 VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
2783 vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
2784 vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
2786 if (m_data.stage == STAGE_VERTEX)
2788 vk.cmdDraw(*cmdBuffer, DIM*DIM, 1u, 0u, 0u);
2792 vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
2794 endRenderPass(vk, *cmdBuffer);
2797 memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
2798 memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
2799 vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT,
2800 0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);
2802 const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(DIM, DIM, 1u),
2803 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
2804 vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);
2806 endCommandBuffer(vk, *cmdBuffer);
2808 submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
2810 void *ptr = copyBuffer->getAllocation().getHostPtr();
2812 invalidateAlloc(vk, device, copyBuffer->getAllocation());
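// The test passes only if the first component of every output texel equals 1 (1.0 for float formats).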
2814 qpTestResult res = QP_TEST_RESULT_PASS;
2816 for (deUint32 i = 0; i < DIM*DIM; ++i)
2818 if (formatIsFloat(m_data.format))
2820 if (((float *)ptr)[i * numComponents] != 1.0f)
2822 res = QP_TEST_RESULT_FAIL;
2825 else if (formatIsR64(m_data.format))
2827 if (((deUint64 *)ptr)[i * numComponents] != 1)
2829 res = QP_TEST_RESULT_FAIL;
2834 if (((deUint32 *)ptr)[i * numComponents] != 1)
2836 res = QP_TEST_RESULT_FAIL;
2841 return tcu::TestStatus(res, qpGetTestResultName(res));
2846 static void createTests (tcu::TestCaseGroup* group, bool robustness2)
2848 tcu::TestContext& testCtx = group->getTestContext();
typedef struct
{
	deUint32	count;
	const char*	name;
	const char*	description;
} TestGroupCase;
2857 TestGroupCase fmtCases[] =
2859 { VK_FORMAT_R32_SINT, "r32i", "" },
2860 { VK_FORMAT_R32_UINT, "r32ui", "" },
2861 { VK_FORMAT_R32_SFLOAT, "r32f", "" },
2862 { VK_FORMAT_R32G32_SINT, "rg32i", "" },
2863 { VK_FORMAT_R32G32_UINT, "rg32ui", "" },
2864 { VK_FORMAT_R32G32_SFLOAT, "rg32f", "" },
2865 { VK_FORMAT_R32G32B32A32_SINT, "rgba32i", "" },
2866 { VK_FORMAT_R32G32B32A32_UINT, "rgba32ui", "" },
2867 { VK_FORMAT_R32G32B32A32_SFLOAT, "rgba32f", "" },
2868 { VK_FORMAT_R64_SINT, "r64i", "" },
2869 { VK_FORMAT_R64_UINT, "r64ui", "" },
2872 TestGroupCase fullDescCases[] =
2874 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, "uniform_buffer", "" },
2875 { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, "storage_buffer", "" },
2876 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, "uniform_buffer_dynamic", "" },
2877 { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, "storage_buffer_dynamic", "" },
2878 { VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, "uniform_texel_buffer", "" },
2879 { VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, "storage_texel_buffer", "" },
2880 { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image", "" },
2881 { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image", "" },
2882 { VERTEX_ATTRIBUTE_FETCH, "vertex_attribute_fetch", "" },
2885 TestGroupCase imgDescCases[] =
2887 { VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, "storage_image", "" },
2888 { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, "sampled_image", "" },
2891 TestGroupCase fullLenCases32Bit[] =
2893 { ~0U, "null_descriptor", "" },
2897 { 12, "len_12", "" },
2898 { 16, "len_16", "" },
2899 { 20, "len_20", "" },
2900 { 31, "len_31", "" },
2901 { 32, "len_32", "" },
2902 { 33, "len_33", "" },
2903 { 35, "len_35", "" },
2904 { 36, "len_36", "" },
2905 { 39, "len_39", "" },
2906 { 40, "len_40", "" },
2907 { 252, "len_252", "" },
2908 { 256, "len_256", "" },
2909 { 260, "len_260", "" },
2912 TestGroupCase fullLenCases64Bit[] =
2914 { ~0U, "null_descriptor", "" },
2917 { 16, "len_16", "" },
2918 { 24, "len_24", "" },
2919 { 32, "len_32", "" },
2920 { 40, "len_40", "" },
2921 { 62, "len_62", "" },
2922 { 64, "len_64", "" },
2923 { 66, "len_66", "" },
2924 { 70, "len_70", "" },
2925 { 72, "len_72", "" },
2926 { 78, "len_78", "" },
2927 { 80, "len_80", "" },
2928 { 504, "len_504", "" },
2929 { 512, "len_512", "" },
2930 { 520, "len_520", "" },
2933 TestGroupCase imgLenCases[] =
2938 TestGroupCase viewCases[] =
2940 { VK_IMAGE_VIEW_TYPE_1D, "1d", "" },
2941 { VK_IMAGE_VIEW_TYPE_2D, "2d", "" },
2942 { VK_IMAGE_VIEW_TYPE_3D, "3d", "" },
2943 { VK_IMAGE_VIEW_TYPE_CUBE, "cube", "" },
2944 { VK_IMAGE_VIEW_TYPE_1D_ARRAY, "1d_array", "" },
2945 { VK_IMAGE_VIEW_TYPE_2D_ARRAY, "2d_array", "" },
2946 { VK_IMAGE_VIEW_TYPE_CUBE_ARRAY, "cube_array", "" },
2949 TestGroupCase sampCases[] =
2951 { VK_SAMPLE_COUNT_1_BIT, "samples_1", "" },
2952 { VK_SAMPLE_COUNT_4_BIT, "samples_4", "" },
2955 TestGroupCase stageCases[] =
2957 { STAGE_COMPUTE, "comp", "compute" },
2958 { STAGE_FRAGMENT, "frag", "fragment" },
2959 { STAGE_VERTEX, "vert", "vertex" },
2960 { STAGE_RAYGEN, "rgen", "raygen" },
2963 TestGroupCase volCases[] =
2965 { 0, "nonvolatile", "" },
2966 { 1, "volatile", "" },
2969 TestGroupCase unrollCases[] =
2971 { 0, "dontunroll", "" },
2972 { 1, "unroll", "" },
2975 TestGroupCase tempCases[] =
2977 { 0, "notemplate", "" },
2978 { 1, "template", "" },
2981 TestGroupCase pushCases[] =
2987 TestGroupCase fmtQualCases[] =
2989 { 0, "no_fmt_qual", "" },
2990 { 1, "fmt_qual", "" },
2993 TestGroupCase readOnlyCases[] =
2995 { 0, "readwrite", "" },
2996 { 1, "readonly", "" },
2999 for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
3001 de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name, pushCases[pushNdx].name));
3002 for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
3004 de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name, tempCases[tempNdx].name));
3005 for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
3007 de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name, fmtCases[fmtNdx].name));
3009 int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));
3011 for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
3013 de::MovePtr<tcu::TestCaseGroup> unrollGroup(new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name, unrollCases[unrollNdx].name));
3014 for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
3016 de::MovePtr<tcu::TestCaseGroup> volGroup(new tcu::TestCaseGroup(testCtx, volCases[volNdx].name, volCases[volNdx].name));
3018 int numDescCases = robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
3019 TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;
3021 for (int descNdx = 0; descNdx < numDescCases; descNdx++)
3023 de::MovePtr<tcu::TestCaseGroup> descGroup(new tcu::TestCaseGroup(testCtx, descCases[descNdx].name, descCases[descNdx].name));
3025 for (int roNdx = 0; roNdx < DE_LENGTH_OF_ARRAY(readOnlyCases); roNdx++)
3027 de::MovePtr<tcu::TestCaseGroup> rwGroup(new tcu::TestCaseGroup(testCtx, readOnlyCases[roNdx].name, readOnlyCases[roNdx].name));
3029 // readonly cases are just for storage_buffer
3030 if (readOnlyCases[roNdx].count != 0 &&
3031 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER &&
3032 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
3035 for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
3037 de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name, fmtQualCases[fmtQualNdx].name));
3039 // format qualifier is only used for storage image and storage texel buffers
3040 if (fmtQualCases[fmtQualNdx].count &&
3041 !(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
3044 if (pushCases[pushNdx].count &&
3045 (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
3048 const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
3049 int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
3050 TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;
3052 for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
3054 if (lenCases[lenNdx].count != ~0U)
3056 bool bufferLen = lenCases[lenNdx].count != 0;
3057 bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
3058 if (bufferLen != bufferDesc)
3061 // Add template test cases only for the null_descriptor cases
3062 if (tempCases[tempNdx].count)
3066 if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
3067 ((lenCases[lenNdx].count % fmtSize) != 0) &&
3068 lenCases[lenNdx].count != ~0U)
3073 // "volatile" only applies to storage images/buffers
3074 if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
3077 de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name, lenCases[lenNdx].name));
3078 for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
3080 de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name, sampCases[sampNdx].name));
3081 for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
3083 if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
3084 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
3085 descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
3087 // buffer descriptors don't have different dimensionalities. Only test "1D"
3091 if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
3092 sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
3097 de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name, viewCases[viewNdx].name));
3098 for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
3100 Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
3101 VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
3102 VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
3103 if ((Stage)stageCases[stageNdx].count == STAGE_RAYGEN)
3105 allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
3106 allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;
3109 if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
3110 currentStage != STAGE_VERTEX)
3113 deUint32 imageDim[3] = {5, 11, 6};
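// Cube and cube-array views need square faces, so height is forced to match width below.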
3114 if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
3115 viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
3116 imageDim[1] = imageDim[0];
3120 (VkFormat)fmtCases[fmtNdx].count, // VkFormat format;
3121 currentStage, // Stage stage;
3122 allShaderStages, // VkFlags allShaderStages;
3123 allPipelineStages, // VkFlags allPipelineStages;
3124 (int)descCases[descNdx].count, // VkDescriptorType descriptorType;
3125 (VkImageViewType)viewCases[viewNdx].count, // VkImageViewType viewType;
3126 (VkSampleCountFlagBits)sampCases[sampNdx].count, // VkSampleCountFlagBits samples;
3127 (int)lenCases[lenNdx].count, // int bufferLen;
3128 (bool)unrollCases[unrollNdx].count, // bool unroll;
3129 (bool)volCases[volNdx].count, // bool vol;
3130 (bool)(lenCases[lenNdx].count == ~0U), // bool nullDescriptor
3131 (bool)tempCases[tempNdx].count, // bool useTemplate
3132 (bool)fmtQualCases[fmtQualNdx].count, // bool formatQualifier
3133 (bool)pushCases[pushNdx].count, // bool pushDescriptor;
3134 (bool)robustness2, // bool testRobustness2;
3135 { imageDim[0], imageDim[1], imageDim[2] }, // deUint32 imageDim[3];
3136 (bool)(readOnlyCases[roNdx].count == 1), // bool readOnly;
3139 viewGroup->addChild(new RobustnessExtsTestCase(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].name, c));
3141 sampGroup->addChild(viewGroup.release());
3143 lenGroup->addChild(sampGroup.release());
3145 fmtQualGroup->addChild(lenGroup.release());
3147 // Put storage_buffer tests in separate readonly vs readwrite groups. Other types
3148 // go directly into descGroup
3149 if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3150 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3151 rwGroup->addChild(fmtQualGroup.release());
3153 descGroup->addChild(fmtQualGroup.release());
3156 if (descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
3157 descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
3158 descGroup->addChild(rwGroup.release());
3161 volGroup->addChild(descGroup.release());
3163 unrollGroup->addChild(volGroup.release());
3165 fmtGroup->addChild(unrollGroup.release());
3167 tempGroup->addChild(fmtGroup.release());
3169 pushGroup->addChild(tempGroup.release());
3171 group->addChild(pushGroup.release());
3175 static void createRobustness2Tests (tcu::TestCaseGroup* group)
3177 createTests(group, /*robustness2=*/true);
3180 static void createImageRobustnessTests (tcu::TestCaseGroup* group)
3182 createTests(group, /*robustness2=*/false);
3185 static void cleanupGroup (tcu::TestCaseGroup* group)
3188 // Destroy singleton objects.
3189 Robustness2Int64AtomicsSingleton::destroy();
3190 ImageRobustnessInt64AtomicsSingleton::destroy();
3191 ImageRobustnessSingleton::destroy();
3192 Robustness2Singleton::destroy();
3195 tcu::TestCaseGroup* createRobustness2Tests (tcu::TestContext& testCtx)
3197 return createTestGroup(testCtx, "robustness2", "VK_EXT_robustness2 tests",
3198 createRobustness2Tests, cleanupGroup);
3201 tcu::TestCaseGroup* createImageRobustnessTests (tcu::TestContext& testCtx)
3203 return createTestGroup(testCtx, "image_robustness", "VK_EXT_image_robustness tests",
3204 createImageRobustnessTests, cleanupGroup);