1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
13 * http://www.apache.org/licenses/LICENSE-2.0
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
23 * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
24 *//*--------------------------------------------------------------------*/
26 #include "vktOpaqueTypeIndexingTests.hpp"
28 #include "vkRefUtil.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkTypeUtil.hpp"
32 #include "vkQueryUtil.hpp"
34 #include "tcuTexture.hpp"
35 #include "tcuTestLog.hpp"
36 #include "tcuVectorUtil.hpp"
37 #include "tcuTextureUtil.hpp"
39 #include "deStringUtil.hpp"
40 #include "deSharedPtr.hpp"
41 #include "deRandom.hpp"
42 #include "deSTLUtil.hpp"
44 #include "vktShaderExecutor.hpp"
50 namespace shaderexecutor
63 typedef SharedPtr<Unique<VkSampler> > VkSamplerSp;
70 Buffer (Context& context, VkBufferUsageFlags usage, size_t size);
72 VkBuffer getBuffer (void) const { return *m_buffer; }
73 void* getHostPtr (void) const { return m_allocation->getHostPtr(); }
75 void invalidate (void);
78 const DeviceInterface& m_vkd;
79 const VkDevice m_device;
80 const Unique<VkBuffer> m_buffer;
81 const UniquePtr<Allocation> m_allocation;
84 typedef de::SharedPtr<Buffer> BufferSp;
86 Move<VkBuffer> createBuffer (const DeviceInterface& vkd, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usageFlags)
88 const VkBufferCreateInfo createInfo =
90 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
92 (VkBufferCreateFlags)0,
95 VK_SHARING_MODE_EXCLUSIVE,
99 return createBuffer(vkd, device, &createInfo);
102 MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkBuffer buffer)
104 MovePtr<Allocation> alloc (allocator.allocate(getBufferMemoryRequirements(vkd, device, buffer), MemoryRequirement::HostVisible));
106 VK_CHECK(vkd.bindBufferMemory(device, buffer, alloc->getMemory(), alloc->getOffset()));
111 Buffer::Buffer (Context& context, VkBufferUsageFlags usage, size_t size)
112 : m_vkd (context.getDeviceInterface())
113 , m_device (context.getDevice())
114 , m_buffer (createBuffer (context.getDeviceInterface(),
118 , m_allocation (allocateAndBindMemory (context.getDeviceInterface(),
120 context.getDefaultAllocator(),
125 void Buffer::flush (void)
127 flushMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
130 void Buffer::invalidate (void)
132 invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
135 MovePtr<Buffer> createUniformIndexBuffer (Context& context, int numIndices, const int* indices)
137 MovePtr<Buffer> buffer (new Buffer(context, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, sizeof(int)*numIndices));
138 int* const bufPtr = (int*)buffer->getHostPtr();
140 for (int ndx = 0; ndx < numIndices; ++ndx)
141 bufPtr[ndx] = indices[ndx];
152 INDEX_EXPR_TYPE_CONST_LITERAL = 0,
153 INDEX_EXPR_TYPE_CONST_EXPRESSION,
154 INDEX_EXPR_TYPE_UNIFORM,
155 INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,
165 TEXTURE_TYPE_2D_ARRAY,
171 class OpaqueTypeIndexingCase : public TestCase
174 OpaqueTypeIndexingCase (tcu::TestContext& testCtx,
176 const char* description,
177 const glu::ShaderType shaderType,
178 const IndexExprType indexExprType);
179 virtual ~OpaqueTypeIndexingCase (void);
181 virtual void initPrograms (vk::SourceCollections& programCollection) const
183 generateSources(m_shaderType, m_shaderSpec, programCollection);
188 const glu::ShaderType m_shaderType;
189 const IndexExprType m_indexExprType;
190 ShaderSpec m_shaderSpec;
193 OpaqueTypeIndexingCase::OpaqueTypeIndexingCase (tcu::TestContext& testCtx,
195 const char* description,
196 const glu::ShaderType shaderType,
197 const IndexExprType indexExprType)
198 : TestCase (testCtx, name, description)
200 , m_shaderType (shaderType)
201 , m_indexExprType (indexExprType)
205 OpaqueTypeIndexingCase::~OpaqueTypeIndexingCase (void)
209 class OpaqueTypeIndexingTestInstance : public TestInstance
212 OpaqueTypeIndexingTestInstance (Context& context,
213 const glu::ShaderType shaderType,
214 const ShaderSpec& shaderSpec,
216 const IndexExprType indexExprType);
217 virtual ~OpaqueTypeIndexingTestInstance (void);
219 virtual tcu::TestStatus iterate (void) = 0;
222 void checkSupported (const VkDescriptorType descriptorType);
225 tcu::TestContext& m_testCtx;
226 const glu::ShaderType m_shaderType;
227 const ShaderSpec& m_shaderSpec;
229 const IndexExprType m_indexExprType;
232 OpaqueTypeIndexingTestInstance::OpaqueTypeIndexingTestInstance (Context& context,
233 const glu::ShaderType shaderType,
234 const ShaderSpec& shaderSpec,
236 const IndexExprType indexExprType)
237 : TestInstance (context)
238 , m_testCtx (context.getTestContext())
239 , m_shaderType (shaderType)
240 , m_shaderSpec (shaderSpec)
242 , m_indexExprType (indexExprType)
246 OpaqueTypeIndexingTestInstance::~OpaqueTypeIndexingTestInstance (void)
250 void OpaqueTypeIndexingTestInstance::checkSupported (const VkDescriptorType descriptorType)
252 const VkPhysicalDeviceFeatures& deviceFeatures = m_context.getDeviceFeatures();
254 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
256 switch (descriptorType)
258 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
259 if (!deviceFeatures.shaderSampledImageArrayDynamicIndexing)
260 TCU_THROW(NotSupportedError, "Dynamic indexing of sampler arrays is not supported");
263 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
264 if (!deviceFeatures.shaderUniformBufferArrayDynamicIndexing)
265 TCU_THROW(NotSupportedError, "Dynamic indexing of uniform buffer arrays is not supported");
268 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
269 if (!deviceFeatures.shaderStorageBufferArrayDynamicIndexing)
270 TCU_THROW(NotSupportedError, "Dynamic indexing of storage buffer arrays is not supported");
279 static void declareUniformIndexVars (std::ostream& str, deUint32 bindingLocation, const char* varPrefix, int numVars)
281 str << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = " << bindingLocation << ", std140) uniform Indices\n{\n";
283 for (int varNdx = 0; varNdx < numVars; varNdx++)
284 str << "\thighp int " << varPrefix << varNdx << ";\n";
289 static TextureType getTextureType (glu::DataType samplerType)
293 case glu::TYPE_SAMPLER_1D:
294 case glu::TYPE_INT_SAMPLER_1D:
295 case glu::TYPE_UINT_SAMPLER_1D:
296 case glu::TYPE_SAMPLER_1D_SHADOW:
297 return TEXTURE_TYPE_1D;
299 case glu::TYPE_SAMPLER_2D:
300 case glu::TYPE_INT_SAMPLER_2D:
301 case glu::TYPE_UINT_SAMPLER_2D:
302 case glu::TYPE_SAMPLER_2D_SHADOW:
303 return TEXTURE_TYPE_2D;
305 case glu::TYPE_SAMPLER_CUBE:
306 case glu::TYPE_INT_SAMPLER_CUBE:
307 case glu::TYPE_UINT_SAMPLER_CUBE:
308 case glu::TYPE_SAMPLER_CUBE_SHADOW:
309 return TEXTURE_TYPE_CUBE;
311 case glu::TYPE_SAMPLER_2D_ARRAY:
312 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
313 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
314 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
315 return TEXTURE_TYPE_2D_ARRAY;
317 case glu::TYPE_SAMPLER_3D:
318 case glu::TYPE_INT_SAMPLER_3D:
319 case glu::TYPE_UINT_SAMPLER_3D:
320 return TEXTURE_TYPE_3D;
323 throw tcu::InternalError("Invalid sampler type");
327 static bool isShadowSampler (glu::DataType samplerType)
329 return samplerType == glu::TYPE_SAMPLER_1D_SHADOW ||
330 samplerType == glu::TYPE_SAMPLER_2D_SHADOW ||
331 samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW ||
332 samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW;
335 static glu::DataType getSamplerOutputType (glu::DataType samplerType)
339 case glu::TYPE_SAMPLER_1D:
340 case glu::TYPE_SAMPLER_2D:
341 case glu::TYPE_SAMPLER_CUBE:
342 case glu::TYPE_SAMPLER_2D_ARRAY:
343 case glu::TYPE_SAMPLER_3D:
344 return glu::TYPE_FLOAT_VEC4;
346 case glu::TYPE_SAMPLER_1D_SHADOW:
347 case glu::TYPE_SAMPLER_2D_SHADOW:
348 case glu::TYPE_SAMPLER_CUBE_SHADOW:
349 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
350 return glu::TYPE_FLOAT;
352 case glu::TYPE_INT_SAMPLER_1D:
353 case glu::TYPE_INT_SAMPLER_2D:
354 case glu::TYPE_INT_SAMPLER_CUBE:
355 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
356 case glu::TYPE_INT_SAMPLER_3D:
357 return glu::TYPE_INT_VEC4;
359 case glu::TYPE_UINT_SAMPLER_1D:
360 case glu::TYPE_UINT_SAMPLER_2D:
361 case glu::TYPE_UINT_SAMPLER_CUBE:
362 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
363 case glu::TYPE_UINT_SAMPLER_3D:
364 return glu::TYPE_UINT_VEC4;
367 throw tcu::InternalError("Invalid sampler type");
371 static tcu::TextureFormat getSamplerTextureFormat (glu::DataType samplerType)
373 const glu::DataType outType = getSamplerOutputType(samplerType);
374 const glu::DataType outScalarType = glu::getDataTypeScalarType(outType);
376 switch (outScalarType)
378 case glu::TYPE_FLOAT:
379 if (isShadowSampler(samplerType))
380 return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
382 return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
384 case glu::TYPE_INT: return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
385 case glu::TYPE_UINT: return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);
388 throw tcu::InternalError("Invalid sampler type");
392 static glu::DataType getSamplerCoordType (glu::DataType samplerType)
394 const TextureType texType = getTextureType(samplerType);
399 case TEXTURE_TYPE_1D: numCoords = 1; break;
400 case TEXTURE_TYPE_2D: numCoords = 2; break;
401 case TEXTURE_TYPE_2D_ARRAY: numCoords = 3; break;
402 case TEXTURE_TYPE_CUBE: numCoords = 3; break;
403 case TEXTURE_TYPE_3D: numCoords = 3; break;
408 if (isShadowSampler(samplerType))
411 DE_ASSERT(de::inRange(numCoords, 1, 4));
413 return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
416 static void fillTextureData (const tcu::PixelBufferAccess& access, de::Random& rnd)
418 DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);
420 if (access.getFormat().order == tcu::TextureFormat::D)
422 // \note Texture uses odd values, lookup even values to avoid precision issues.
423 const float values[] = { 0.1f, 0.3f, 0.5f, 0.7f, 0.9f };
425 for (int ndx = 0; ndx < access.getWidth(); ndx++)
426 access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
430 TCU_CHECK_INTERNAL(access.getFormat().order == tcu::TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);
432 for (int ndx = 0; ndx < access.getWidth(); ndx++)
433 *((deUint32*)access.getDataPtr() + ndx) = rnd.getUint32();
437 static vk::VkImageType getVkImageType (TextureType texType)
441 case TEXTURE_TYPE_1D: return vk::VK_IMAGE_TYPE_1D;
442 case TEXTURE_TYPE_2D:
443 case TEXTURE_TYPE_2D_ARRAY: return vk::VK_IMAGE_TYPE_2D;
444 case TEXTURE_TYPE_CUBE: return vk::VK_IMAGE_TYPE_2D;
445 case TEXTURE_TYPE_3D: return vk::VK_IMAGE_TYPE_3D;
447 DE_FATAL("Impossible");
448 return (vk::VkImageType)0;
452 static vk::VkImageViewType getVkImageViewType (TextureType texType)
456 case TEXTURE_TYPE_1D: return vk::VK_IMAGE_VIEW_TYPE_1D;
457 case TEXTURE_TYPE_2D: return vk::VK_IMAGE_VIEW_TYPE_2D;
458 case TEXTURE_TYPE_2D_ARRAY: return vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY;
459 case TEXTURE_TYPE_CUBE: return vk::VK_IMAGE_VIEW_TYPE_CUBE;
460 case TEXTURE_TYPE_3D: return vk::VK_IMAGE_VIEW_TYPE_3D;
462 DE_FATAL("Impossible");
463 return (vk::VkImageViewType)0;
467 //! Test image with 1-pixel dimensions and no mipmaps
471 TestImage (Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue);
473 VkImageView getImageView (void) const { return *m_imageView; }
476 const Unique<VkImage> m_image;
477 const UniquePtr<Allocation> m_allocation;
478 const Unique<VkImageView> m_imageView;
481 Move<VkImage> createTestImage (const DeviceInterface& vkd, VkDevice device, TextureType texType, tcu::TextureFormat format)
483 const VkImageCreateInfo createInfo =
485 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
487 (texType == TEXTURE_TYPE_CUBE ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : (VkImageCreateFlags)0),
488 getVkImageType(texType),
489 mapTextureFormat(format),
490 makeExtent3D(1, 1, 1),
492 (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u,
493 VK_SAMPLE_COUNT_1_BIT,
494 VK_IMAGE_TILING_OPTIMAL,
495 VK_IMAGE_USAGE_SAMPLED_BIT|VK_IMAGE_USAGE_TRANSFER_DST_BIT,
496 VK_SHARING_MODE_EXCLUSIVE,
499 VK_IMAGE_LAYOUT_UNDEFINED
502 return createImage(vkd, device, &createInfo);
505 de::MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkImage image)
507 de::MovePtr<Allocation> alloc = allocator.allocate(getImageMemoryRequirements(vkd, device, image), MemoryRequirement::Any);
509 VK_CHECK(vkd.bindImageMemory(device, image, alloc->getMemory(), alloc->getOffset()));
514 Move<VkImageView> createTestImageView (const DeviceInterface& vkd, VkDevice device, VkImage image, TextureType texType, tcu::TextureFormat format)
516 const bool isDepthImage = format.order == tcu::TextureFormat::D;
517 const VkImageViewCreateInfo createInfo =
519 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
521 (VkImageViewCreateFlags)0,
523 getVkImageViewType(texType),
524 mapTextureFormat(format),
526 VK_COMPONENT_SWIZZLE_IDENTITY,
527 VK_COMPONENT_SWIZZLE_IDENTITY,
528 VK_COMPONENT_SWIZZLE_IDENTITY,
529 VK_COMPONENT_SWIZZLE_IDENTITY,
532 (VkImageAspectFlags)(isDepthImage ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT),
536 (texType == TEXTURE_TYPE_CUBE ? 6u : 1u)
540 return createImageView(vkd, device, &createInfo);
543 TestImage::TestImage (Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue)
544 : m_image (createTestImage (context.getDeviceInterface(), context.getDevice(), texType, format))
545 , m_allocation (allocateAndBindMemory (context.getDeviceInterface(), context.getDevice(), context.getDefaultAllocator(), *m_image))
546 , m_imageView (createTestImageView (context.getDeviceInterface(), context.getDevice(), *m_image, texType, format))
548 const DeviceInterface& vkd = context.getDeviceInterface();
549 const VkDevice device = context.getDevice();
551 const size_t pixelSize = (size_t)format.getPixelSize();
552 const deUint32 numLayers = (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u;
553 const size_t numReplicas = (size_t)numLayers;
554 const size_t stagingBufferSize = pixelSize*numReplicas;
556 const VkBufferCreateInfo stagingBufferInfo =
558 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
560 (VkBufferCreateFlags)0u,
561 (VkDeviceSize)stagingBufferSize,
562 (VkBufferCreateFlags)VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
563 VK_SHARING_MODE_EXCLUSIVE,
567 const Unique<VkBuffer> stagingBuffer (createBuffer(vkd, device, &stagingBufferInfo));
568 const UniquePtr<Allocation> alloc (context.getDefaultAllocator().allocate(getBufferMemoryRequirements(vkd, device, *stagingBuffer), MemoryRequirement::HostVisible));
570 VK_CHECK(vkd.bindBufferMemory(device, *stagingBuffer, alloc->getMemory(), alloc->getOffset()));
572 for (size_t ndx = 0; ndx < numReplicas; ++ndx)
573 deMemcpy((deUint8*)alloc->getHostPtr() + ndx*pixelSize, colorValue, pixelSize);
575 flushMappedMemoryRange(vkd, device, alloc->getMemory(), alloc->getOffset(), VK_WHOLE_SIZE);
578 const Unique<VkCommandPool> cmdPool (createCommandPool(vkd, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, context.getUniversalQueueFamilyIndex()));
579 const Unique<VkCommandBuffer> cmdBuf (allocateCommandBuffer(vkd, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
580 const VkCommandBufferBeginInfo beginInfo =
582 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
584 (VkCommandBufferUsageFlags)VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
585 (const VkCommandBufferInheritanceInfo*)DE_NULL,
587 const VkImageAspectFlags imageAspect = (VkImageAspectFlags)(format.order == tcu::TextureFormat::D ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT);
588 const VkBufferImageCopy copyInfo =
602 const VkImageMemoryBarrier preCopyBarrier =
604 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
607 (VkAccessFlags)VK_ACCESS_TRANSFER_WRITE_BIT,
608 VK_IMAGE_LAYOUT_UNDEFINED,
609 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
610 VK_QUEUE_FAMILY_IGNORED,
611 VK_QUEUE_FAMILY_IGNORED,
621 const VkImageMemoryBarrier postCopyBarrier =
623 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
625 (VkAccessFlags)VK_ACCESS_TRANSFER_WRITE_BIT,
626 (VkAccessFlags)VK_ACCESS_SHADER_READ_BIT,
627 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
628 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
629 VK_QUEUE_FAMILY_IGNORED,
630 VK_QUEUE_FAMILY_IGNORED,
641 VK_CHECK(vkd.beginCommandBuffer(*cmdBuf, &beginInfo));
642 vkd.cmdPipelineBarrier(*cmdBuf,
643 (VkPipelineStageFlags)VK_PIPELINE_STAGE_HOST_BIT,
644 (VkPipelineStageFlags)VK_PIPELINE_STAGE_TRANSFER_BIT,
645 (VkDependencyFlags)0u,
647 (const VkMemoryBarrier*)DE_NULL,
649 (const VkBufferMemoryBarrier*)DE_NULL,
652 vkd.cmdCopyBufferToImage(*cmdBuf, *stagingBuffer, *m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, ©Info);
653 vkd.cmdPipelineBarrier(*cmdBuf,
654 (VkPipelineStageFlags)VK_PIPELINE_STAGE_TRANSFER_BIT,
655 (VkPipelineStageFlags)VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
656 (VkDependencyFlags)0u,
658 (const VkMemoryBarrier*)DE_NULL,
660 (const VkBufferMemoryBarrier*)DE_NULL,
663 VK_CHECK(vkd.endCommandBuffer(*cmdBuf));
666 const Unique<VkFence> fence (createFence(vkd, device));
667 const VkSubmitInfo submitInfo =
669 VK_STRUCTURE_TYPE_SUBMIT_INFO,
672 (const VkSemaphore*)DE_NULL,
673 (const VkPipelineStageFlags*)DE_NULL,
677 (const VkSemaphore*)DE_NULL,
680 VK_CHECK(vkd.queueSubmit(context.getUniversalQueue(), 1u, &submitInfo, *fence));
681 VK_CHECK(vkd.waitForFences(device, 1u, &fence.get(), VK_TRUE, ~0ull));
686 typedef SharedPtr<TestImage> TestImageSp;
688 // SamplerIndexingCaseInstance
690 class SamplerIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
695 NUM_INVOCATIONS = 64,
700 SamplerIndexingCaseInstance (Context& context,
701 const glu::ShaderType shaderType,
702 const ShaderSpec& shaderSpec,
704 glu::DataType samplerType,
705 const IndexExprType indexExprType,
706 const std::vector<int>& lookupIndices);
707 virtual ~SamplerIndexingCaseInstance (void);
709 virtual tcu::TestStatus iterate (void);
712 const glu::DataType m_samplerType;
713 const std::vector<int> m_lookupIndices;
716 SamplerIndexingCaseInstance::SamplerIndexingCaseInstance (Context& context,
717 const glu::ShaderType shaderType,
718 const ShaderSpec& shaderSpec,
720 glu::DataType samplerType,
721 const IndexExprType indexExprType,
722 const std::vector<int>& lookupIndices)
723 : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, name, indexExprType)
724 , m_samplerType (samplerType)
725 , m_lookupIndices (lookupIndices)
729 SamplerIndexingCaseInstance::~SamplerIndexingCaseInstance (void)
733 bool isIntegerFormat (const tcu::TextureFormat& format)
735 const tcu::TextureChannelClass chnClass = tcu::getTextureChannelClass(format.type);
737 return chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ||
738 chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER;
741 tcu::TestStatus SamplerIndexingCaseInstance::iterate (void)
743 const int numInvocations = SamplerIndexingCaseInstance::NUM_INVOCATIONS;
744 const int numSamplers = SamplerIndexingCaseInstance::NUM_SAMPLERS;
745 const int numLookups = SamplerIndexingCaseInstance::NUM_LOOKUPS;
746 const glu::DataType coordType = getSamplerCoordType(m_samplerType);
747 const glu::DataType outputType = getSamplerOutputType(m_samplerType);
748 const tcu::TextureFormat texFormat = getSamplerTextureFormat(m_samplerType);
749 const int outLookupStride = numInvocations*getDataTypeScalarSize(outputType);
750 vector<float> coords;
751 vector<deUint32> outData;
752 vector<deUint8> texData (numSamplers * texFormat.getPixelSize());
753 const tcu::PixelBufferAccess refTexAccess (texFormat, numSamplers, 1, 1, &texData[0]);
754 de::Random rnd (deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
755 const TextureType texType = getTextureType(m_samplerType);
756 const tcu::Sampler::FilterMode filterMode = (isShadowSampler(m_samplerType) || isIntegerFormat(texFormat)) ? tcu::Sampler::NEAREST : tcu::Sampler::LINEAR;
758 // The shadow sampler with unnormalized coordinates is only used with the reference texture. Actual samplers in shaders use normalized coords.
759 const tcu::Sampler refSampler = isShadowSampler(m_samplerType)
760 ? tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
761 filterMode, filterMode, 0.0f, false /* non-normalized */,
762 tcu::Sampler::COMPAREMODE_LESS)
763 : tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
764 filterMode, filterMode);
766 const DeviceInterface& vkd = m_context.getDeviceInterface();
767 const VkDevice device = m_context.getDevice();
768 vector<TestImageSp> images;
769 vector<VkSamplerSp> samplers;
770 MovePtr<Buffer> indexBuffer;
771 Move<VkDescriptorSetLayout> extraResourcesLayout;
772 Move<VkDescriptorPool> extraResourcesSetPool;
773 Move<VkDescriptorSet> extraResourcesSet;
775 checkSupported(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
777 coords.resize(numInvocations * getDataTypeScalarSize(coordType));
779 if (texType == TEXTURE_TYPE_CUBE)
781 if (isShadowSampler(m_samplerType))
783 for (size_t i = 0; i < coords.size() / 4; i++)
785 coords[4 * i] = 1.0f;
786 coords[4 * i + 1] = coords[4 * i + 2] = coords[4 * i + 3] = 0.0f;
791 for (size_t i = 0; i < coords.size() / 3; i++)
793 coords[3 * i] = 1.0f;
794 coords[3 * i + 1] = coords[3 * i + 2] = 0.0f;
799 if (isShadowSampler(m_samplerType))
801 // Use different comparison value per invocation.
802 // \note Texture uses odd values, comparison even values.
803 const int numCoordComps = getDataTypeScalarSize(coordType);
804 const float cmpValues[] = { 0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f };
806 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
807 coords[invocationNdx*numCoordComps + (numCoordComps-1)] = rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
810 fillTextureData(refTexAccess, rnd);
812 outData.resize(numLookups*outLookupStride);
814 for (int ndx = 0; ndx < numSamplers; ++ndx)
816 images.push_back(TestImageSp(new TestImage(m_context, texType, texFormat, &texData[ndx * texFormat.getPixelSize()])));
819 tcu::Sampler samplerCopy (refSampler);
820 samplerCopy.normalizedCoords = true;
823 const VkSamplerCreateInfo samplerParams = mapSampler(samplerCopy, texFormat);
824 samplers.push_back(VkSamplerSp(new Unique<VkSampler>(createSampler(vkd, device, &samplerParams))));
829 if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
830 indexBuffer = createUniformIndexBuffer(m_context, numLookups, &m_lookupIndices[0]);
833 const VkDescriptorSetLayoutBinding bindings[] =
835 { 0u, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (deUint32)numSamplers, VK_SHADER_STAGE_ALL, DE_NULL },
836 { (deUint32)numSamplers, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL }
838 const VkDescriptorSetLayoutCreateInfo layoutInfo =
840 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
842 (VkDescriptorSetLayoutCreateFlags)0u,
843 DE_LENGTH_OF_ARRAY(bindings),
847 extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
851 const VkDescriptorPoolSize poolSizes[] =
853 { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (deUint32)numSamplers },
854 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, }
856 const VkDescriptorPoolCreateInfo poolInfo =
858 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
860 (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
862 DE_LENGTH_OF_ARRAY(poolSizes),
866 extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
870 const VkDescriptorSetAllocateInfo allocInfo =
872 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
874 *extraResourcesSetPool,
876 &extraResourcesLayout.get(),
879 extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
883 vector<VkDescriptorImageInfo> imageInfos (numSamplers);
884 const VkWriteDescriptorSet descriptorWrite =
886 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
890 0u, // dstArrayElement
891 (deUint32)numSamplers,
892 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
894 (const VkDescriptorBufferInfo*)DE_NULL,
895 (const VkBufferView*)DE_NULL,
898 for (int ndx = 0; ndx < numSamplers; ++ndx)
900 imageInfos[ndx].sampler = **samplers[ndx];
901 imageInfos[ndx].imageView = images[ndx]->getImageView();
902 imageInfos[ndx].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
905 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
910 const VkDescriptorBufferInfo bufferInfo =
912 indexBuffer->getBuffer(),
916 const VkWriteDescriptorSet descriptorWrite =
918 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
921 (deUint32)numSamplers, // dstBinding
922 0u, // dstArrayElement
924 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
925 (const VkDescriptorImageInfo*)DE_NULL,
927 (const VkBufferView*)DE_NULL,
930 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
934 std::vector<void*> inputs;
935 std::vector<void*> outputs;
936 std::vector<int> expandedIndices;
937 UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
939 inputs.push_back(&coords[0]);
941 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
943 expandedIndices.resize(numInvocations * m_lookupIndices.size());
944 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
946 for (int invNdx = 0; invNdx < numInvocations; invNdx++)
947 expandedIndices[lookupNdx*numInvocations + invNdx] = m_lookupIndices[lookupNdx];
950 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
951 inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
954 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
955 outputs.push_back(&outData[outLookupStride*lookupNdx]);
957 executor->execute(numInvocations, &inputs[0], &outputs[0], *extraResourcesSet);
961 tcu::TestLog& log = m_context.getTestContext().getLog();
962 tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");
964 if (isShadowSampler(m_samplerType))
966 const int numCoordComps = getDataTypeScalarSize(coordType);
968 TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);
970 // Each invocation may have different results.
971 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
973 const float coord = coords[invocationNdx*numCoordComps + (numCoordComps-1)];
975 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
977 const int texNdx = m_lookupIndices[lookupNdx];
978 const float result = *((const float*)(const deUint8*)&outData[lookupNdx*outLookupStride + invocationNdx]);
979 const float reference = refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord, (float)texNdx, 0.0f, tcu::IVec3(0));
981 if (de::abs(result-reference) > 0.005f)
983 log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup " << lookupNdx << ": expected "
984 << reference << ", got " << result
985 << tcu::TestLog::EndMessage;
987 if (testResult.getCode() == QP_TEST_RESULT_PASS)
988 testResult = tcu::TestStatus::fail("Got invalid lookup result");
995 TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);
997 // Validate results from first invocation
998 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
1000 const int texNdx = m_lookupIndices[lookupNdx];
1001 const deUint8* resPtr = (const deUint8*)&outData[lookupNdx*outLookupStride];
1004 if (outputType == glu::TYPE_FLOAT_VEC4)
1006 const float threshold = 1.0f / 256.0f;
1007 const tcu::Vec4 reference = refTexAccess.getPixel(texNdx, 0);
1008 const float* floatPtr = (const float*)resPtr;
1009 const tcu::Vec4 result (floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);
1011 isOk = boolAll(lessThanEqual(abs(reference-result), tcu::Vec4(threshold)));
1015 log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
1016 << reference << ", got " << result
1017 << tcu::TestLog::EndMessage;
1022 const tcu::UVec4 reference = refTexAccess.getPixelUint(texNdx, 0);
1023 const deUint32* uintPtr = (const deUint32*)resPtr;
1024 const tcu::UVec4 result (uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);
1026 isOk = boolAll(equal(reference, result));
1030 log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
1031 << reference << ", got " << result
1032 << tcu::TestLog::EndMessage;
1036 if (!isOk && testResult.getCode() == QP_TEST_RESULT_PASS)
1037 testResult = tcu::TestStatus::fail("Got invalid lookup result");
1040 // Check results of other invocations against first one
1041 for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
1043 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
1045 const deUint32* refPtr = &outData[lookupNdx*outLookupStride];
1046 const deUint32* resPtr = refPtr + invocationNdx*4;
1049 for (int ndx = 0; ndx < 4; ndx++)
1050 isOk = isOk && (refPtr[ndx] == resPtr[ndx]);
1054 log << tcu::TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
1055 << tcu::formatArray(tcu::Format::HexIterator<deUint32>(resPtr), tcu::Format::HexIterator<deUint32>(resPtr+4))
1056 << " for lookup " << lookupNdx << " doesn't match result from first invocation "
1057 << tcu::formatArray(tcu::Format::HexIterator<deUint32>(refPtr), tcu::Format::HexIterator<deUint32>(refPtr+4))
1058 << tcu::TestLog::EndMessage;
1060 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1061 testResult = tcu::TestStatus::fail("Inconsistent lookup results");
1071 class SamplerIndexingCase : public OpaqueTypeIndexingCase
1074 SamplerIndexingCase (tcu::TestContext& testCtx,
1076 const char* description,
1077 const glu::ShaderType shaderType,
1078 glu::DataType samplerType,
1079 IndexExprType indexExprType);
1080 virtual ~SamplerIndexingCase (void);
1082 virtual TestInstance* createInstance (Context& ctx) const;
1085 SamplerIndexingCase (const SamplerIndexingCase&);
1086 SamplerIndexingCase& operator= (const SamplerIndexingCase&);
1088 void createShaderSpec (void);
1090 const glu::DataType m_samplerType;
1091 const int m_numSamplers;
1092 const int m_numLookups;
1093 std::vector<int> m_lookupIndices;
1096 SamplerIndexingCase::SamplerIndexingCase (tcu::TestContext& testCtx,
1098 const char* description,
1099 const glu::ShaderType shaderType,
1100 glu::DataType samplerType,
1101 IndexExprType indexExprType)
1102 : OpaqueTypeIndexingCase (testCtx, name, description, shaderType, indexExprType)
1103 , m_samplerType (samplerType)
1104 , m_numSamplers (SamplerIndexingCaseInstance::NUM_SAMPLERS)
1105 , m_numLookups (SamplerIndexingCaseInstance::NUM_LOOKUPS)
1106 , m_lookupIndices (m_numLookups)
1112 SamplerIndexingCase::~SamplerIndexingCase (void)
1116 TestInstance* SamplerIndexingCase::createInstance (Context& ctx) const
1118 return new SamplerIndexingCaseInstance(ctx,
1127 void SamplerIndexingCase::createShaderSpec (void)
1129 de::Random rnd (deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1130 const char* samplersName = "texSampler";
1131 const char* coordsName = "coords";
1132 const char* indicesPrefix = "index";
1133 const char* resultPrefix = "result";
1134 const glu::DataType coordType = getSamplerCoordType(m_samplerType);
1135 const glu::DataType outType = getSamplerOutputType(m_samplerType);
1136 std::ostringstream global, code;
1138 for (int ndx = 0; ndx < m_numLookups; ndx++)
1139 m_lookupIndices[ndx] = rnd.getInt(0, m_numSamplers-1);
1141 m_shaderSpec.inputs.push_back(Symbol(coordsName, glu::VarType(coordType, glu::PRECISION_HIGHP)));
1143 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1144 global << "#extension GL_EXT_gpu_shader5 : require\n";
1146 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1147 global << "const highp int indexBase = 1;\n";
1150 "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << m_numSamplers << "];\n";
1152 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1154 for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1156 const std::string varName = indicesPrefix + de::toString(lookupNdx);
1157 m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1160 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1161 declareUniformIndexVars(global, (deUint32)m_numSamplers, indicesPrefix, m_numLookups);
1163 for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1165 const std::string varName = resultPrefix + de::toString(lookupNdx);
1166 m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(outType, glu::PRECISION_HIGHP)));
1169 for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1171 code << resultPrefix << "" << lookupNdx << " = texture(" << samplersName << "[";
1173 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1174 code << m_lookupIndices[lookupNdx];
1175 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1176 code << "indexBase + " << (m_lookupIndices[lookupNdx]-1);
1178 code << indicesPrefix << lookupNdx;
1180 code << "], " << coordsName << ");\n";
1183 m_shaderSpec.globalDeclarations = global.str();
1184 m_shaderSpec.source = code.str();
1189 BLOCKTYPE_UNIFORM = 0,
1195 class BlockArrayIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
1200 NUM_INVOCATIONS = 32,
1207 FLAG_USE_STORAGE_BUFFER = (1<<0) // Use VK_KHR_storage_buffer_storage_class
1210 BlockArrayIndexingCaseInstance (Context& context,
1211 const glu::ShaderType shaderType,
1212 const ShaderSpec& shaderSpec,
1214 BlockType blockType,
1215 const deUint32 flags,
1216 const IndexExprType indexExprType,
1217 const std::vector<int>& readIndices,
1218 const std::vector<deUint32>& inValues);
1219 virtual ~BlockArrayIndexingCaseInstance (void);
1221 virtual tcu::TestStatus iterate (void);
1224 const BlockType m_blockType;
1225 const deUint32 m_flags;
1226 const std::vector<int>& m_readIndices;
1227 const std::vector<deUint32>& m_inValues;
1230 BlockArrayIndexingCaseInstance::BlockArrayIndexingCaseInstance (Context& context,
1231 const glu::ShaderType shaderType,
1232 const ShaderSpec& shaderSpec,
1234 BlockType blockType,
1235 const deUint32 flags,
1236 const IndexExprType indexExprType,
1237 const std::vector<int>& readIndices,
1238 const std::vector<deUint32>& inValues)
1239 : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, name, indexExprType)
1240 , m_blockType (blockType)
1242 , m_readIndices (readIndices)
1243 , m_inValues (inValues)
1247 BlockArrayIndexingCaseInstance::~BlockArrayIndexingCaseInstance (void)
1251 tcu::TestStatus BlockArrayIndexingCaseInstance::iterate (void)
1253 const int numInvocations = NUM_INVOCATIONS;
1254 const int numReads = NUM_READS;
1255 std::vector<deUint32> outValues (numInvocations*numReads);
1257 tcu::TestLog& log = m_context.getTestContext().getLog();
1258 tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");
1260 std::vector<int> expandedIndices;
1261 std::vector<void*> inputs;
1262 std::vector<void*> outputs;
1263 const VkBufferUsageFlags bufferUsage = m_blockType == BLOCKTYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
1264 const VkDescriptorType descriptorType = m_blockType == BLOCKTYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
1266 const DeviceInterface& vkd = m_context.getDeviceInterface();
1267 const VkDevice device = m_context.getDevice();
1269 // \note Using separate buffer per element - might want to test
1270 // offsets & single buffer in the future.
1271 vector<BufferSp> buffers (m_inValues.size());
1272 MovePtr<Buffer> indexBuffer;
1274 Move<VkDescriptorSetLayout> extraResourcesLayout;
1275 Move<VkDescriptorPool> extraResourcesSetPool;
1276 Move<VkDescriptorSet> extraResourcesSet;
1278 checkSupported(descriptorType);
1280 if ((m_flags & FLAG_USE_STORAGE_BUFFER) != 0)
1282 if (!de::contains(m_context.getDeviceExtensions().begin(), m_context.getDeviceExtensions().end(), "VK_KHR_storage_buffer_storage_class"))
1283 TCU_THROW(NotSupportedError, "VK_KHR_storage_buffer_storage_class is not supported");
1286 for (size_t bufferNdx = 0; bufferNdx < m_inValues.size(); ++bufferNdx)
1288 buffers[bufferNdx] = BufferSp(new Buffer(m_context, bufferUsage, sizeof(deUint32)));
1289 *(deUint32*)buffers[bufferNdx]->getHostPtr() = m_inValues[bufferNdx];
1290 buffers[bufferNdx]->flush();
1293 if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1294 indexBuffer = createUniformIndexBuffer(m_context, numReads, &m_readIndices[0]);
1297 const VkDescriptorSetLayoutBinding bindings[] =
1299 { 0u, descriptorType, (deUint32)m_inValues.size(), VK_SHADER_STAGE_ALL, DE_NULL },
1300 { (deUint32)m_inValues.size(), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL }
1302 const VkDescriptorSetLayoutCreateInfo layoutInfo =
1304 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1306 (VkDescriptorSetLayoutCreateFlags)0u,
1307 DE_LENGTH_OF_ARRAY(bindings),
1311 extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
1315 const VkDescriptorPoolSize poolSizes[] =
1317 { descriptorType, (deUint32)m_inValues.size() },
1318 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, }
1320 const VkDescriptorPoolCreateInfo poolInfo =
1322 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
1324 (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
1326 DE_LENGTH_OF_ARRAY(poolSizes),
1330 extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
1334 const VkDescriptorSetAllocateInfo allocInfo =
1336 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1338 *extraResourcesSetPool,
1340 &extraResourcesLayout.get(),
1343 extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
1347 vector<VkDescriptorBufferInfo> bufferInfos (m_inValues.size());
1348 const VkWriteDescriptorSet descriptorWrite =
1350 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1354 0u, // dstArrayElement
1355 (deUint32)m_inValues.size(),
1357 (const VkDescriptorImageInfo*)DE_NULL,
1359 (const VkBufferView*)DE_NULL,
1362 for (size_t ndx = 0; ndx < m_inValues.size(); ++ndx)
1364 bufferInfos[ndx].buffer = buffers[ndx]->getBuffer();
1365 bufferInfos[ndx].offset = 0u;
1366 bufferInfos[ndx].range = VK_WHOLE_SIZE;
1369 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
1374 const VkDescriptorBufferInfo bufferInfo =
1376 indexBuffer->getBuffer(),
1380 const VkWriteDescriptorSet descriptorWrite =
1382 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1385 (deUint32)m_inValues.size(), // dstBinding
1386 0u, // dstArrayElement
1388 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1389 (const VkDescriptorImageInfo*)DE_NULL,
1391 (const VkBufferView*)DE_NULL,
1394 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
1397 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1399 expandedIndices.resize(numInvocations * m_readIndices.size());
1401 for (int readNdx = 0; readNdx < numReads; readNdx++)
1403 int* dst = &expandedIndices[numInvocations*readNdx];
1404 std::fill(dst, dst+numInvocations, m_readIndices[readNdx]);
1407 for (int readNdx = 0; readNdx < numReads; readNdx++)
1408 inputs.push_back(&expandedIndices[readNdx*numInvocations]);
1411 for (int readNdx = 0; readNdx < numReads; readNdx++)
1412 outputs.push_back(&outValues[readNdx*numInvocations]);
1415 UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
1417 executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
1420 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1422 for (int readNdx = 0; readNdx < numReads; readNdx++)
1424 const deUint32 refValue = m_inValues[m_readIndices[readNdx]];
1425 const deUint32 resValue = outValues[readNdx*numInvocations + invocationNdx];
1427 if (refValue != resValue)
1429 log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
1430 << ", read " << readNdx << ": expected "
1431 << tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
1432 << tcu::TestLog::EndMessage;
1434 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1435 testResult = tcu::TestStatus::fail("Invalid result value");
1443 class BlockArrayIndexingCase : public OpaqueTypeIndexingCase
1446 BlockArrayIndexingCase (tcu::TestContext& testCtx,
1448 const char* description,
1449 BlockType blockType,
1450 IndexExprType indexExprType,
1451 const glu::ShaderType shaderType,
1452 deUint32 flags = 0u);
1453 virtual ~BlockArrayIndexingCase (void);
1455 virtual TestInstance* createInstance (Context& ctx) const;
1458 BlockArrayIndexingCase (const BlockArrayIndexingCase&);
1459 BlockArrayIndexingCase& operator= (const BlockArrayIndexingCase&);
1461 void createShaderSpec (void);
1463 const BlockType m_blockType;
1464 const deUint32 m_flags;
1465 std::vector<int> m_readIndices;
1466 std::vector<deUint32> m_inValues;
1469 BlockArrayIndexingCase::BlockArrayIndexingCase (tcu::TestContext& testCtx,
1471 const char* description,
1472 BlockType blockType,
1473 IndexExprType indexExprType,
1474 const glu::ShaderType shaderType,
1476 : OpaqueTypeIndexingCase (testCtx, name, description, shaderType, indexExprType)
1477 , m_blockType (blockType)
1479 , m_readIndices (BlockArrayIndexingCaseInstance::NUM_READS)
1480 , m_inValues (BlockArrayIndexingCaseInstance::NUM_INSTANCES)
1486 BlockArrayIndexingCase::~BlockArrayIndexingCase (void)
1490 TestInstance* BlockArrayIndexingCase::createInstance (Context& ctx) const
1492 return new BlockArrayIndexingCaseInstance(ctx,
1503 void BlockArrayIndexingCase::createShaderSpec (void)
1505 const int numInstances = BlockArrayIndexingCaseInstance::NUM_INSTANCES;
1506 const int numReads = BlockArrayIndexingCaseInstance::NUM_READS;
1507 de::Random rnd (deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
1508 const char* blockName = "Block";
1509 const char* instanceName = "block";
1510 const char* indicesPrefix = "index";
1511 const char* resultPrefix = "result";
1512 const char* interfaceName = m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "buffer";
1513 std::ostringstream global, code;
1515 for (int readNdx = 0; readNdx < numReads; readNdx++)
1516 m_readIndices[readNdx] = rnd.getInt(0, numInstances-1);
1518 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
1519 m_inValues[instanceNdx] = rnd.getUint32();
1521 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1522 global << "#extension GL_EXT_gpu_shader5 : require\n";
1524 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1525 global << "const highp int indexBase = 1;\n";
1528 "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) " << interfaceName << " " << blockName << "\n"
1530 " highp uint value;\n"
1531 "} " << instanceName << "[" << numInstances << "];\n";
1533 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1535 for (int readNdx = 0; readNdx < numReads; readNdx++)
1537 const std::string varName = indicesPrefix + de::toString(readNdx);
1538 m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1541 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1542 declareUniformIndexVars(global, (deUint32)m_inValues.size(), indicesPrefix, numReads);
1544 for (int readNdx = 0; readNdx < numReads; readNdx++)
1546 const std::string varName = resultPrefix + de::toString(readNdx);
1547 m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
1550 for (int readNdx = 0; readNdx < numReads; readNdx++)
1552 code << resultPrefix << readNdx << " = " << instanceName << "[";
1554 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1555 code << m_readIndices[readNdx];
1556 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1557 code << "indexBase + " << (m_readIndices[readNdx]-1);
1559 code << indicesPrefix << readNdx;
1561 code << "].value;\n";
1564 m_shaderSpec.globalDeclarations = global.str();
1565 m_shaderSpec.source = code.str();
1567 if ((m_flags & BlockArrayIndexingCaseInstance::FLAG_USE_STORAGE_BUFFER) != 0)
1568 m_shaderSpec.buildOptions.flags |= vk::GlslBuildOptions::FLAG_USE_STORAGE_BUFFER_STORAGE_CLASS;
1571 class AtomicCounterIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
1576 NUM_INVOCATIONS = 32,
1581 AtomicCounterIndexingCaseInstance (Context& context,
1582 const glu::ShaderType shaderType,
1583 const ShaderSpec& shaderSpec,
1585 const std::vector<int>& opIndices,
1586 const IndexExprType indexExprType);
1587 virtual ~AtomicCounterIndexingCaseInstance (void);
1589 virtual tcu::TestStatus iterate (void);
1592 const std::vector<int>& m_opIndices;
1595 AtomicCounterIndexingCaseInstance::AtomicCounterIndexingCaseInstance (Context& context,
1596 const glu::ShaderType shaderType,
1597 const ShaderSpec& shaderSpec,
1599 const std::vector<int>& opIndices,
1600 const IndexExprType indexExprType)
1601 : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, name, indexExprType)
1602 , m_opIndices (opIndices)
1606 AtomicCounterIndexingCaseInstance::~AtomicCounterIndexingCaseInstance (void)
1610 tcu::TestStatus AtomicCounterIndexingCaseInstance::iterate (void)
1612 const int numInvocations = NUM_INVOCATIONS;
1613 const int numCounters = NUM_COUNTERS;
1614 const int numOps = NUM_OPS;
1615 std::vector<int> expandedIndices;
1616 std::vector<void*> inputs;
1617 std::vector<void*> outputs;
1618 std::vector<deUint32> outValues (numInvocations*numOps);
1620 const DeviceInterface& vkd = m_context.getDeviceInterface();
1621 const VkDevice device = m_context.getDevice();
1622 const VkPhysicalDeviceFeatures& deviceFeatures = m_context.getDeviceFeatures();
1624 //Check stores and atomic operation support.
1625 switch (m_shaderType)
1627 case glu::SHADERTYPE_VERTEX:
1628 case glu::SHADERTYPE_TESSELLATION_CONTROL:
1629 case glu::SHADERTYPE_TESSELLATION_EVALUATION:
1630 case glu::SHADERTYPE_GEOMETRY:
1631 if(!deviceFeatures.vertexPipelineStoresAndAtomics)
1632 TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in Vertex, Tessellation, and Geometry shader.");
1634 case glu::SHADERTYPE_FRAGMENT:
1635 if(!deviceFeatures.fragmentStoresAndAtomics)
1636 TCU_THROW(NotSupportedError, "Stores and atomic operations are not supported in fragment shader.");
1638 case glu::SHADERTYPE_COMPUTE:
1641 throw tcu::InternalError("Unsupported shader type");
1644 // \note Using separate buffer per element - might want to test
1645 // offsets & single buffer in the future.
1646 Buffer atomicOpBuffer (m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, sizeof(deUint32)*numCounters);
1647 MovePtr<Buffer> indexBuffer;
1649 Move<VkDescriptorSetLayout> extraResourcesLayout;
1650 Move<VkDescriptorPool> extraResourcesSetPool;
1651 Move<VkDescriptorSet> extraResourcesSet;
1653 checkSupported(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
1655 deMemset(atomicOpBuffer.getHostPtr(), 0, sizeof(deUint32)*numCounters);
1656 atomicOpBuffer.flush();
1658 if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1659 indexBuffer = createUniformIndexBuffer(m_context, numOps, &m_opIndices[0]);
1662 const VkDescriptorSetLayoutBinding bindings[] =
1664 { 0u, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL },
1665 { 1u, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL }
1667 const VkDescriptorSetLayoutCreateInfo layoutInfo =
1669 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1671 (VkDescriptorSetLayoutCreateFlags)0u,
1672 DE_LENGTH_OF_ARRAY(bindings),
1676 extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
1680 const VkDescriptorPoolSize poolSizes[] =
1682 { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u, },
1683 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, }
1685 const VkDescriptorPoolCreateInfo poolInfo =
1687 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
1689 (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
1691 DE_LENGTH_OF_ARRAY(poolSizes),
1695 extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
1699 const VkDescriptorSetAllocateInfo allocInfo =
1701 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1703 *extraResourcesSetPool,
1705 &extraResourcesLayout.get(),
1708 extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
1712 const VkDescriptorBufferInfo bufferInfo =
1714 atomicOpBuffer.getBuffer(),
1718 const VkWriteDescriptorSet descriptorWrite =
1720 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1724 0u, // dstArrayElement
1726 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
1727 (const VkDescriptorImageInfo*)DE_NULL,
1729 (const VkBufferView*)DE_NULL,
1732 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
1737 const VkDescriptorBufferInfo bufferInfo =
1739 indexBuffer->getBuffer(),
1743 const VkWriteDescriptorSet descriptorWrite =
1745 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1749 0u, // dstArrayElement
1751 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1752 (const VkDescriptorImageInfo*)DE_NULL,
1754 (const VkBufferView*)DE_NULL,
1757 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
1760 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1762 expandedIndices.resize(numInvocations * m_opIndices.size());
1764 for (int opNdx = 0; opNdx < numOps; opNdx++)
1766 int* dst = &expandedIndices[numInvocations*opNdx];
1767 std::fill(dst, dst+numInvocations, m_opIndices[opNdx]);
1770 for (int opNdx = 0; opNdx < numOps; opNdx++)
1771 inputs.push_back(&expandedIndices[opNdx*numInvocations]);
1774 for (int opNdx = 0; opNdx < numOps; opNdx++)
1775 outputs.push_back(&outValues[opNdx*numInvocations]);
1778 UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
1780 executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
1784 tcu::TestLog& log = m_context.getTestContext().getLog();
1785 tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");
1786 std::vector<int> numHits (numCounters, 0); // Number of hits per counter.
1787 std::vector<deUint32> counterValues (numCounters);
1788 std::vector<std::vector<bool> > counterMasks (numCounters);
1790 for (int opNdx = 0; opNdx < numOps; opNdx++)
1791 numHits[m_opIndices[opNdx]] += 1;
1793 // Read counter values
1795 const void* mapPtr = atomicOpBuffer.getHostPtr();
1796 DE_ASSERT(mapPtr != DE_NULL);
1797 atomicOpBuffer.invalidate();
1798 std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + numCounters, &counterValues[0]);
1801 // Verify counter values
1802 for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1804 const deUint32 refCount = (deUint32)(numHits[counterNdx]*numInvocations);
1805 const deUint32 resCount = counterValues[counterNdx];
1807 if (refCount != resCount)
1809 log << tcu::TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
1810 << ", expected " << refCount
1811 << tcu::TestLog::EndMessage;
1813 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1814 testResult = tcu::TestStatus::fail("Invalid atomic counter value");
1818 // Allocate bitmasks - one bit per each valid result value
1819 for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1821 const int counterValue = numHits[counterNdx]*numInvocations;
1822 counterMasks[counterNdx].resize(counterValue, false);
1825 // Verify result values from shaders
1826 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1828 for (int opNdx = 0; opNdx < numOps; opNdx++)
1830 const int counterNdx = m_opIndices[opNdx];
1831 const deUint32 resValue = outValues[opNdx*numInvocations + invocationNdx];
1832 const bool rangeOk = de::inBounds(resValue, 0u, (deUint32)counterMasks[counterNdx].size());
1833 const bool notSeen = rangeOk && !counterMasks[counterNdx][resValue];
1834 const bool isOk = rangeOk && notSeen;
1838 log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
1839 << ", op " << opNdx << ": got invalid result value "
1841 << tcu::TestLog::EndMessage;
1843 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1844 testResult = tcu::TestStatus::fail("Invalid result value");
1848 // Mark as used - no other invocation should see this value from same counter.
1849 counterMasks[counterNdx][resValue] = true;
1854 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1856 // Consistency check - all masks should be 1 now
1857 for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1859 for (std::vector<bool>::const_iterator i = counterMasks[counterNdx].begin(); i != counterMasks[counterNdx].end(); i++)
1860 TCU_CHECK_INTERNAL(*i);
1868 class AtomicCounterIndexingCase : public OpaqueTypeIndexingCase
1871 AtomicCounterIndexingCase (tcu::TestContext& testCtx,
1873 const char* description,
1874 IndexExprType indexExprType,
1875 const glu::ShaderType shaderType);
1876 virtual ~AtomicCounterIndexingCase (void);
1878 virtual TestInstance* createInstance (Context& ctx) const;
1881 AtomicCounterIndexingCase (const BlockArrayIndexingCase&);
1882 AtomicCounterIndexingCase& operator= (const BlockArrayIndexingCase&);
1884 void createShaderSpec (void);
1886 std::vector<int> m_opIndices;
1889 AtomicCounterIndexingCase::AtomicCounterIndexingCase (tcu::TestContext& testCtx,
1891 const char* description,
1892 IndexExprType indexExprType,
1893 const glu::ShaderType shaderType)
1894 : OpaqueTypeIndexingCase (testCtx, name, description, shaderType, indexExprType)
1895 , m_opIndices (AtomicCounterIndexingCaseInstance::NUM_OPS)
1901 AtomicCounterIndexingCase::~AtomicCounterIndexingCase (void)
1905 TestInstance* AtomicCounterIndexingCase::createInstance (Context& ctx) const
1907 return new AtomicCounterIndexingCaseInstance(ctx,
1915 void AtomicCounterIndexingCase::createShaderSpec (void)
1917 const int numCounters = AtomicCounterIndexingCaseInstance::NUM_COUNTERS;
1918 const int numOps = AtomicCounterIndexingCaseInstance::NUM_OPS;
1919 de::Random rnd (deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1921 for (int opNdx = 0; opNdx < numOps; opNdx++)
1922 m_opIndices[opNdx] = rnd.getInt(0, numOps-1);
1925 const char* indicesPrefix = "index";
1926 const char* resultPrefix = "result";
1927 std::ostringstream global, code;
1929 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1930 global << "#extension GL_EXT_gpu_shader5 : require\n";
1932 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1933 global << "const highp int indexBase = 1;\n";
1936 "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0, std430) buffer AtomicBuffer { highp uint counter[" << numCounters << "]; };\n";
1938 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1940 for (int opNdx = 0; opNdx < numOps; opNdx++)
1942 const std::string varName = indicesPrefix + de::toString(opNdx);
1943 m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1946 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1947 declareUniformIndexVars(global, 1, indicesPrefix, numOps);
1949 for (int opNdx = 0; opNdx < numOps; opNdx++)
1951 const std::string varName = resultPrefix + de::toString(opNdx);
1952 m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
1955 for (int opNdx = 0; opNdx < numOps; opNdx++)
1957 code << resultPrefix << opNdx << " = atomicAdd(counter[";
1959 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1960 code << m_opIndices[opNdx];
1961 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1962 code << "indexBase + " << (m_opIndices[opNdx]-1);
1964 code << indicesPrefix << opNdx;
1966 code << "], uint(1));\n";
1969 m_shaderSpec.globalDeclarations = global.str();
1970 m_shaderSpec.source = code.str();
1974 class OpaqueTypeIndexingTests : public tcu::TestCaseGroup
1977 OpaqueTypeIndexingTests (tcu::TestContext& testCtx);
1978 virtual ~OpaqueTypeIndexingTests (void);
1980 virtual void init (void);
1983 OpaqueTypeIndexingTests (const OpaqueTypeIndexingTests&);
1984 OpaqueTypeIndexingTests& operator= (const OpaqueTypeIndexingTests&);
1987 OpaqueTypeIndexingTests::OpaqueTypeIndexingTests (tcu::TestContext& testCtx)
1988 : tcu::TestCaseGroup(testCtx, "opaque_type_indexing", "Opaque Type Indexing Tests")
1992 OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests (void)
1996 void OpaqueTypeIndexingTests::init (void)
2002 const char* description;
2005 { INDEX_EXPR_TYPE_CONST_LITERAL, "const_literal", "Indexing by constant literal" },
2006 { INDEX_EXPR_TYPE_CONST_EXPRESSION, "const_expression", "Indexing by constant expression" },
2007 { INDEX_EXPR_TYPE_UNIFORM, "uniform", "Indexing by uniform value" },
2008 { INDEX_EXPR_TYPE_DYNAMIC_UNIFORM, "dynamically_uniform", "Indexing by dynamically uniform expression" }
2013 glu::ShaderType type;
2017 { glu::SHADERTYPE_VERTEX, "vertex" },
2018 { glu::SHADERTYPE_FRAGMENT, "fragment" },
2019 { glu::SHADERTYPE_GEOMETRY, "geometry" },
2020 { glu::SHADERTYPE_TESSELLATION_CONTROL, "tess_ctrl" },
2021 { glu::SHADERTYPE_TESSELLATION_EVALUATION, "tess_eval" },
2022 { glu::SHADERTYPE_COMPUTE, "compute" }
2027 static const glu::DataType samplerTypes[] =
2029 // \note 1D images will be added by a later extension.
2030 // glu::TYPE_SAMPLER_1D,
2031 glu::TYPE_SAMPLER_2D,
2032 glu::TYPE_SAMPLER_CUBE,
2033 glu::TYPE_SAMPLER_2D_ARRAY,
2034 glu::TYPE_SAMPLER_3D,
2035 // glu::TYPE_SAMPLER_1D_SHADOW,
2036 glu::TYPE_SAMPLER_2D_SHADOW,
2037 glu::TYPE_SAMPLER_CUBE_SHADOW,
2038 glu::TYPE_SAMPLER_2D_ARRAY_SHADOW,
2039 // glu::TYPE_INT_SAMPLER_1D,
2040 glu::TYPE_INT_SAMPLER_2D,
2041 glu::TYPE_INT_SAMPLER_CUBE,
2042 glu::TYPE_INT_SAMPLER_2D_ARRAY,
2043 glu::TYPE_INT_SAMPLER_3D,
2044 // glu::TYPE_UINT_SAMPLER_1D,
2045 glu::TYPE_UINT_SAMPLER_2D,
2046 glu::TYPE_UINT_SAMPLER_CUBE,
2047 glu::TYPE_UINT_SAMPLER_2D_ARRAY,
2048 glu::TYPE_UINT_SAMPLER_3D,
2051 tcu::TestCaseGroup* const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
2052 addChild(samplerGroup);
2054 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
2056 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
2057 tcu::TestCaseGroup* const indexGroup = new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name, indexingTypes[indexTypeNdx].description);
2058 samplerGroup->addChild(indexGroup);
2060 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
2062 const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
2063 tcu::TestCaseGroup* const shaderGroup = new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
2064 indexGroup->addChild(shaderGroup);
2066 // \note [pyry] In Vulkan CTS 1.0.2 sampler groups should not cover tess/geom stages
2067 if ((shaderType != glu::SHADERTYPE_VERTEX) &&
2068 (shaderType != glu::SHADERTYPE_FRAGMENT) &&
2069 (shaderType != glu::SHADERTYPE_COMPUTE))
2072 for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
2074 const glu::DataType samplerType = samplerTypes[samplerTypeNdx];
2075 const char* samplerName = getDataTypeName(samplerType);
2076 const std::string caseName = de::toLower(samplerName);
2078 shaderGroup->addChild(new SamplerIndexingCase(m_testCtx, caseName.c_str(), "", shaderType, samplerType, indexExprType));
2084 // .ubo / .ssbo / .atomic_counter
2086 tcu::TestCaseGroup* const uboGroup = new tcu::TestCaseGroup(m_testCtx, "ubo", "Uniform Block Instance Array Indexing Tests");
2087 tcu::TestCaseGroup* const ssboGroup = new tcu::TestCaseGroup(m_testCtx, "ssbo", "Buffer Block Instance Array Indexing Tests");
2088 tcu::TestCaseGroup* const ssboStorageBufGroup = new tcu::TestCaseGroup(m_testCtx, "ssbo_storage_buffer_decoration", "Buffer Block (new StorageBuffer decoration) Instance Array Indexing Tests");
2089 tcu::TestCaseGroup* const acGroup = new tcu::TestCaseGroup(m_testCtx, "atomic_counter", "Atomic Counter Array Indexing Tests");
2091 addChild(ssboGroup);
2092 addChild(ssboStorageBufGroup);
2095 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
2097 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
2098 const char* indexExprName = indexingTypes[indexTypeNdx].name;
2099 const char* indexExprDesc = indexingTypes[indexTypeNdx].description;
2101 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
2103 const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
2104 const std::string name = std::string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;
2106 // \note [pyry] In Vulkan CTS 1.0.2 ubo/ssbo/atomic_counter groups should not cover tess/geom stages
2107 if ((shaderType == glu::SHADERTYPE_VERTEX) ||
2108 (shaderType == glu::SHADERTYPE_FRAGMENT) ||
2109 (shaderType == glu::SHADERTYPE_COMPUTE))
2111 uboGroup->addChild (new BlockArrayIndexingCase (m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_UNIFORM, indexExprType, shaderType));
2112 acGroup->addChild (new AtomicCounterIndexingCase (m_testCtx, name.c_str(), indexExprDesc, indexExprType, shaderType));
2114 if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
2115 ssboGroup->addChild (new BlockArrayIndexingCase (m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_BUFFER, indexExprType, shaderType));
2118 if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
2119 ssboStorageBufGroup->addChild (new BlockArrayIndexingCase (m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_BUFFER, indexExprType, shaderType, (deUint32)BlockArrayIndexingCaseInstance::FLAG_USE_STORAGE_BUFFER));
2127 tcu::TestCaseGroup* createOpaqueTypeIndexingTests (tcu::TestContext& testCtx)
2129 return new OpaqueTypeIndexingTests(testCtx);