1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
13 * http://www.apache.org/licenses/LICENSE-2.0
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
23 * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
24 *//*--------------------------------------------------------------------*/
26 #include "vktOpaqueTypeIndexingTests.hpp"
28 #include "vkRefUtil.hpp"
29 #include "vkImageUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkTypeUtil.hpp"
32 #include "vkQueryUtil.hpp"
34 #include "tcuTexture.hpp"
35 #include "tcuTestLog.hpp"
36 #include "tcuVectorUtil.hpp"
37 #include "tcuTextureUtil.hpp"
39 #include "deStringUtil.hpp"
40 #include "deSharedPtr.hpp"
41 #include "deRandom.hpp"
43 #include "vktShaderExecutor.hpp"
// NOTE(review): this listing is elided — the embedded original line numbers
// (left column) are non-contiguous, so braces and declarations between visible
// lines are missing from this view.
49 namespace shaderexecutor
// Ref-counted handle to a uniquely-owned VkSampler.
62 typedef SharedPtr<Unique<VkSampler> > VkSamplerSp;
// Helper owning a VkBuffer together with its bound (host-visible) allocation.
// Exposes the mapped host pointer so test code can read/write buffer contents
// directly; flush()/invalidate() manage host/device coherency (defined below).
69 Buffer (Context& context, VkBufferUsageFlags usage, size_t size);
71 VkBuffer getBuffer (void) const { return *m_buffer; }
72 void* getHostPtr (void) const { return m_allocation->getHostPtr(); }
74 void invalidate (void);
77 const DeviceInterface& m_vkd;
78 const VkDevice m_device;
79 const Unique<VkBuffer> m_buffer;
80 const UniquePtr<Allocation> m_allocation;
// Shared-ownership alias used where several objects reference one Buffer.
83 typedef de::SharedPtr<Buffer> BufferSp;
// Creates an exclusive-sharing VkBuffer of the given size/usage.
// NOTE(review): several VkBufferCreateInfo members (pNext, size, usage,
// queue family fields) are on elided lines in this listing.
85 Move<VkBuffer> createBuffer (const DeviceInterface& vkd, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usageFlags)
87 const VkBufferCreateInfo createInfo =
89 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
91 (VkBufferCreateFlags)0,
94 VK_SHARING_MODE_EXCLUSIVE,
98 return createBuffer(vkd, device, &createInfo);
// Allocates host-visible memory matching the buffer's requirements and binds it.
// Ownership of the allocation transfers to the caller via MovePtr.
101 MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkBuffer buffer)
103 MovePtr<Allocation> alloc (allocator.allocate(getBufferMemoryRequirements(vkd, device, buffer), MemoryRequirement::HostVisible));
105 VK_CHECK(vkd.bindBufferMemory(device, buffer, alloc->getMemory(), alloc->getOffset()));
// Buffer ctor: creates the VkBuffer, then allocates+binds host-visible memory.
// NOTE(review): trailing constructor arguments are on elided lines.
110 Buffer::Buffer (Context& context, VkBufferUsageFlags usage, size_t size)
111 : m_vkd (context.getDeviceInterface())
112 , m_device (context.getDevice())
113 , m_buffer (createBuffer (context.getDeviceInterface(),
117 , m_allocation (allocateAndBindMemory (context.getDeviceInterface(),
119 context.getDefaultAllocator(),
// Push host writes to the device (whole mapped range).
124 void Buffer::flush (void)
126 flushMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
// Pull device writes so the host mapping is up to date (whole mapped range).
129 void Buffer::invalidate (void)
131 invalidateMappedMemoryRange(m_vkd, m_device, m_allocation->getMemory(), m_allocation->getOffset(), VK_WHOLE_SIZE);
// Builds a uniform buffer holding the given lookup indices, copied via the
// mapped host pointer. NOTE(review): flush/return likely on elided lines.
134 MovePtr<Buffer> createUniformIndexBuffer (Context& context, int numIndices, const int* indices)
136 MovePtr<Buffer> buffer (new Buffer(context, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, sizeof(int)*numIndices));
137 int* const bufPtr = (int*)buffer->getHostPtr();
139 for (int ndx = 0; ndx < numIndices; ++ndx)
140 bufPtr[ndx] = indices[ndx];
// How the opaque-type array index expression is formed in the shader:
// literal constant, constant expression, uniform value, or a uniform that is
// dynamically non-uniform per invocation.
151 INDEX_EXPR_TYPE_CONST_LITERAL = 0,
152 INDEX_EXPR_TYPE_CONST_EXPRESSION,
153 INDEX_EXPR_TYPE_UNIFORM,
154 INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,
// Fragment of the TextureType enum (other enumerators elided in this listing).
164 TEXTURE_TYPE_2D_ARRAY,
// Base test case for opaque-type indexing: stores shader type and index
// expression kind, and compiles the prepared ShaderSpec into the program
// collection. Subclasses fill in m_shaderSpec.
170 class OpaqueTypeIndexingCase : public TestCase
173 OpaqueTypeIndexingCase (tcu::TestContext& testCtx,
175 const char* description,
176 const glu::ShaderType shaderType,
177 const IndexExprType indexExprType);
178 virtual ~OpaqueTypeIndexingCase (void);
180 virtual void initPrograms (vk::SourceCollections& programCollection) const
182 generateSources(m_shaderType, m_shaderSpec, programCollection);
187 const glu::ShaderType m_shaderType;
188 const IndexExprType m_indexExprType;
189 ShaderSpec m_shaderSpec;
// NOTE(review): the `name` parameter line is elided from this listing.
192 OpaqueTypeIndexingCase::OpaqueTypeIndexingCase (tcu::TestContext& testCtx,
194 const char* description,
195 const glu::ShaderType shaderType,
196 const IndexExprType indexExprType)
197 : TestCase (testCtx, name, description)
199 , m_shaderType (shaderType)
200 , m_indexExprType (indexExprType)
204 OpaqueTypeIndexingCase::~OpaqueTypeIndexingCase (void)
// Base instance for opaque-type indexing tests. Holds references to the
// shared shader spec and the indexing mode; iterate() is supplied by the
// concrete subclass. checkSupported() gates dynamic-indexing features.
208 class OpaqueTypeIndexingTestInstance : public TestInstance
211 OpaqueTypeIndexingTestInstance (Context& context,
212 const glu::ShaderType shaderType,
213 const ShaderSpec& shaderSpec,
215 const IndexExprType indexExprType);
216 virtual ~OpaqueTypeIndexingTestInstance (void);
218 virtual tcu::TestStatus iterate (void) = 0;
221 void checkSupported (const VkDescriptorType descriptorType);
224 tcu::TestContext& m_testCtx;
225 const glu::ShaderType m_shaderType;
// Reference only: the spec is owned by the test case object.
226 const ShaderSpec& m_shaderSpec;
228 const IndexExprType m_indexExprType;
231 OpaqueTypeIndexingTestInstance::OpaqueTypeIndexingTestInstance (Context& context,
232 const glu::ShaderType shaderType,
233 const ShaderSpec& shaderSpec,
235 const IndexExprType indexExprType)
236 : TestInstance (context)
237 , m_testCtx (context.getTestContext())
238 , m_shaderType (shaderType)
239 , m_shaderSpec (shaderSpec)
241 , m_indexExprType (indexExprType)
245 OpaqueTypeIndexingTestInstance::~OpaqueTypeIndexingTestInstance (void)
// Throws NotSupportedError when the device lacks the dynamic-indexing feature
// required for the given descriptor type. Only relevant for non-constant
// index expressions (constant indices need no feature bit).
249 void OpaqueTypeIndexingTestInstance::checkSupported (const VkDescriptorType descriptorType)
251 const VkPhysicalDeviceFeatures& deviceFeatures = m_context.getDeviceFeatures();
253 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
255 switch (descriptorType)
257 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
258 if (!deviceFeatures.shaderSampledImageArrayDynamicIndexing)
259 TCU_THROW(NotSupportedError, "Dynamic indexing of sampler arrays is not supported");
262 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
263 if (!deviceFeatures.shaderUniformBufferArrayDynamicIndexing)
264 TCU_THROW(NotSupportedError, "Dynamic indexing of uniform buffer arrays is not supported");
267 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
268 if (!deviceFeatures.shaderStorageBufferArrayDynamicIndexing)
269 TCU_THROW(NotSupportedError, "Dynamic indexing of storage buffer arrays is not supported");
// Emits a std140 uniform block "Indices" containing numVars highp ints named
// <varPrefix>0..<varPrefix>N-1, bound at the given binding in the extra
// resources descriptor set.
278 static void declareUniformIndexVars (std::ostream& str, deUint32 bindingLocation, const char* varPrefix, int numVars)
280 str << "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = " << bindingLocation << ", std140) uniform Indices\n{\n";
282 for (int varNdx = 0; varNdx < numVars; varNdx++)
283 str << "\thighp int " << varPrefix << varNdx << ";\n";
// Maps a GLSL sampler data type to the texture dimensionality it samples.
// Throws InternalError for non-sampler types.
// NOTE(review): the `switch (samplerType)` header line is elided in this listing.
288 static TextureType getTextureType (glu::DataType samplerType)
292 case glu::TYPE_SAMPLER_1D:
293 case glu::TYPE_INT_SAMPLER_1D:
294 case glu::TYPE_UINT_SAMPLER_1D:
295 case glu::TYPE_SAMPLER_1D_SHADOW:
296 return TEXTURE_TYPE_1D;
298 case glu::TYPE_SAMPLER_2D:
299 case glu::TYPE_INT_SAMPLER_2D:
300 case glu::TYPE_UINT_SAMPLER_2D:
301 case glu::TYPE_SAMPLER_2D_SHADOW:
302 return TEXTURE_TYPE_2D;
304 case glu::TYPE_SAMPLER_CUBE:
305 case glu::TYPE_INT_SAMPLER_CUBE:
306 case glu::TYPE_UINT_SAMPLER_CUBE:
307 case glu::TYPE_SAMPLER_CUBE_SHADOW:
308 return TEXTURE_TYPE_CUBE;
310 case glu::TYPE_SAMPLER_2D_ARRAY:
311 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
312 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
313 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
314 return TEXTURE_TYPE_2D_ARRAY;
316 case glu::TYPE_SAMPLER_3D:
317 case glu::TYPE_INT_SAMPLER_3D:
318 case glu::TYPE_UINT_SAMPLER_3D:
319 return TEXTURE_TYPE_3D;
322 throw tcu::InternalError("Invalid sampler type");
326 static bool isShadowSampler (glu::DataType samplerType)
328 return samplerType == glu::TYPE_SAMPLER_1D_SHADOW ||
329 samplerType == glu::TYPE_SAMPLER_2D_SHADOW ||
330 samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW ||
331 samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW;
// Maps a sampler type to the GLSL type its texture() lookup returns:
// vec4 for float samplers, float for shadow samplers (compare result),
// ivec4/uvec4 for integer samplers. Throws InternalError otherwise.
// NOTE(review): the `switch (samplerType)` header line is elided in this listing.
334 static glu::DataType getSamplerOutputType (glu::DataType samplerType)
338 case glu::TYPE_SAMPLER_1D:
339 case glu::TYPE_SAMPLER_2D:
340 case glu::TYPE_SAMPLER_CUBE:
341 case glu::TYPE_SAMPLER_2D_ARRAY:
342 case glu::TYPE_SAMPLER_3D:
343 return glu::TYPE_FLOAT_VEC4;
345 case glu::TYPE_SAMPLER_1D_SHADOW:
346 case glu::TYPE_SAMPLER_2D_SHADOW:
347 case glu::TYPE_SAMPLER_CUBE_SHADOW:
348 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
349 return glu::TYPE_FLOAT;
351 case glu::TYPE_INT_SAMPLER_1D:
352 case glu::TYPE_INT_SAMPLER_2D:
353 case glu::TYPE_INT_SAMPLER_CUBE:
354 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
355 case glu::TYPE_INT_SAMPLER_3D:
356 return glu::TYPE_INT_VEC4;
358 case glu::TYPE_UINT_SAMPLER_1D:
359 case glu::TYPE_UINT_SAMPLER_2D:
360 case glu::TYPE_UINT_SAMPLER_CUBE:
361 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
362 case glu::TYPE_UINT_SAMPLER_3D:
363 return glu::TYPE_UINT_VEC4;
366 throw tcu::InternalError("Invalid sampler type");
// Picks the reference texture format matching the sampler's output scalar
// type: D16 for shadow samplers, RGBA8 UNORM for float samplers, and
// RGBA8 signed/unsigned int for integer samplers.
370 static tcu::TextureFormat getSamplerTextureFormat (glu::DataType samplerType)
372 const glu::DataType outType = getSamplerOutputType(samplerType);
373 const glu::DataType outScalarType = glu::getDataTypeScalarType(outType);
375 switch (outScalarType)
377 case glu::TYPE_FLOAT:
378 if (isShadowSampler(samplerType))
379 return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
381 return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
383 case glu::TYPE_INT: return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
384 case glu::TYPE_UINT: return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);
387 throw tcu::InternalError("Invalid sampler type");
// Returns the GLSL coordinate type used to sample the given sampler type:
// base component count from dimensionality, +1 component for shadow samplers
// (the compare reference rides in the last coordinate).
391 static glu::DataType getSamplerCoordType (glu::DataType samplerType)
393 const TextureType texType = getTextureType(samplerType);
398 case TEXTURE_TYPE_1D: numCoords = 1; break;
399 case TEXTURE_TYPE_2D: numCoords = 2; break;
400 case TEXTURE_TYPE_2D_ARRAY: numCoords = 3; break;
401 case TEXTURE_TYPE_CUBE: numCoords = 3; break;
402 case TEXTURE_TYPE_3D: numCoords = 3; break;
// Shadow samplers take one extra component for the comparison value.
// NOTE(review): the `numCoords += 1` body line appears to be elided here — confirm.
407 if (isShadowSampler(samplerType))
410 DE_ASSERT(de::inRange(numCoords, 1, 4));
412 return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
// Fills a 1-row texture with random reference data: odd depth values for
// depth formats (so even compare values avoid precision issues), raw random
// 32-bit words for 4-byte RGBA formats.
415 static void fillTextureData (const tcu::PixelBufferAccess& access, de::Random& rnd)
417 DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);
419 if (access.getFormat().order == tcu::TextureFormat::D)
421 // \note Texture uses odd values, lookup even values to avoid precision issues.
422 const float values[] = { 0.1f, 0.3f, 0.5f, 0.7f, 0.9f };
424 for (int ndx = 0; ndx < access.getWidth(); ndx++)
425 access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
429 TCU_CHECK_INTERNAL(access.getFormat().order == tcu::TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);
// Write one random 32-bit word per pixel straight into the pixel storage.
431 for (int ndx = 0; ndx < access.getWidth(); ndx++)
432 *((deUint32*)access.getDataPtr() + ndx) = rnd.getUint32();
// Maps TextureType to VkImageType. Cube maps are 2D images with 6 layers.
436 static vk::VkImageType getVkImageType (TextureType texType)
440 case TEXTURE_TYPE_1D: return vk::VK_IMAGE_TYPE_1D;
441 case TEXTURE_TYPE_2D:
442 case TEXTURE_TYPE_2D_ARRAY: return vk::VK_IMAGE_TYPE_2D;
443 case TEXTURE_TYPE_CUBE: return vk::VK_IMAGE_TYPE_2D;
444 case TEXTURE_TYPE_3D: return vk::VK_IMAGE_TYPE_3D;
446 DE_FATAL("Impossible");
447 return (vk::VkImageType)0;
// Maps TextureType to the corresponding VkImageViewType.
451 static vk::VkImageViewType getVkImageViewType (TextureType texType)
455 case TEXTURE_TYPE_1D: return vk::VK_IMAGE_VIEW_TYPE_1D;
456 case TEXTURE_TYPE_2D: return vk::VK_IMAGE_VIEW_TYPE_2D;
457 case TEXTURE_TYPE_2D_ARRAY: return vk::VK_IMAGE_VIEW_TYPE_2D_ARRAY;
458 case TEXTURE_TYPE_CUBE: return vk::VK_IMAGE_VIEW_TYPE_CUBE;
459 case TEXTURE_TYPE_3D: return vk::VK_IMAGE_VIEW_TYPE_3D;
461 DE_FATAL("Impossible");
462 return (vk::VkImageViewType)0;
466 //! Test image with 1-pixel dimensions and no mipmaps
// Owns the image, its device memory and a view; the ctor (defined below)
// uploads a single color value via a staging buffer.
470 TestImage (Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue);
472 VkImageView getImageView (void) const { return *m_imageView; }
475 const Unique<VkImage> m_image;
476 const UniquePtr<Allocation> m_allocation;
477 const Unique<VkImageView> m_imageView;
// Creates the 1x1x1 sampled+transfer-dst image; cube textures get the
// CUBE_COMPATIBLE flag and 6 array layers.
// NOTE(review): several VkImageCreateInfo members are on elided lines.
480 Move<VkImage> createTestImage (const DeviceInterface& vkd, VkDevice device, TextureType texType, tcu::TextureFormat format)
482 const VkImageCreateInfo createInfo =
484 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
486 (texType == TEXTURE_TYPE_CUBE ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : (VkImageCreateFlags)0),
487 getVkImageType(texType),
488 mapTextureFormat(format),
489 makeExtent3D(1, 1, 1),
491 (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u,
492 VK_SAMPLE_COUNT_1_BIT,
493 VK_IMAGE_TILING_OPTIMAL,
494 VK_IMAGE_USAGE_SAMPLED_BIT|VK_IMAGE_USAGE_TRANSFER_DST_BIT,
495 VK_SHARING_MODE_EXCLUSIVE,
498 VK_IMAGE_LAYOUT_UNDEFINED
501 return createImage(vkd, device, &createInfo);
// Image-memory overload: device-local requirements are acceptable (Any).
504 de::MovePtr<Allocation> allocateAndBindMemory (const DeviceInterface& vkd, VkDevice device, Allocator& allocator, VkImage image)
506 de::MovePtr<Allocation> alloc = allocator.allocate(getImageMemoryRequirements(vkd, device, image), MemoryRequirement::Any);
508 VK_CHECK(vkd.bindImageMemory(device, image, alloc->getMemory(), alloc->getOffset()));
// Creates an identity-swizzled view; aspect is DEPTH for depth formats,
// COLOR otherwise; cube views cover all 6 layers.
513 Move<VkImageView> createTestImageView (const DeviceInterface& vkd, VkDevice device, VkImage image, TextureType texType, tcu::TextureFormat format)
515 const bool isDepthImage = format.order == tcu::TextureFormat::D;
516 const VkImageViewCreateInfo createInfo =
518 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
520 (VkImageViewCreateFlags)0,
522 getVkImageViewType(texType),
523 mapTextureFormat(format),
525 VK_COMPONENT_SWIZZLE_IDENTITY,
526 VK_COMPONENT_SWIZZLE_IDENTITY,
527 VK_COMPONENT_SWIZZLE_IDENTITY,
528 VK_COMPONENT_SWIZZLE_IDENTITY,
531 (VkImageAspectFlags)(isDepthImage ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT),
535 (texType == TEXTURE_TYPE_CUBE ? 6u : 1u)
539 return createImageView(vkd, device, &createInfo);
// TestImage ctor: creates image/memory/view, then uploads `colorValue` to
// every layer through a host-visible staging buffer with a one-shot command
// buffer: HOST->TRANSFER barrier (UNDEFINED -> TRANSFER_DST), buffer-to-image
// copy, TRANSFER->graphics barrier (-> SHADER_READ_ONLY), submit and wait.
// NOTE(review): many struct members (copy regions, subresource ranges,
// submit-info fields) are on elided lines in this listing.
542 TestImage::TestImage (Context& context, TextureType texType, tcu::TextureFormat format, const void* colorValue)
543 : m_image (createTestImage (context.getDeviceInterface(), context.getDevice(), texType, format))
544 , m_allocation (allocateAndBindMemory (context.getDeviceInterface(), context.getDevice(), context.getDefaultAllocator(), *m_image))
545 , m_imageView (createTestImageView (context.getDeviceInterface(), context.getDevice(), *m_image, texType, format))
547 const DeviceInterface& vkd = context.getDeviceInterface();
548 const VkDevice device = context.getDevice();
// One pixel per layer; cube maps replicate the value across 6 faces.
550 const size_t pixelSize = (size_t)format.getPixelSize();
551 const deUint32 numLayers = (texType == TEXTURE_TYPE_CUBE) ? 6u : 1u;
552 const size_t numReplicas = (size_t)numLayers;
553 const size_t stagingBufferSize = pixelSize*numReplicas;
555 const VkBufferCreateInfo stagingBufferInfo =
557 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
559 (VkBufferCreateFlags)0u,
560 (VkDeviceSize)stagingBufferSize,
561 (VkBufferCreateFlags)VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
562 VK_SHARING_MODE_EXCLUSIVE,
566 const Unique<VkBuffer> stagingBuffer (createBuffer(vkd, device, &stagingBufferInfo));
567 const UniquePtr<Allocation> alloc (context.getDefaultAllocator().allocate(getBufferMemoryRequirements(vkd, device, *stagingBuffer), MemoryRequirement::HostVisible));
569 VK_CHECK(vkd.bindBufferMemory(device, *stagingBuffer, alloc->getMemory(), alloc->getOffset()));
// Replicate the source pixel once per layer in the staging buffer.
571 for (size_t ndx = 0; ndx < numReplicas; ++ndx)
572 deMemcpy((deUint8*)alloc->getHostPtr() + ndx*pixelSize, colorValue, pixelSize);
575 const Unique<VkCommandPool> cmdPool (createCommandPool(vkd, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, context.getUniversalQueueFamilyIndex()));
576 const Unique<VkCommandBuffer> cmdBuf (allocateCommandBuffer(vkd, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
577 const VkCommandBufferBeginInfo beginInfo =
579 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
581 (VkCommandBufferUsageFlags)VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
582 (const VkCommandBufferInheritanceInfo*)DE_NULL,
584 const VkImageAspectFlags imageAspect = (VkImageAspectFlags)(format.order == tcu::TextureFormat::D ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT);
585 const VkBufferImageCopy copyInfo =
// Transition the whole image UNDEFINED -> TRANSFER_DST before the copy.
599 const VkImageMemoryBarrier preCopyBarrier =
601 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
604 (VkAccessFlags)VK_ACCESS_TRANSFER_WRITE_BIT,
605 VK_IMAGE_LAYOUT_UNDEFINED,
606 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
607 VK_QUEUE_FAMILY_IGNORED,
608 VK_QUEUE_FAMILY_IGNORED,
// Make the transfer write visible to shader reads, in SHADER_READ_ONLY layout.
618 const VkImageMemoryBarrier postCopyBarrier =
620 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
622 (VkAccessFlags)VK_ACCESS_TRANSFER_WRITE_BIT,
623 (VkAccessFlags)VK_ACCESS_SHADER_READ_BIT,
624 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
625 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
626 VK_QUEUE_FAMILY_IGNORED,
627 VK_QUEUE_FAMILY_IGNORED,
638 VK_CHECK(vkd.beginCommandBuffer(*cmdBuf, &beginInfo));
639 vkd.cmdPipelineBarrier(*cmdBuf,
640 (VkPipelineStageFlags)VK_PIPELINE_STAGE_HOST_BIT,
641 (VkPipelineStageFlags)VK_PIPELINE_STAGE_TRANSFER_BIT,
642 (VkDependencyFlags)0u,
644 (const VkMemoryBarrier*)DE_NULL,
646 (const VkBufferMemoryBarrier*)DE_NULL,
649 vkd.cmdCopyBufferToImage(*cmdBuf, *stagingBuffer, *m_image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyInfo);
650 vkd.cmdPipelineBarrier(*cmdBuf,
651 (VkPipelineStageFlags)VK_PIPELINE_STAGE_TRANSFER_BIT,
652 (VkPipelineStageFlags)VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
653 (VkDependencyFlags)0u,
655 (const VkMemoryBarrier*)DE_NULL,
657 (const VkBufferMemoryBarrier*)DE_NULL,
660 VK_CHECK(vkd.endCommandBuffer(*cmdBuf));
// Submit and block until the upload completes before the ctor returns.
663 const Unique<VkFence> fence (createFence(vkd, device));
664 const VkSubmitInfo submitInfo =
666 VK_STRUCTURE_TYPE_SUBMIT_INFO,
669 (const VkSemaphore*)DE_NULL,
670 (const VkPipelineStageFlags*)DE_NULL,
674 (const VkSemaphore*)DE_NULL,
677 VK_CHECK(vkd.queueSubmit(context.getUniversalQueue(), 1u, &submitInfo, *fence));
678 VK_CHECK(vkd.waitForFences(device, 1u, &fence.get(), VK_TRUE, ~0ull));
683 typedef SharedPtr<TestImage> TestImageSp;
685 // SamplerIndexingCaseInstance
// Runs the sampler-array indexing test: creates NUM_SAMPLERS 1-pixel
// textures, performs NUM_LOOKUPS indexed lookups across NUM_INVOCATIONS
// shader invocations and compares against a CPU reference (see iterate()).
687 class SamplerIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
692 NUM_INVOCATIONS = 64,
697 SamplerIndexingCaseInstance (Context& context,
698 const glu::ShaderType shaderType,
699 const ShaderSpec& shaderSpec,
701 glu::DataType samplerType,
702 const IndexExprType indexExprType,
703 const std::vector<int>& lookupIndices);
704 virtual ~SamplerIndexingCaseInstance (void);
706 virtual tcu::TestStatus iterate (void);
709 const glu::DataType m_samplerType;
// Which sampler array element each lookup reads (size == NUM_LOOKUPS).
710 const std::vector<int> m_lookupIndices;
// NOTE(review): the `name` parameter line is elided from this listing.
713 SamplerIndexingCaseInstance::SamplerIndexingCaseInstance (Context& context,
714 const glu::ShaderType shaderType,
715 const ShaderSpec& shaderSpec,
717 glu::DataType samplerType,
718 const IndexExprType indexExprType,
719 const std::vector<int>& lookupIndices)
720 : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, name, indexExprType)
721 , m_samplerType (samplerType)
722 , m_lookupIndices (lookupIndices)
726 SamplerIndexingCaseInstance::~SamplerIndexingCaseInstance (void)
730 bool isIntegerFormat (const tcu::TextureFormat& format)
732 const tcu::TextureChannelClass chnClass = tcu::getTextureChannelClass(format.type);
734 return chnClass == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ||
735 chnClass == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER;
// Executes the sampler indexing test:
//  1) builds random reference texture data + per-invocation coordinates,
//  2) creates images/samplers and the extra-resources descriptor set,
//  3) runs the shader via ShaderExecutor with the chosen index expression,
//  4) verifies lookups against a CPU reference (per-invocation for shadow
//     samplers; first invocation + cross-invocation consistency otherwise).
// NOTE(review): many brace/struct-member lines are elided in this listing.
738 tcu::TestStatus SamplerIndexingCaseInstance::iterate (void)
740 const int numInvocations = SamplerIndexingCaseInstance::NUM_INVOCATIONS;
741 const int numSamplers = SamplerIndexingCaseInstance::NUM_SAMPLERS;
742 const int numLookups = SamplerIndexingCaseInstance::NUM_LOOKUPS;
743 const glu::DataType coordType = getSamplerCoordType(m_samplerType);
744 const glu::DataType outputType = getSamplerOutputType(m_samplerType);
745 const tcu::TextureFormat texFormat = getSamplerTextureFormat(m_samplerType);
// Scalars per lookup across all invocations; used to slice outData per lookup.
746 const int outLookupStride = numInvocations*getDataTypeScalarSize(outputType);
747 vector<float> coords;
748 vector<deUint32> outData;
749 vector<deUint8> texData (numSamplers * texFormat.getPixelSize());
// CPU-side view of the reference texels: one pixel per sampler, laid out as a row.
750 const tcu::PixelBufferAccess refTexAccess (texFormat, numSamplers, 1, 1, &texData[0]);
// Seed derived from case parameters so each case gets deterministic data.
751 de::Random rnd (deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
752 const TextureType texType = getTextureType(m_samplerType);
// NEAREST when filtering could alter exact values (shadow/integer formats).
753 const tcu::Sampler::FilterMode filterMode = (isShadowSampler(m_samplerType) || isIntegerFormat(texFormat)) ? tcu::Sampler::NEAREST : tcu::Sampler::LINEAR;
755 // The shadow sampler with unnormalized coordinates is only used with the reference texture. Actual samplers in shaders use normalized coords.
756 const tcu::Sampler refSampler = isShadowSampler(m_samplerType)
757 ? tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
758 filterMode, filterMode, 0.0f, false /* non-normalized */,
759 tcu::Sampler::COMPAREMODE_LESS)
760 : tcu::Sampler(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
761 filterMode, filterMode);
763 const DeviceInterface& vkd = m_context.getDeviceInterface();
764 const VkDevice device = m_context.getDevice();
765 vector<TestImageSp> images;
766 vector<VkSamplerSp> samplers;
767 MovePtr<Buffer> indexBuffer;
768 Move<VkDescriptorSetLayout> extraResourcesLayout;
769 Move<VkDescriptorPool> extraResourcesSetPool;
770 Move<VkDescriptorSet> extraResourcesSet;
772 checkSupported(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
774 coords.resize(numInvocations * getDataTypeScalarSize(coordType));
// Cube maps: sample the +X face center ((1,0,0), plus compare ref for shadow).
776 if (texType == TEXTURE_TYPE_CUBE)
778 if (isShadowSampler(m_samplerType))
780 for (size_t i = 0; i < coords.size() / 4; i++)
782 coords[4 * i] = 1.0f;
783 coords[4 * i + 1] = coords[4 * i + 2] = coords[4 * i + 3] = 0.0f;
788 for (size_t i = 0; i < coords.size() / 3; i++)
790 coords[3 * i] = 1.0f;
791 coords[3 * i + 1] = coords[3 * i + 2] = 0.0f;
796 if (isShadowSampler(m_samplerType))
798 // Use different comparison value per invocation.
799 // \note Texture uses odd values, comparison even values.
800 const int numCoordComps = getDataTypeScalarSize(coordType);
801 const float cmpValues[] = { 0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f };
803 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
804 coords[invocationNdx*numCoordComps + (numCoordComps-1)] = rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));
807 fillTextureData(refTexAccess, rnd);
809 outData.resize(numLookups*outLookupStride);
// One image + one sampler per array element, sourced from the reference texels.
811 for (int ndx = 0; ndx < numSamplers; ++ndx)
813 images.push_back(TestImageSp(new TestImage(m_context, texType, texFormat, &texData[ndx * texFormat.getPixelSize()])));
// GPU samplers use normalized coords (only the CPU reference is unnormalized).
816 tcu::Sampler samplerCopy (refSampler);
817 samplerCopy.normalizedCoords = true;
820 const VkSamplerCreateInfo samplerParams = mapSampler(samplerCopy, texFormat);
821 samplers.push_back(VkSamplerSp(new Unique<VkSampler>(createSampler(vkd, device, &samplerParams))));
826 if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
827 indexBuffer = createUniformIndexBuffer(m_context, numLookups, &m_lookupIndices[0]);
// Extra-resources set layout: binding 0 = sampler array, binding numSamplers
// = uniform buffer with lookup indices.
830 const VkDescriptorSetLayoutBinding bindings[] =
832 { 0u, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (deUint32)numSamplers, VK_SHADER_STAGE_ALL, DE_NULL },
833 { (deUint32)numSamplers, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL }
835 const VkDescriptorSetLayoutCreateInfo layoutInfo =
837 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
839 (VkDescriptorSetLayoutCreateFlags)0u,
840 DE_LENGTH_OF_ARRAY(bindings),
844 extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
848 const VkDescriptorPoolSize poolSizes[] =
850 { VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, (deUint32)numSamplers },
851 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, }
853 const VkDescriptorPoolCreateInfo poolInfo =
855 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
857 (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
859 DE_LENGTH_OF_ARRAY(poolSizes),
863 extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
867 const VkDescriptorSetAllocateInfo allocInfo =
869 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
871 *extraResourcesSetPool,
873 &extraResourcesLayout.get(),
876 extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
// Write all combined image samplers into binding 0 in one update.
880 vector<VkDescriptorImageInfo> imageInfos (numSamplers);
881 const VkWriteDescriptorSet descriptorWrite =
883 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
887 0u, // dstArrayElement
888 (deUint32)numSamplers,
889 VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
891 (const VkDescriptorBufferInfo*)DE_NULL,
892 (const VkBufferView*)DE_NULL,
895 for (int ndx = 0; ndx < numSamplers; ++ndx)
897 imageInfos[ndx].sampler = **samplers[ndx];
898 imageInfos[ndx].imageView = images[ndx]->getImageView();
899 imageInfos[ndx].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
902 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
// Bind the uniform index buffer (only present in INDEX_EXPR_TYPE_UNIFORM mode;
// the surrounding `if` appears to be on an elided line — confirm).
907 const VkDescriptorBufferInfo bufferInfo =
909 indexBuffer->getBuffer(),
913 const VkWriteDescriptorSet descriptorWrite =
915 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
918 (deUint32)numSamplers, // dstBinding
919 0u, // dstArrayElement
921 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
922 (const VkDescriptorImageInfo*)DE_NULL,
924 (const VkBufferView*)DE_NULL,
927 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
931 std::vector<void*> inputs;
932 std::vector<void*> outputs;
933 std::vector<int> expandedIndices;
934 UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
936 inputs.push_back(&coords[0]);
// Dynamic-uniform mode: replicate each lookup index per invocation and feed
// them as additional shader inputs.
938 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
940 expandedIndices.resize(numInvocations * m_lookupIndices.size());
941 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
943 for (int invNdx = 0; invNdx < numInvocations; invNdx++)
944 expandedIndices[lookupNdx*numInvocations + invNdx] = m_lookupIndices[lookupNdx];
947 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
948 inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
951 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
952 outputs.push_back(&outData[outLookupStride*lookupNdx]);
954 executor->execute(numInvocations, &inputs[0], &outputs[0], *extraResourcesSet);
// ---- Verification ----
958 tcu::TestLog& log = m_context.getTestContext().getLog();
959 tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");
// Shadow samplers: compare value differs per invocation, so check each one
// against the CPU reference compare result.
961 if (isShadowSampler(m_samplerType))
963 const int numCoordComps = getDataTypeScalarSize(coordType);
965 TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);
967 // Each invocation may have different results.
968 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
970 const float coord = coords[invocationNdx*numCoordComps + (numCoordComps-1)];
972 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
974 const int texNdx = m_lookupIndices[lookupNdx];
975 const float result = *((const float*)(const deUint8*)&outData[lookupNdx*outLookupStride + invocationNdx]);
976 const float reference = refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord, (float)texNdx, 0.0f, tcu::IVec3(0));
978 if (de::abs(result-reference) > 0.005f)
980 log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup " << lookupNdx << ": expected "
981 << reference << ", got " << result
982 << tcu::TestLog::EndMessage;
// Keep first failure status; continue logging remaining mismatches.
984 if (testResult.getCode() == QP_TEST_RESULT_PASS)
985 testResult = tcu::TestStatus::fail("Got invalid lookup result");
// Non-shadow: all invocations use the same coordinate, so validate the first
// invocation against the reference texel...
992 TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);
994 // Validate results from first invocation
995 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
997 const int texNdx = m_lookupIndices[lookupNdx];
998 const deUint8* resPtr = (const deUint8*)&outData[lookupNdx*outLookupStride];
1001 if (outputType == glu::TYPE_FLOAT_VEC4)
1003 const float threshold = 1.0f / 256.0f;
1004 const tcu::Vec4 reference = refTexAccess.getPixel(texNdx, 0);
1005 const float* floatPtr = (const float*)resPtr;
1006 const tcu::Vec4 result (floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);
1008 isOk = boolAll(lessThanEqual(abs(reference-result), tcu::Vec4(threshold)));
1012 log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
1013 << reference << ", got " << result
1014 << tcu::TestLog::EndMessage;
// Integer formats: exact comparison, no threshold.
1019 const tcu::UVec4 reference = refTexAccess.getPixelUint(texNdx, 0);
1020 const deUint32* uintPtr = (const deUint32*)resPtr;
1021 const tcu::UVec4 result (uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);
1023 isOk = boolAll(equal(reference, result));
1027 log << tcu::TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
1028 << reference << ", got " << result
1029 << tcu::TestLog::EndMessage;
1033 if (!isOk && testResult.getCode() == QP_TEST_RESULT_PASS)
1034 testResult = tcu::TestStatus::fail("Got invalid lookup result");
1037 // Check results of other invocations against first one
1038 for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
1040 for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
1042 const deUint32* refPtr = &outData[lookupNdx*outLookupStride];
1043 const deUint32* resPtr = refPtr + invocationNdx*4;
1046 for (int ndx = 0; ndx < 4; ndx++)
1047 isOk = isOk && (refPtr[ndx] == resPtr[ndx]);
1051 log << tcu::TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
1052 << tcu::formatArray(tcu::Format::HexIterator<deUint32>(resPtr), tcu::Format::HexIterator<deUint32>(resPtr+4))
1053 << " for lookup " << lookupNdx << " doesn't match result from first invocation "
1054 << tcu::formatArray(tcu::Format::HexIterator<deUint32>(refPtr), tcu::Format::HexIterator<deUint32>(refPtr+4))
1055 << tcu::TestLog::EndMessage;
1057 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1058 testResult = tcu::TestStatus::fail("Inconsistent lookup results");
// Test case that owns the sampler indexing parameters and randomized lookup
// indices; builds the shader spec up-front and spawns the instance above.
1068 class SamplerIndexingCase : public OpaqueTypeIndexingCase
1071 SamplerIndexingCase (tcu::TestContext& testCtx,
1073 const char* description,
1074 const glu::ShaderType shaderType,
1075 glu::DataType samplerType,
1076 IndexExprType indexExprType);
1077 virtual ~SamplerIndexingCase (void);
1079 virtual TestInstance* createInstance (Context& ctx) const;
// Non-copyable (declared, not defined).
1082 SamplerIndexingCase (const SamplerIndexingCase&);
1083 SamplerIndexingCase& operator= (const SamplerIndexingCase&);
1085 void createShaderSpec (void);
1087 const glu::DataType m_samplerType;
1088 const int m_numSamplers;
1089 const int m_numLookups;
1090 std::vector<int> m_lookupIndices;
// NOTE(review): the `name` parameter line and the createShaderSpec() call in
// the ctor body are elided in this listing.
1093 SamplerIndexingCase::SamplerIndexingCase (tcu::TestContext& testCtx,
1095 const char* description,
1096 const glu::ShaderType shaderType,
1097 glu::DataType samplerType,
1098 IndexExprType indexExprType)
1099 : OpaqueTypeIndexingCase (testCtx, name, description, shaderType, indexExprType)
1100 , m_samplerType (samplerType)
1101 , m_numSamplers (SamplerIndexingCaseInstance::NUM_SAMPLERS)
1102 , m_numLookups (SamplerIndexingCaseInstance::NUM_LOOKUPS)
1103 , m_lookupIndices (m_numLookups)
1109 SamplerIndexingCase::~SamplerIndexingCase (void)
// Factory: remaining constructor arguments are on elided lines.
1113 TestInstance* SamplerIndexingCase::createInstance (Context& ctx) const
1115 return new SamplerIndexingCaseInstance(ctx,
// Builds the GLSL shader spec: declares the sampler array and index sources,
// picks random lookup indices (same RNG seeding as the instance), and emits
// one texture() lookup per output, indexed per m_indexExprType.
1124 void SamplerIndexingCase::createShaderSpec (void)
1126 de::Random rnd (deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1127 const char* samplersName = "texSampler";
1128 const char* coordsName = "coords";
1129 const char* indicesPrefix = "index";
1130 const char* resultPrefix = "result";
1131 const glu::DataType coordType = getSamplerCoordType(m_samplerType);
1132 const glu::DataType outType = getSamplerOutputType(m_samplerType);
1133 std::ostringstream global, code;
1135 for (int ndx = 0; ndx < m_numLookups; ndx++)
1136 m_lookupIndices[ndx] = rnd.getInt(0, m_numSamplers-1);
1138 m_shaderSpec.inputs.push_back(Symbol(coordsName, glu::VarType(coordType, glu::PRECISION_HIGHP)));
// Non-literal indexing requires the gpu_shader5 indexing semantics.
1140 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1141 global << "#extension GL_EXT_gpu_shader5 : require\n";
1143 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1144 global << "const highp int indexBase = 1;\n";
1147 "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << m_numSamplers << "];\n";
// Dynamic-uniform: indices arrive as per-invocation shader inputs;
// uniform: indices come from the "Indices" uniform block after the samplers.
1149 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1151 for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1153 const std::string varName = indicesPrefix + de::toString(lookupNdx);
1154 m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1157 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1158 declareUniformIndexVars(global, (deUint32)m_numSamplers, indicesPrefix, m_numLookups);
1160 for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1162 const std::string varName = resultPrefix + de::toString(lookupNdx);
1163 m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(outType, glu::PRECISION_HIGHP)));
// Emit: resultN = texture(texSampler[<index expr>], coords);
1166 for (int lookupNdx = 0; lookupNdx < m_numLookups; lookupNdx++)
1168 code << resultPrefix << "" << lookupNdx << " = texture(" << samplersName << "[";
1170 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1171 code << m_lookupIndices[lookupNdx];
1172 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1173 code << "indexBase + " << (m_lookupIndices[lookupNdx]-1);
1175 code << indicesPrefix << lookupNdx;
1177 code << "], " << coordsName << ");\n";
1180 m_shaderSpec.globalDeclarations = global.str();
1181 m_shaderSpec.source = code.str();
// BlockType: which interface backs the indexed instance array (UBO vs SSBO).
1186 BLOCKTYPE_UNIFORM = 0,
// Runtime instance for uniform/buffer block instance array indexing: binds
// one buffer per array element, executes the shader, and checks each read
// against the expected input value.
1192 class BlockArrayIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
1197 NUM_INVOCATIONS = 32,
1202 BlockArrayIndexingCaseInstance (Context& context,
1203 const glu::ShaderType shaderType,
1204 const ShaderSpec& shaderSpec,
1206 BlockType blockType,
1207 const IndexExprType indexExprType,
1208 const std::vector<int>& readIndices,
1209 const std::vector<deUint32>& inValues);
1210 virtual ~BlockArrayIndexingCaseInstance (void);
1212 virtual tcu::TestStatus iterate (void);
1215 const BlockType m_blockType;
// References to data owned by the parent case object, which outlives
// this instance.
1216 const std::vector<int>& m_readIndices;
1217 const std::vector<deUint32>& m_inValues;
// Constructor: stores references to the case-owned index/value tables
// (no copies are made; the case object must outlive the instance).
1220 BlockArrayIndexingCaseInstance::BlockArrayIndexingCaseInstance (Context& context,
1221 const glu::ShaderType shaderType,
1222 const ShaderSpec& shaderSpec,
1224 BlockType blockType,
1225 const IndexExprType indexExprType,
1226 const std::vector<int>& readIndices,
1227 const std::vector<deUint32>& inValues)
1228 : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, name, indexExprType)
1229 , m_blockType (blockType)
1230 , m_readIndices (readIndices)
1231 , m_inValues (inValues)
1235 BlockArrayIndexingCaseInstance::~BlockArrayIndexingCaseInstance (void)
// Executes the block-array indexing test: uploads one small buffer per block
// array element, binds them (plus an optional index UBO) through an extra
// descriptor set, runs the shader for NUM_INVOCATIONS invocations, and checks
// that every read returned the value stored at its expected index.
1239 tcu::TestStatus BlockArrayIndexingCaseInstance::iterate (void)
1241 const int numInvocations = NUM_INVOCATIONS;
1242 const int numReads = NUM_READS;
// Flat result array, laid out read-major: outValues[read*numInvocations + inv].
1243 std::vector<deUint32> outValues (numInvocations*numReads);
1245 tcu::TestLog& log = m_context.getTestContext().getLog();
1246 tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");
1248 std::vector<int> expandedIndices;
1249 std::vector<void*> inputs;
1250 std::vector<void*> outputs;
// UBO vs SSBO decides both buffer usage and descriptor type.
1251 const VkBufferUsageFlags bufferUsage = m_blockType == BLOCKTYPE_UNIFORM ? VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
1252 const VkDescriptorType descriptorType = m_blockType == BLOCKTYPE_UNIFORM ? VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
1254 const DeviceInterface& vkd = m_context.getDeviceInterface();
1255 const VkDevice device = m_context.getDevice();
1257 // \note Using separate buffer per element - might want to test
1258 // offsets & single buffer in the future.
1259 vector<BufferSp> buffers (m_inValues.size());
1260 MovePtr<Buffer> indexBuffer;
1262 Move<VkDescriptorSetLayout> extraResourcesLayout;
1263 Move<VkDescriptorPool> extraResourcesSetPool;
1264 Move<VkDescriptorSet> extraResourcesSet;
// Skips the test if the shader stage can't use this descriptor type.
1266 checkSupported(descriptorType);
// Upload one deUint32 per block array element.
1268 for (size_t bufferNdx = 0; bufferNdx < m_inValues.size(); ++bufferNdx)
1270 buffers[bufferNdx] = BufferSp(new Buffer(m_context, bufferUsage, sizeof(deUint32)));
1271 *(deUint32*)buffers[bufferNdx]->getHostPtr() = m_inValues[bufferNdx];
1272 buffers[bufferNdx]->flush();
// Uniform-indexed variant reads its indices from a dedicated UBO.
1275 if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1276 indexBuffer = createUniformIndexBuffer(m_context, numReads, &m_readIndices[0]);
// Layout: binding 0 = the block array (arrayed descriptor), binding
// m_inValues.size() = the index UBO.
1279 const VkDescriptorSetLayoutBinding bindings[] =
1281 { 0u, descriptorType, (deUint32)m_inValues.size(), VK_SHADER_STAGE_ALL, DE_NULL },
1282 { (deUint32)m_inValues.size(), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL }
1284 const VkDescriptorSetLayoutCreateInfo layoutInfo =
1286 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1288 (VkDescriptorSetLayoutCreateFlags)0u,
1289 DE_LENGTH_OF_ARRAY(bindings),
1293 extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
1297 const VkDescriptorPoolSize poolSizes[] =
1299 { descriptorType, (deUint32)m_inValues.size() },
1300 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, }
1302 const VkDescriptorPoolCreateInfo poolInfo =
1304 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
1306 (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
1308 DE_LENGTH_OF_ARRAY(poolSizes),
1312 extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
1316 const VkDescriptorSetAllocateInfo allocInfo =
1318 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1320 *extraResourcesSetPool,
1322 &extraResourcesLayout.get(),
1325 extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
// Point every array element of binding 0 at its backing buffer.
1329 vector<VkDescriptorBufferInfo> bufferInfos (m_inValues.size());
1330 const VkWriteDescriptorSet descriptorWrite =
1332 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1336 0u, // dstArrayElement
1337 (deUint32)m_inValues.size(),
1339 (const VkDescriptorImageInfo*)DE_NULL,
1341 (const VkBufferView*)DE_NULL,
1344 for (size_t ndx = 0; ndx < m_inValues.size(); ++ndx)
1346 bufferInfos[ndx].buffer = buffers[ndx]->getBuffer();
1347 bufferInfos[ndx].offset = 0u;
1348 bufferInfos[ndx].range = VK_WHOLE_SIZE;
1351 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
// Bind the index UBO (when present) at binding m_inValues.size().
1356 const VkDescriptorBufferInfo bufferInfo =
1358 indexBuffer->getBuffer(),
1362 const VkWriteDescriptorSet descriptorWrite =
1364 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1367 (deUint32)m_inValues.size(), // dstBinding
1368 0u, // dstArrayElement
1370 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1371 (const VkDescriptorImageInfo*)DE_NULL,
1373 (const VkBufferView*)DE_NULL,
1376 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
// Dynamically uniform indices are fed as shader inputs: replicate each read
// index across all invocations so every invocation sees the same value.
1379 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1381 expandedIndices.resize(numInvocations * m_readIndices.size());
1383 for (int readNdx = 0; readNdx < numReads; readNdx++)
1385 int* dst = &expandedIndices[numInvocations*readNdx];
1386 std::fill(dst, dst+numInvocations, m_readIndices[readNdx]);
1389 for (int readNdx = 0; readNdx < numReads; readNdx++)
1390 inputs.push_back(&expandedIndices[readNdx*numInvocations]);
1393 for (int readNdx = 0; readNdx < numReads; readNdx++)
1394 outputs.push_back(&outValues[readNdx*numInvocations]);
1397 UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
1399 executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
// Every invocation must have read the value stored at its read index.
// All mismatches are logged; the first one decides the failure status.
1402 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1404 for (int readNdx = 0; readNdx < numReads; readNdx++)
1406 const deUint32 refValue = m_inValues[m_readIndices[readNdx]];
1407 const deUint32 resValue = outValues[readNdx*numInvocations + invocationNdx];
1409 if (refValue != resValue)
1411 log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
1412 << ", read " << readNdx << ": expected "
1413 << tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
1414 << tcu::TestLog::EndMessage;
1416 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1417 testResult = tcu::TestStatus::fail("Invalid result value");
// Test case for indexing into an array of uniform/buffer block instances.
// The block type (UBO vs SSBO) and index expression kind are fixed per case;
// createShaderSpec() generates the GLSL and the read indices / input values.
1425 class BlockArrayIndexingCase : public OpaqueTypeIndexingCase
1428 BlockArrayIndexingCase (tcu::TestContext& testCtx,
1430 const char* description,
1431 BlockType blockType,
1432 IndexExprType indexExprType,
1433 const glu::ShaderType shaderType);
1434 virtual ~BlockArrayIndexingCase (void);
1436 virtual TestInstance* createInstance (Context& ctx) const;
// Declared but not defined: disallows copying of the case object.
1439 BlockArrayIndexingCase (const BlockArrayIndexingCase&);
1440 BlockArrayIndexingCase& operator= (const BlockArrayIndexingCase&);
// Fills m_shaderSpec, m_readIndices and m_inValues.
1442 void createShaderSpec (void);
1444 const BlockType m_blockType;
1445 std::vector<int> m_readIndices; // block instance read by each lookup; size NUM_READS
1446 std::vector<deUint32> m_inValues; // value stored in each block instance; size NUM_INSTANCES
// Constructor: sizes the index/value tables; createShaderSpec() fills them.
1449 BlockArrayIndexingCase::BlockArrayIndexingCase (tcu::TestContext& testCtx,
1451 const char* description,
1452 BlockType blockType,
1453 IndexExprType indexExprType,
1454 const glu::ShaderType shaderType)
1455 : OpaqueTypeIndexingCase (testCtx, name, description, shaderType, indexExprType)
1456 , m_blockType (blockType)
1457 , m_readIndices (BlockArrayIndexingCaseInstance::NUM_READS)
1458 , m_inValues (BlockArrayIndexingCaseInstance::NUM_INSTANCES)
1464 BlockArrayIndexingCase::~BlockArrayIndexingCase (void)
// Creates the runtime instance; m_readIndices/m_inValues are passed by
// reference, so this case object must outlive the instance.
1468 TestInstance* BlockArrayIndexingCase::createInstance (Context& ctx) const
1470 return new BlockArrayIndexingCaseInstance(ctx,
// Generates the shader spec: declares an array of numInstances block
// instances (uniform or buffer, per m_blockType), and numReads reads of
// block[i].value, each indexed according to m_indexExprType.
1480 void BlockArrayIndexingCase::createShaderSpec (void)
1482 const int numInstances = BlockArrayIndexingCaseInstance::NUM_INSTANCES;
1483 const int numReads = BlockArrayIndexingCaseInstance::NUM_READS;
// Seed from case parameters so indices/values are deterministic per case.
1484 de::Random rnd (deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));
1485 const char* blockName = "Block";
1486 const char* instanceName = "block";
1487 const char* indicesPrefix = "index";
1488 const char* resultPrefix = "result";
1489 const char* interfaceName = m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "buffer";
1490 std::ostringstream global, code;
// Pick which instance each read targets and randomize the stored values.
1492 for (int readNdx = 0; readNdx < numReads; readNdx++)
1493 m_readIndices[readNdx] = rnd.getInt(0, numInstances-1);
1495 for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
1496 m_inValues[instanceNdx] = rnd.getUint32();
// Non-constant indexing of block instance arrays requires GL_EXT_gpu_shader5.
1498 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1499 global << "#extension GL_EXT_gpu_shader5 : require\n";
1501 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1502 global << "const highp int indexBase = 1;\n";
1505 "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0) " << interfaceName << " " << blockName << "\n"
1507 " highp uint value;\n"
1508 "} " << instanceName << "[" << numInstances << "];\n";
// Dynamically uniform indices arrive as one int shader input per read;
// plain uniform indices are declared as uniforms instead.
1510 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1512 for (int readNdx = 0; readNdx < numReads; readNdx++)
1514 const std::string varName = indicesPrefix + de::toString(readNdx);
1515 m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1518 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1519 declareUniformIndexVars(global, (deUint32)m_inValues.size(), indicesPrefix, numReads);
// One uint output per read receives block[i].value.
1521 for (int readNdx = 0; readNdx < numReads; readNdx++)
1523 const std::string varName = resultPrefix + de::toString(readNdx);
1524 m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
1527 for (int readNdx = 0; readNdx < numReads; readNdx++)
1529 code << resultPrefix << readNdx << " = " << instanceName << "[";
1531 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1532 code << m_readIndices[readNdx];
1533 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
// indexBase (1) + (i-1) evaluates to i as a constant expression.
1534 code << "indexBase + " << (m_readIndices[readNdx]-1);
1536 code << indicesPrefix << readNdx;
1538 code << "].value;\n";
1541 m_shaderSpec.globalDeclarations = global.str();
1542 m_shaderSpec.source = code.str();
// Runtime instance for atomic counter indexing: counters are emulated with a
// single SSBO of uints; the shader performs atomicAdd on dynamically chosen
// elements and the instance verifies both final counter values and the
// uniqueness of the returned pre-add values.
1545 class AtomicCounterIndexingCaseInstance : public OpaqueTypeIndexingTestInstance
1550 NUM_INVOCATIONS = 32,
1555 AtomicCounterIndexingCaseInstance (Context& context,
1556 const glu::ShaderType shaderType,
1557 const ShaderSpec& shaderSpec,
1559 const std::vector<int>& opIndices,
1560 const IndexExprType indexExprType);
1561 virtual ~AtomicCounterIndexingCaseInstance (void);
1563 virtual tcu::TestStatus iterate (void);
// Reference to the case-owned index table; the case outlives this instance.
1566 const std::vector<int>& m_opIndices;
// Constructor: stores a reference to the case-owned op-index table
// (no copy is made; the case object must outlive the instance).
1569 AtomicCounterIndexingCaseInstance::AtomicCounterIndexingCaseInstance (Context& context,
1570 const glu::ShaderType shaderType,
1571 const ShaderSpec& shaderSpec,
1573 const std::vector<int>& opIndices,
1574 const IndexExprType indexExprType)
1575 : OpaqueTypeIndexingTestInstance (context, shaderType, shaderSpec, name, indexExprType)
1576 , m_opIndices (opIndices)
1580 AtomicCounterIndexingCaseInstance::~AtomicCounterIndexingCaseInstance (void)
// Executes the atomic counter indexing test: zero-initializes the counter
// SSBO, runs the shader (each invocation performs numOps atomicAdds on the
// counters selected by m_opIndices), then verifies that (a) each counter's
// final value equals hits*invocations and (b) every returned pre-add value
// is in range and unique per counter.
1584 tcu::TestStatus AtomicCounterIndexingCaseInstance::iterate (void)
1586 // \todo [2015-12-02 elecro] Add vertexPipelineStoresAndAtomics feature check.
1587 const int numInvocations = NUM_INVOCATIONS;
1588 const int numCounters = NUM_COUNTERS;
1589 const int numOps = NUM_OPS;
1590 std::vector<int> expandedIndices;
1591 std::vector<void*> inputs;
1592 std::vector<void*> outputs;
// Flat result array, laid out op-major: outValues[op*numInvocations + inv].
1593 std::vector<deUint32> outValues (numInvocations*numOps);
1595 const DeviceInterface& vkd = m_context.getDeviceInterface();
1596 const VkDevice device = m_context.getDevice();
1598 // \note Using separate buffer per element - might want to test
1599 // offsets & single buffer in the future.
1600 Buffer atomicOpBuffer (m_context, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, sizeof(deUint32)*numCounters);
1601 MovePtr<Buffer> indexBuffer;
1603 Move<VkDescriptorSetLayout> extraResourcesLayout;
1604 Move<VkDescriptorPool> extraResourcesSetPool;
1605 Move<VkDescriptorSet> extraResourcesSet;
// Skips the test if the shader stage can't use storage buffers.
1607 checkSupported(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
// All counters start at zero.
1609 deMemset(atomicOpBuffer.getHostPtr(), 0, sizeof(deUint32)*numCounters);
1610 atomicOpBuffer.flush();
// Uniform-indexed variant reads its indices from a dedicated UBO.
1612 if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1613 indexBuffer = createUniformIndexBuffer(m_context, numOps, &m_opIndices[0]);
// Layout: binding 0 = counter SSBO, binding 1 = index UBO.
1616 const VkDescriptorSetLayoutBinding bindings[] =
1618 { 0u, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL },
1619 { 1u, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, VK_SHADER_STAGE_ALL, DE_NULL }
1621 const VkDescriptorSetLayoutCreateInfo layoutInfo =
1623 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1625 (VkDescriptorSetLayoutCreateFlags)0u,
1626 DE_LENGTH_OF_ARRAY(bindings),
1630 extraResourcesLayout = createDescriptorSetLayout(vkd, device, &layoutInfo);
1634 const VkDescriptorPoolSize poolSizes[] =
1636 { VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1u, },
1637 { VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1u, }
1639 const VkDescriptorPoolCreateInfo poolInfo =
1641 VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
1643 (VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
1645 DE_LENGTH_OF_ARRAY(poolSizes),
1649 extraResourcesSetPool = createDescriptorPool(vkd, device, &poolInfo);
1653 const VkDescriptorSetAllocateInfo allocInfo =
1655 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1657 *extraResourcesSetPool,
1659 &extraResourcesLayout.get(),
1662 extraResourcesSet = allocateDescriptorSet(vkd, device, &allocInfo);
// Bind the counter SSBO at binding 0.
1666 const VkDescriptorBufferInfo bufferInfo =
1668 atomicOpBuffer.getBuffer(),
1672 const VkWriteDescriptorSet descriptorWrite =
1674 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1678 0u, // dstArrayElement
1680 VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,
1681 (const VkDescriptorImageInfo*)DE_NULL,
1683 (const VkBufferView*)DE_NULL,
1686 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
// Bind the index UBO (when present) at binding 1.
1691 const VkDescriptorBufferInfo bufferInfo =
1693 indexBuffer->getBuffer(),
1697 const VkWriteDescriptorSet descriptorWrite =
1699 VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
1703 0u, // dstArrayElement
1705 VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1706 (const VkDescriptorImageInfo*)DE_NULL,
1708 (const VkBufferView*)DE_NULL,
1711 vkd.updateDescriptorSets(device, 1u, &descriptorWrite, 0u, DE_NULL);
// Dynamically uniform indices are fed as shader inputs: replicate each op
// index across all invocations so every invocation sees the same value.
1714 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1716 expandedIndices.resize(numInvocations * m_opIndices.size());
1718 for (int opNdx = 0; opNdx < numOps; opNdx++)
1720 int* dst = &expandedIndices[numInvocations*opNdx];
1721 std::fill(dst, dst+numInvocations, m_opIndices[opNdx]);
1724 for (int opNdx = 0; opNdx < numOps; opNdx++)
1725 inputs.push_back(&expandedIndices[opNdx*numInvocations]);
1728 for (int opNdx = 0; opNdx < numOps; opNdx++)
1729 outputs.push_back(&outValues[opNdx*numInvocations]);
1732 UniquePtr<ShaderExecutor> executor (createExecutor(m_context, m_shaderType, m_shaderSpec, *extraResourcesLayout));
1734 executor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0], *extraResourcesSet);
1738 tcu::TestLog& log = m_context.getTestContext().getLog();
1739 tcu::TestStatus testResult = tcu::TestStatus::pass("Pass");
1740 std::vector<int> numHits (numCounters, 0); // Number of hits per counter.
1741 std::vector<deUint32> counterValues (numCounters);
// counterMasks[c][v] marks pre-add value v as already seen for counter c.
1742 std::vector<std::vector<bool> > counterMasks (numCounters);
// Count how many ops (per invocation) target each counter.
1744 for (int opNdx = 0; opNdx < numOps; opNdx++)
1745 numHits[m_opIndices[opNdx]] += 1;
1747 // Read counter values
1749 const void* mapPtr = atomicOpBuffer.getHostPtr();
1750 DE_ASSERT(mapPtr != DE_NULL);
1751 atomicOpBuffer.invalidate();
1752 std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + numCounters, &counterValues[0]);
1755 // Verify counter values
// Each counter must end at hits * invocations regardless of execution order.
1756 for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1758 const deUint32 refCount = (deUint32)(numHits[counterNdx]*numInvocations);
1759 const deUint32 resCount = counterValues[counterNdx];
1761 if (refCount != resCount)
1763 log << tcu::TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
1764 << ", expected " << refCount
1765 << tcu::TestLog::EndMessage;
1767 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1768 testResult = tcu::TestStatus::fail("Invalid atomic counter value");
1772 // Allocate bitmasks - one bit per each valid result value
1773 for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1775 const int counterValue = numHits[counterNdx]*numInvocations;
1776 counterMasks[counterNdx].resize(counterValue, false);
1779 // Verify result values from shaders
// Each atomicAdd return value must be in [0, finalValue) and unique for its
// counter - atomicity guarantees no two adds observe the same old value.
1780 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1782 for (int opNdx = 0; opNdx < numOps; opNdx++)
1784 const int counterNdx = m_opIndices[opNdx];
1785 const deUint32 resValue = outValues[opNdx*numInvocations + invocationNdx];
1786 const bool rangeOk = de::inBounds(resValue, 0u, (deUint32)counterMasks[counterNdx].size());
1787 const bool notSeen = rangeOk && !counterMasks[counterNdx][resValue];
1788 const bool isOk = rangeOk && notSeen;
1792 log << tcu::TestLog::Message << "ERROR: at invocation " << invocationNdx
1793 << ", op " << opNdx << ": got invalid result value "
1795 << tcu::TestLog::EndMessage;
1797 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1798 testResult = tcu::TestStatus::fail("Invalid result value");
1802 // Mark as used - no other invocation should see this value from same counter.
1803 counterMasks[counterNdx][resValue] = true;
1808 if (testResult.getCode() == QP_TEST_RESULT_PASS)
1810 // Consistency check - all masks should be 1 now
1811 for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1813 for (std::vector<bool>::const_iterator i = counterMasks[counterNdx].begin(); i != counterMasks[counterNdx].end(); i++)
1814 TCU_CHECK_INTERNAL(*i);
1822 class AtomicCounterIndexingCase : public OpaqueTypeIndexingCase
1825 AtomicCounterIndexingCase (tcu::TestContext& testCtx,
1827 const char* description,
1828 IndexExprType indexExprType,
1829 const glu::ShaderType shaderType);
1830 virtual ~AtomicCounterIndexingCase (void);
1832 virtual TestInstance* createInstance (Context& ctx) const;
1835 AtomicCounterIndexingCase (const BlockArrayIndexingCase&);
1836 AtomicCounterIndexingCase& operator= (const BlockArrayIndexingCase&);
1838 void createShaderSpec (void);
1840 std::vector<int> m_opIndices;
// Constructor: sizes the op-index table; createShaderSpec() fills it.
1843 AtomicCounterIndexingCase::AtomicCounterIndexingCase (tcu::TestContext& testCtx,
1845 const char* description,
1846 IndexExprType indexExprType,
1847 const glu::ShaderType shaderType)
1848 : OpaqueTypeIndexingCase (testCtx, name, description, shaderType, indexExprType)
1849 , m_opIndices (AtomicCounterIndexingCaseInstance::NUM_OPS)
1855 AtomicCounterIndexingCase::~AtomicCounterIndexingCase (void)
// Creates the runtime instance; m_opIndices is passed by reference, so this
// case object must outlive the instance.
1859 TestInstance* AtomicCounterIndexingCase::createInstance (Context& ctx) const
1861 return new AtomicCounterIndexingCaseInstance(ctx,
1869 void AtomicCounterIndexingCase::createShaderSpec (void)
1871 const int numCounters = AtomicCounterIndexingCaseInstance::NUM_COUNTERS;
1872 const int numOps = AtomicCounterIndexingCaseInstance::NUM_OPS;
1873 de::Random rnd (deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1875 for (int opNdx = 0; opNdx < numOps; opNdx++)
1876 m_opIndices[opNdx] = rnd.getInt(0, numOps-1);
1879 const char* indicesPrefix = "index";
1880 const char* resultPrefix = "result";
1881 std::ostringstream global, code;
1883 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL)
1884 global << "#extension GL_EXT_gpu_shader5 : require\n";
1886 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1887 global << "const highp int indexBase = 1;\n";
1890 "layout(set = " << EXTRA_RESOURCES_DESCRIPTOR_SET_INDEX << ", binding = 0, std430) buffer AtomicBuffer { highp uint counter[" << numCounters << "]; };\n";
1892 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1894 for (int opNdx = 0; opNdx < numOps; opNdx++)
1896 const std::string varName = indicesPrefix + de::toString(opNdx);
1897 m_shaderSpec.inputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_INT, glu::PRECISION_HIGHP)));
1900 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1901 declareUniformIndexVars(global, 1, indicesPrefix, numOps);
1903 for (int opNdx = 0; opNdx < numOps; opNdx++)
1905 const std::string varName = resultPrefix + de::toString(opNdx);
1906 m_shaderSpec.outputs.push_back(Symbol(varName, glu::VarType(glu::TYPE_UINT, glu::PRECISION_HIGHP)));
1909 for (int opNdx = 0; opNdx < numOps; opNdx++)
1911 code << resultPrefix << opNdx << " = atomicAdd(counter[";
1913 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1914 code << m_opIndices[opNdx];
1915 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1916 code << "indexBase + " << (m_opIndices[opNdx]-1);
1918 code << indicesPrefix << opNdx;
1920 code << "], uint(1));\n";
1923 m_shaderSpec.globalDeclarations = global.str();
1924 m_shaderSpec.source = code.str();
// Root group for the opaque type indexing tests; init() populates the
// sampler / ubo / ssbo / atomic_counter sub-groups.
1928 class OpaqueTypeIndexingTests : public tcu::TestCaseGroup
1931 OpaqueTypeIndexingTests (tcu::TestContext& testCtx);
1932 virtual ~OpaqueTypeIndexingTests (void);
1934 virtual void init (void);
// Declared but not defined: disallows copying of the group object.
1937 OpaqueTypeIndexingTests (const OpaqueTypeIndexingTests&);
1938 OpaqueTypeIndexingTests& operator= (const OpaqueTypeIndexingTests&);
1941 OpaqueTypeIndexingTests::OpaqueTypeIndexingTests (tcu::TestContext& testCtx)
1942 : tcu::TestCaseGroup(testCtx, "opaque_type_indexing", "Opaque Type Indexing Tests")
1946 OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests (void)
// Builds the test hierarchy: sampler cases grouped by index expression type,
// then shader stage, then sampler type; and ubo/ssbo/atomic_counter cases as
// one flat case per (index expression type, shader stage) pair.
1950 void OpaqueTypeIndexingTests::init (void)
1956 const char* description;
1959 { INDEX_EXPR_TYPE_CONST_LITERAL, "const_literal", "Indexing by constant literal" },
1960 { INDEX_EXPR_TYPE_CONST_EXPRESSION, "const_expression", "Indexing by constant expression" },
1961 { INDEX_EXPR_TYPE_UNIFORM, "uniform", "Indexing by uniform value" },
1962 { INDEX_EXPR_TYPE_DYNAMIC_UNIFORM, "dynamically_uniform", "Indexing by dynamically uniform expression" }
1967 glu::ShaderType type;
1971 { glu::SHADERTYPE_VERTEX, "vertex" },
1972 { glu::SHADERTYPE_FRAGMENT, "fragment" },
1973 { glu::SHADERTYPE_COMPUTE, "compute" }
1978 static const glu::DataType samplerTypes[] =
1980 // \note 1D images will be added by a later extension.
1981 // glu::TYPE_SAMPLER_1D,
1982 glu::TYPE_SAMPLER_2D,
1983 glu::TYPE_SAMPLER_CUBE,
1984 glu::TYPE_SAMPLER_2D_ARRAY,
1985 glu::TYPE_SAMPLER_3D,
1986 // glu::TYPE_SAMPLER_1D_SHADOW,
1987 glu::TYPE_SAMPLER_2D_SHADOW,
1988 glu::TYPE_SAMPLER_CUBE_SHADOW,
1989 glu::TYPE_SAMPLER_2D_ARRAY_SHADOW,
1990 // glu::TYPE_INT_SAMPLER_1D,
1991 glu::TYPE_INT_SAMPLER_2D,
1992 glu::TYPE_INT_SAMPLER_CUBE,
1993 glu::TYPE_INT_SAMPLER_2D_ARRAY,
1994 glu::TYPE_INT_SAMPLER_3D,
1995 // glu::TYPE_UINT_SAMPLER_1D,
1996 glu::TYPE_UINT_SAMPLER_2D,
1997 glu::TYPE_UINT_SAMPLER_CUBE,
1998 glu::TYPE_UINT_SAMPLER_2D_ARRAY,
1999 glu::TYPE_UINT_SAMPLER_3D,
// Sampler cases: sampler / <index_type> / <shader_stage> / <sampler_type>.
// Ownership of each group passes to its parent via addChild().
2002 tcu::TestCaseGroup* const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
2003 addChild(samplerGroup);
2005 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
2007 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
2008 tcu::TestCaseGroup* const indexGroup = new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name, indexingTypes[indexTypeNdx].description);
2009 samplerGroup->addChild(indexGroup);
2011 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
2013 const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
2014 tcu::TestCaseGroup* const shaderGroup = new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
2015 indexGroup->addChild(shaderGroup);
2017 for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
2019 const glu::DataType samplerType = samplerTypes[samplerTypeNdx];
2020 const char* samplerName = getDataTypeName(samplerType);
2021 const std::string caseName = de::toLower(samplerName);
2023 shaderGroup->addChild(new SamplerIndexingCase(m_testCtx, caseName.c_str(), "", shaderType, samplerType, indexExprType));
2029 // .ubo / .ssbo / .atomic_counter
2031 tcu::TestCaseGroup* const uboGroup = new tcu::TestCaseGroup(m_testCtx, "ubo", "Uniform Block Instance Array Indexing Tests");
2032 tcu::TestCaseGroup* const ssboGroup = new tcu::TestCaseGroup(m_testCtx, "ssbo", "Buffer Block Instance Array Indexing Tests");
2033 tcu::TestCaseGroup* const acGroup = new tcu::TestCaseGroup(m_testCtx, "atomic_counter", "Atomic Counter Array Indexing Tests");
2035 addChild(ssboGroup);
2038 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
2040 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
2041 const char* indexExprName = indexingTypes[indexTypeNdx].name;
2042 const char* indexExprDesc = indexingTypes[indexTypeNdx].description;
2044 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
2046 const glu::ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
2047 const std::string name = std::string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;
2049 uboGroup->addChild (new BlockArrayIndexingCase (m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_UNIFORM, indexExprType, shaderType));
2050 acGroup->addChild (new AtomicCounterIndexingCase (m_testCtx, name.c_str(), indexExprDesc, indexExprType, shaderType));
// SSBO instance arrays may only be indexed with constant expressions here,
// so the uniform / dynamically-uniform variants are not generated for them.
2052 if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
2053 ssboGroup->addChild (new BlockArrayIndexingCase (m_testCtx, name.c_str(), indexExprDesc, BLOCKTYPE_BUFFER, indexExprType, shaderType));
2061 tcu::TestCaseGroup* createOpaqueTypeIndexingTests (tcu::TestContext& testCtx)
2063 return new OpaqueTypeIndexingTests(testCtx);