From 0abb8ae884208e3355eb68bde6cedab1dd7b773c Mon Sep 17 00:00:00 2001 From: Kantoch Date: Tue, 12 Apr 2016 10:47:10 +0200 Subject: [PATCH] SPIR-V Shader Intrinsics for Sparse Images Test --- doc/testspecs/VK/sparse_resources.txt | 18 + .../modules/vulkan/sparse_resources/CMakeLists.txt | 8 + .../sparse_resources/vktSparseResourcesBase.cpp | 14 +- .../sparse_resources/vktSparseResourcesBase.hpp | 5 + .../vktSparseResourcesShaderIntrinsics.cpp | 140 +++ .../vktSparseResourcesShaderIntrinsics.hpp | 39 + .../vktSparseResourcesShaderIntrinsicsBase.cpp | 627 +++++++++ .../vktSparseResourcesShaderIntrinsicsBase.hpp | 176 +++ .../vktSparseResourcesShaderIntrinsicsSampled.cpp | 793 ++++++++++++++ .../vktSparseResourcesShaderIntrinsicsSampled.hpp | 126 +++ .../vktSparseResourcesShaderIntrinsicsStorage.cpp | 582 ++++++++++ .../vktSparseResourcesShaderIntrinsicsStorage.hpp | 95 ++ .../sparse_resources/vktSparseResourcesTests.cpp | 4 +- .../vktSparseResourcesTestsUtil.cpp | 503 ++++++++- .../vktSparseResourcesTestsUtil.hpp | 205 +++- .../mustpass/1.0.1/com.drawelements.deqp.vk.xml | 1120 ++++++++++++++++++++ external/vulkancts/mustpass/1.0.1/vk-default.txt | 312 ++++++ 17 files changed, 4718 insertions(+), 49 deletions(-) create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsics.cpp create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsics.hpp create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsBase.cpp create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsBase.hpp create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsSampled.cpp create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsSampled.hpp create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsStorage.cpp create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsStorage.hpp diff --git a/doc/testspecs/VK/sparse_resources.txt b/doc/testspecs/VK/sparse_resources.txt index 315e00f..f502556 100644 --- a/doc/testspecs/VK/sparse_resources.txt +++ b/doc/testspecs/VK/sparse_resources.txt @@ -13,6 +13,7 @@ Includes: 5. Test partially resident image with mipmaps, put some mipmap levels in mip tail region 6. Test memory aliasing for fully resident buffer objects 7. Test memory aliasing for partially resident images +8. Test OpImageSparse* shader intrinsics Description: @@ -140,3 +141,20 @@ The validation part retrieves data back from output buffer to host memory. For e compared against the expected output from compute shader. On the other hand for each mipmap level that landed in the mip tail region, the data is compared against data stored in the input buffer (the compute shader could not have changed this data). The test passes if for each mipmap level the comparison results in both data sets being the same. + +8. Test OpImageSparse* shader intrinsics + +The test creates a sparse partially resident image. Memory is bound to every second mipmap level of the image. + +The test also creates a second, non-sparse texels image with the same dimensions and format as the sparse one, and +a third residency image with the same dimensions as the sparse one and an unsigned integer format.
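The binding pattern described above (backing memory attached only to every second mip level) can be sketched roughly as follows. This fragment is illustrative only: allocateMemoryForMipLevel() is a hypothetical helper, and the mip tail is ignored; the actual test uses the CTS utilities makeSparseImageMemoryBind() and vkQueueBindSparse() shown later in this patch.

#include <vulkan/vulkan.h>
#include <algorithm>
#include <vector>

// Hypothetical helper, assumed to be provided elsewhere.
VkDeviceMemory allocateMemoryForMipLevel (uint32_t mipLevel);

// Build binds for even mip levels only; odd levels are left without backing memory,
// so OpImageSparseTexelsResident should report their texels as non-resident.
std::vector<VkSparseImageMemoryBind> bindEverySecondMipLevel (uint32_t mipLevels, uint32_t arrayLayer, VkExtent3D baseExtent)
{
    std::vector<VkSparseImageMemoryBind> binds;

    for (uint32_t mipLevel = 0; mipLevel < mipLevels; ++mipLevel)
    {
        if (mipLevel % 2u == 1u)
            continue; // leave every second mip level unbacked

        VkSparseImageMemoryBind bind = {};
        bind.subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
        bind.subresource.mipLevel   = mipLevel;
        bind.subresource.arrayLayer = arrayLayer;
        bind.offset                 = { 0, 0, 0 };
        bind.extent.width           = std::max(baseExtent.width  >> mipLevel, 1u);
        bind.extent.height          = std::max(baseExtent.height >> mipLevel, 1u);
        bind.extent.depth           = std::max(baseExtent.depth  >> mipLevel, 1u);
        bind.memory                 = allocateMemoryForMipLevel(mipLevel);
        bind.memoryOffset           = 0ull;

        binds.push_back(bind);
    }

    return binds; // handed to vkQueueBindSparse via VkSparseImageMemoryBindInfo
}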
+ +For OpImageSparse* opcodes that take float image coordinates the test creates a graphics queue, otherwise a compute queue is created. +In both cases the commands submitted to the queue copy the data from the sparse image to the texels and residency images using one +of the OpImageSparse* shader intrinsics. For graphics operations the data is copied via rendering to two color attachments, for compute operations +the data is copied via image load/store. + +Data is retrieved from the non-sparse images back to the CPU. Contents of the texels image are compared against the data originally sent to the sparse image. +For mipmap levels of the sparse image that do not have backing device memory, the fetched data is compared against zeroed memory if residencyNonResidentStrict is set to VK_TRUE, +otherwise the comparison for those mipmap levels is omitted. The data fetched from the residency image is checked to verify that, for each mipmap level, OpImageSparseTexelsResident +returned the correct residency information. diff --git a/external/vulkancts/modules/vulkan/sparse_resources/CMakeLists.txt b/external/vulkancts/modules/vulkan/sparse_resources/CMakeLists.txt index afc96ff..1548775 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/CMakeLists.txt +++ b/external/vulkancts/modules/vulkan/sparse_resources/CMakeLists.txt @@ -1,6 +1,14 @@ include_directories(..) set(DEQP_VK_IMAGE_SRCS + vktSparseResourcesShaderIntrinsics.cpp + vktSparseResourcesShaderIntrinsics.hpp + vktSparseResourcesShaderIntrinsicsBase.cpp + vktSparseResourcesShaderIntrinsicsBase.hpp + vktSparseResourcesShaderIntrinsicsSampled.cpp + vktSparseResourcesShaderIntrinsicsSampled.hpp + vktSparseResourcesShaderIntrinsicsStorage.cpp + vktSparseResourcesShaderIntrinsicsStorage.hpp vktSparseResourcesImageMemoryAliasing.cpp vktSparseResourcesImageMemoryAliasing.hpp vktSparseResourcesBufferMemoryAliasing.cpp diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.cpp index a3427b6..775baeb 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.cpp @@ -190,8 +190,8 @@ bool SparseResourcesBaseInstance::checkSparseSupportForImageType (const Instance } bool SparseResourcesBaseInstance::checkSparseSupportForImageFormat (const InstanceInterface& instance, - const VkPhysicalDevice physicalDevice, - const VkImageCreateInfo& imageInfo) const + const VkPhysicalDevice physicalDevice, + const VkImageCreateInfo& imageInfo) const { const std::vector sparseImageFormatPropVec = getPhysicalDeviceSparseImageFormatProperties( instance, physicalDevice, imageInfo.format, imageInfo.imageType, imageInfo.samples, imageInfo.usage, imageInfo.tiling); @@ -199,6 +199,16 @@ bool SparseResourcesBaseInstance::checkSparseSupportForImageFormat (const Instan return sparseImageFormatPropVec.size() > 0u; } +bool SparseResourcesBaseInstance::checkImageFormatFeatureSupport (const vk::InstanceInterface& instance, + const vk::VkPhysicalDevice physicalDevice, + const vk::VkFormat format, + const vk::VkFormatFeatureFlags featureFlags) const +{ + const VkFormatProperties formatProperties = getPhysicalDeviceFormatProperties(instance, physicalDevice, format); + + return (formatProperties.optimalTilingFeatures & featureFlags) == featureFlags; +} + deUint32 SparseResourcesBaseInstance::getSparseAspectRequirementsIndex (const std::vector& requirements,
const VkImageAspectFlags aspectFlags) const { diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.hpp index a543109..1c33042 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.hpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesBase.hpp @@ -100,6 +100,11 @@ protected: const vk::VkPhysicalDevice physicalDevice, const vk::VkImageCreateInfo& imageInfo) const; + bool checkImageFormatFeatureSupport (const vk::InstanceInterface& instance, + const vk::VkPhysicalDevice physicalDevice, + const vk::VkFormat format, + const vk::VkFormatFeatureFlags featureFlags) const; + deUint32 getSparseAspectRequirementsIndex (const std::vector&requirements, const vk::VkImageAspectFlags aspectFlags) const; diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsics.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsics.cpp new file mode 100644 index 0000000..7af6493 --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsics.cpp @@ -0,0 +1,140 @@ +/*------------------------------------------------------------------------ + * Vulkan Conformance Tests + * ------------------------ + * + * Copyright (c) 2016 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + *//* + * \file vktSparseResourcesShaderIntrinsics.cpp + * \brief Sparse Resources Shader Intrinsics + *//*--------------------------------------------------------------------*/ + +#include "vktSparseResourcesShaderIntrinsicsSampled.hpp" +#include "vktSparseResourcesShaderIntrinsicsStorage.hpp" + +using namespace vk; + +namespace vkt +{ +namespace sparse +{ + +tcu::TestCaseGroup* createSparseResourcesShaderIntrinsicsTests (tcu::TestContext& testCtx) +{ + de::MovePtr testGroup(new tcu::TestCaseGroup(testCtx, "shader_intrinsics", "Sparse Resources Shader Intrinsics")); + + static const deUint32 sizeCountPerImageType = 4u; + + struct ImageParameters + { + ImageType imageType; + tcu::UVec3 imageSizes[sizeCountPerImageType]; + }; + + static const ImageParameters imageParametersArray[] = + { + { IMAGE_TYPE_2D, { tcu::UVec3(512u, 256u, 1u), tcu::UVec3(128u, 128u, 1u), tcu::UVec3(503u, 137u, 1u), tcu::UVec3(11u, 37u, 1u) } }, + { IMAGE_TYPE_2D_ARRAY, { tcu::UVec3(512u, 256u, 6u), tcu::UVec3(128u, 128u, 8u), tcu::UVec3(503u, 137u, 3u), tcu::UVec3(11u, 37u, 3u) } }, + { IMAGE_TYPE_CUBE, { tcu::UVec3(256u, 256u, 1u), tcu::UVec3(128u, 128u, 1u), tcu::UVec3(137u, 137u, 1u), tcu::UVec3(11u, 11u, 1u) } }, + { IMAGE_TYPE_CUBE_ARRAY,{ tcu::UVec3(256u, 256u, 6u), tcu::UVec3(128u, 128u, 8u), tcu::UVec3(137u, 137u, 3u), tcu::UVec3(11u, 11u, 3u) } }, + { IMAGE_TYPE_3D, { tcu::UVec3(256u, 256u, 16u), tcu::UVec3(128u, 128u, 8u), tcu::UVec3(503u, 137u, 3u), tcu::UVec3(11u, 37u, 3u) } } + }; + + static const tcu::TextureFormat formats[] = + { + tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT32), + tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT16), + tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT8), + tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT32), + tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT16), + tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8) + }; + + static const std::string functions[SPARSE_SPIRV_FUNCTION_TYPE_LAST] = + { + "_sparse_fetch", + "_sparse_read", + "_sparse_sample_explicit_lod", + "_sparse_sample_implicit_lod", + "_sparse_gather", + }; + + for (deUint32 functionNdx = 0; functionNdx < SPARSE_SPIRV_FUNCTION_TYPE_LAST; ++functionNdx) + { + const SpirVFunction function = static_cast(functionNdx); + + for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray); ++imageTypeNdx) + { + const ImageType imageType = imageParametersArray[imageTypeNdx].imageType; + de::MovePtr imageTypeGroup(new tcu::TestCaseGroup(testCtx, (getImageTypeName(imageType) + functions[functionNdx]).c_str(), "")); + + for (deInt32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); ++formatNdx) + { + const tcu::TextureFormat& format = formats[formatNdx]; + de::MovePtr formatGroup(new tcu::TestCaseGroup(testCtx, getShaderImageFormatQualifier(format).c_str(), "")); + + for (deInt32 imageSizeNdx = 0; imageSizeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray[imageTypeNdx].imageSizes); ++imageSizeNdx) + { + const tcu::UVec3 imageSize = imageParametersArray[imageTypeNdx].imageSizes[imageSizeNdx]; + + std::ostringstream stream; + stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z(); + + switch (function) + { + case SPARSE_FETCH: + if ((imageType == IMAGE_TYPE_CUBE) || (imageType == IMAGE_TYPE_CUBE_ARRAY)) continue; + case SPARSE_SAMPLE_EXPLICIT_LOD: + case SPARSE_SAMPLE_IMPLICIT_LOD: + case SPARSE_GATHER: + if 
((imageType == IMAGE_TYPE_CUBE) || (imageType == IMAGE_TYPE_CUBE_ARRAY) || (imageType == IMAGE_TYPE_3D)) continue; + break; + default: + break; + } + + switch (function) + { + case SPARSE_FETCH: + formatGroup->addChild(new SparseCaseOpImageSparseFetch(testCtx, stream.str(), function, imageType, imageSize, format)); + break; + case SPARSE_READ: + formatGroup->addChild(new SparseCaseOpImageSparseRead(testCtx, stream.str(), function, imageType, imageSize, format)); + break; + case SPARSE_SAMPLE_EXPLICIT_LOD: + formatGroup->addChild(new SparseCaseOpImageSparseSampleExplicitLod(testCtx, stream.str(), function, imageType, imageSize, format)); + break; + case SPARSE_SAMPLE_IMPLICIT_LOD: + formatGroup->addChild(new SparseCaseOpImageSparseSampleImplicitLod(testCtx, stream.str(), function, imageType, imageSize, format)); + break; + case SPARSE_GATHER: + formatGroup->addChild(new SparseCaseOpImageSparseGather(testCtx, stream.str(), function, imageType, imageSize, format)); + break; + default: + DE_ASSERT(0); + break; + } + } + imageTypeGroup->addChild(formatGroup.release()); + } + testGroup->addChild(imageTypeGroup.release()); + } + } + + return testGroup.release(); +} + +} // sparse +} // vkt diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsics.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsics.hpp new file mode 100644 index 0000000..53ada12 --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsics.hpp @@ -0,0 +1,39 @@ +#ifndef _VKTSPARSERESOURCESSHADERINTRINSICS_HPP +#define _VKTSPARSERESOURCESSHADERINTRINSICS_HPP +/*------------------------------------------------------------------------ + * Vulkan Conformance Tests + * ------------------------ + * + * Copyright (c) 2016 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + *//*! + * \file vktSparseResourcesShaderIntrinsics.hpp + * \brief Sparse Resources Shader Intrinsics + *//*--------------------------------------------------------------------*/ + +#include "tcuDefs.hpp" +#include "vktTestCase.hpp" + +namespace vkt +{ +namespace sparse +{ + +tcu::TestCaseGroup* createSparseResourcesShaderIntrinsicsTests(tcu::TestContext& testCtx); + +} // sparse +} // vkt + +#endif // _VKTSPARSERESOURCESSHADERINTRINSICS_HPP \ No newline at end of file diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsBase.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsBase.cpp new file mode 100644 index 0000000..3178fce --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsBase.cpp @@ -0,0 +1,627 @@ +/*------------------------------------------------------------------------ + * Vulkan Conformance Tests + * ------------------------ + * + * Copyright (c) 2016 The Khronos Group Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + *//* + * \file vktSparseResourcesShaderIntrinsicsBase.cpp + * \brief Sparse Resources Shader Intrinsics Base Classes + *//*--------------------------------------------------------------------*/ + +#include "vktSparseResourcesShaderIntrinsicsBase.hpp" + +using namespace vk; + +namespace vkt +{ +namespace sparse +{ + +tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor) +{ + tcu::UVec3 result; + + result.x() = extent.width / divisor.width + ((extent.width % divisor.width) ? 1u : 0u); + result.y() = extent.height / divisor.height + ((extent.height % divisor.height) ? 1u : 0u); + result.z() = extent.depth / divisor.depth + ((extent.depth % divisor.depth) ? 1u : 0u); + + return result; +} + +std::string getOpTypeImageComponent (const tcu::TextureFormat& format) +{ + switch (tcu::getTextureChannelClass(format.type)) + { + case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER: + return "OpTypeInt 32 0"; + case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER: + return "OpTypeInt 32 1"; + default: + DE_ASSERT(0); + return ""; + } +} + +std::string getOpTypeImageSparse (const ImageType imageType, + const tcu::TextureFormat& format, + const std::string& componentType, + const bool requiresSampler) +{ + std::ostringstream src; + + src << "OpTypeImage " << componentType << " "; + + switch (imageType) + { + case IMAGE_TYPE_1D : + src << "1D 0 0 0 "; + break; + case IMAGE_TYPE_1D_ARRAY : + src << "1D 0 1 0 "; + break; + case IMAGE_TYPE_2D : + src << "2D 0 0 0 "; + break; + case IMAGE_TYPE_2D_ARRAY : + src << "2D 0 1 0 "; + break; + case IMAGE_TYPE_3D : + src << "3D 0 0 0 "; + break; + case IMAGE_TYPE_CUBE : + src << "Cube 0 0 0 "; + break; + case IMAGE_TYPE_CUBE_ARRAY : + src << "Cube 0 1 0 "; + break; + default : + DE_ASSERT(0); + break; + }; + + if (requiresSampler) + src << "1 "; + else + src << "2 "; + + switch (format.order) + { + case tcu::TextureFormat::R: + src << "R"; + break; + case tcu::TextureFormat::RG: + src << "Rg"; + break; + case tcu::TextureFormat::RGB: + src << "Rgb"; + break; + case tcu::TextureFormat::RGBA: + src << "Rgba"; + break; + default: + DE_ASSERT(0); + break; + } + + switch (format.type) + { + case tcu::TextureFormat::SIGNED_INT8: + src << "8i"; + break; + case tcu::TextureFormat::SIGNED_INT16: + src << "16i"; + break; + case tcu::TextureFormat::SIGNED_INT32: + src << "32i"; + break; + case tcu::TextureFormat::UNSIGNED_INT8: + src << "8ui"; + break; + case tcu::TextureFormat::UNSIGNED_INT16: + src << "16ui"; + break; + case tcu::TextureFormat::UNSIGNED_INT32: + src << "32ui"; + break; + default: + DE_ASSERT(0); + break; + }; + + return src.str(); +} + +std::string getOpTypeImageResidency (const ImageType imageType) +{ + std::ostringstream src; + + src << "OpTypeImage %type_uint "; + + switch (imageType) + { + case IMAGE_TYPE_1D : + src << "1D 0 0 0 2 R32ui"; + break; + case IMAGE_TYPE_1D_ARRAY : + src << "1D 0 1 0 2 R32ui"; + break; + case IMAGE_TYPE_2D : + src << "2D 0 0 0 2 R32ui"; + break; + case 
IMAGE_TYPE_2D_ARRAY : + src << "2D 0 1 0 2 R32ui"; + break; + case IMAGE_TYPE_3D : + src << "3D 0 0 0 2 R32ui"; + break; + case IMAGE_TYPE_CUBE : + src << "Cube 0 0 0 2 R32ui"; + break; + case IMAGE_TYPE_CUBE_ARRAY : + src << "Cube 0 1 0 2 R32ui"; + break; + default : + DE_ASSERT(0); + break; + }; + + return src.str(); +} + +tcu::TestStatus SparseShaderIntrinsicsInstanceBase::iterate (void) +{ + const InstanceInterface& instance = m_context.getInstanceInterface(); + const DeviceInterface& deviceInterface = m_context.getDeviceInterface(); + const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice(); + VkImageCreateInfo imageSparseInfo; + VkImageCreateInfo imageTexelsInfo; + VkImageCreateInfo imageResidencyInfo; + VkSparseImageMemoryRequirements aspectRequirements; + std::vector residencyReferenceData; + std::vector deviceMemUniquePtrVec; + + // Check if image size does not exceed device limits + if (!isImageSizeSupported(instance, physicalDevice, m_imageType, m_imageSize)) + TCU_THROW(NotSupportedError, "Image size not supported for device"); + + // Check if device supports sparse operations for image type + if (!checkSparseSupportForImageType(instance, physicalDevice, m_imageType)) + TCU_THROW(NotSupportedError, "Sparse residency for image type is not supported"); + + imageSparseInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + imageSparseInfo.pNext = DE_NULL; + imageSparseInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT; + imageSparseInfo.imageType = mapImageType(m_imageType); + imageSparseInfo.format = mapTextureFormat(m_format); + imageSparseInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize)); + imageSparseInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize); + imageSparseInfo.samples = VK_SAMPLE_COUNT_1_BIT; + imageSparseInfo.tiling = VK_IMAGE_TILING_OPTIMAL; + imageSparseInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + imageSparseInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | imageSparseUsageFlags(); + imageSparseInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + imageSparseInfo.queueFamilyIndexCount = 0u; + imageSparseInfo.pQueueFamilyIndices = DE_NULL; + + if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY) + { + imageSparseInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; + } + + { + // Assign maximum allowed mipmap levels to image + VkImageFormatProperties imageFormatProperties; + instance.getPhysicalDeviceImageFormatProperties(physicalDevice, + imageSparseInfo.format, + imageSparseInfo.imageType, + imageSparseInfo.tiling, + imageSparseInfo.usage, + imageSparseInfo.flags, + &imageFormatProperties); + + imageSparseInfo.mipLevels = getImageMaxMipLevels(imageFormatProperties, imageSparseInfo.extent); + } + + // Check if device supports sparse operations for image format + if (!checkSparseSupportForImageFormat(instance, physicalDevice, imageSparseInfo)) + TCU_THROW(NotSupportedError, "The image format does not support sparse operations"); + + { + // Create logical device supporting both sparse and compute/graphics queues + QueueRequirementsVec queueRequirements; + queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u)); + queueRequirements.push_back(QueueRequirements(getQueueFlags(), 1u)); + + createDeviceSupportingQueues(queueRequirements); + } + + // Create queues supporting sparse binding operations and compute/graphics operations + const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0); + const Queue& extractQueue = getQueue(getQueueFlags(), 0); + + // 
Create memory allocator for logical device + const de::UniquePtr allocator(new SimpleAllocator(deviceInterface, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instance, physicalDevice))); + + // Create sparse image + const Unique imageSparse(createImage(deviceInterface, *m_logicalDevice, &imageSparseInfo)); + + // Create sparse image memory bind semaphore + const Unique memoryBindSemaphore(makeSemaphore(deviceInterface, *m_logicalDevice)); + + const deUint32 imageSparseSizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, imageSparseInfo.mipLevels); + const deUint32 imageSizeInPixels = imageSparseSizeInBytes / tcu::getPixelSize(m_format); + + residencyReferenceData.assign(imageSizeInPixels, MEMORY_BLOCK_NOT_BOUND_VALUE); + + { + // Get sparse image general memory requirements + const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, *m_logicalDevice, *imageSparse); + + // Check if required image memory size does not exceed device limits + if (imageMemoryRequirements.size > getPhysicalDeviceProperties(instance, physicalDevice).limits.sparseAddressSpaceSize) + TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits"); + + DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0); + + // Get sparse image sparse memory requirements + const std::vector sparseMemoryRequirements = getImageSparseMemoryRequirements(deviceInterface, *m_logicalDevice, *imageSparse); + + DE_ASSERT(sparseMemoryRequirements.size() != 0); + + const deUint32 colorAspectIndex = getSparseAspectRequirementsIndex(sparseMemoryRequirements, VK_IMAGE_ASPECT_COLOR_BIT); + + if (colorAspectIndex == NO_MATCH_FOUND) + TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT"); + + aspectRequirements = sparseMemoryRequirements[colorAspectIndex]; + + DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0); + + const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask; + const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity; + const deUint32 memoryType = findMatchingMemoryType(instance, physicalDevice, imageMemoryRequirements, MemoryRequirement::Any); + + if (memoryType == NO_MATCH_FOUND) + return tcu::TestStatus::fail("No matching memory type found"); + + deUint32 pixelOffset = 0u; + + std::vector imageResidencyMemoryBinds; + std::vector imageMipTailBinds; + + // Bind memory for each mipmap level + for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx) + { + const deUint32 mipLevelSizeInPixels = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx) / tcu::getPixelSize(m_format); + + if (mipLevelNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_NOT_BOUND) + { + pixelOffset += mipLevelSizeInPixels; + continue; + } + + for (deUint32 pixelNdx = 0u; pixelNdx < mipLevelSizeInPixels; ++pixelNdx) + { + residencyReferenceData[pixelOffset + pixelNdx] = MEMORY_BLOCK_BOUND_VALUE; + } + + pixelOffset += mipLevelSizeInPixels; + + for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx) + { + const VkExtent3D mipExtent = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx); + const tcu::UVec3 sparseBlocks = alignedDivide(mipExtent, imageGranularity); + const deUint32 numSparseBlocks = sparseBlocks.x() * sparseBlocks.y() * 
sparseBlocks.z(); + const VkImageSubresource subresource = { aspectMask, mipLevelNdx, layerNdx }; + + const VkSparseImageMemoryBind imageMemoryBind = makeSparseImageMemoryBind(deviceInterface, *m_logicalDevice, + imageMemoryRequirements.alignment * numSparseBlocks, memoryType, subresource, makeOffset3D(0u, 0u, 0u), mipExtent); + + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMemoryBind.memory), Deleter(deviceInterface, *m_logicalDevice, DE_NULL)))); + + imageResidencyMemoryBinds.push_back(imageMemoryBind); + } + } + + if (aspectRequirements.imageMipTailFirstLod < imageSparseInfo.mipLevels) + { + if (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) + { + const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, *m_logicalDevice, + aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset); + + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, *m_logicalDevice, DE_NULL)))); + + imageMipTailBinds.push_back(imageMipTailMemoryBind); + } + else + { + for (deUint32 layerNdx = 0; layerNdx < imageSparseInfo.arrayLayers; ++layerNdx) + { + const VkSparseMemoryBind imageMipTailMemoryBind = makeSparseMemoryBind(deviceInterface, *m_logicalDevice, + aspectRequirements.imageMipTailSize, memoryType, aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride); + + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move(check(imageMipTailMemoryBind.memory), Deleter(deviceInterface, *m_logicalDevice, DE_NULL)))); + + imageMipTailBinds.push_back(imageMipTailMemoryBind); + } + } + + for (deUint32 pixelNdx = pixelOffset; pixelNdx < residencyReferenceData.size(); ++pixelNdx) + { + residencyReferenceData[pixelNdx] = MEMORY_BLOCK_BOUND_VALUE; + } + } + + VkBindSparseInfo bindSparseInfo = + { + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType; + DE_NULL, //const void* pNext; + 0u, //deUint32 waitSemaphoreCount; + DE_NULL, //const VkSemaphore* pWaitSemaphores; + 0u, //deUint32 bufferBindCount; + DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds; + 0u, //deUint32 imageOpaqueBindCount; + DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + 0u, //deUint32 imageBindCount; + DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds; + 1u, //deUint32 signalSemaphoreCount; + &memoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores; + }; + + VkSparseImageMemoryBindInfo imageResidencyBindInfo; + VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo; + + if (imageResidencyMemoryBinds.size() > 0) + { + imageResidencyBindInfo.image = *imageSparse; + imageResidencyBindInfo.bindCount = static_cast(imageResidencyMemoryBinds.size()); + imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0]; + + bindSparseInfo.imageBindCount = 1u; + bindSparseInfo.pImageBinds = &imageResidencyBindInfo; + } + + if (imageMipTailBinds.size() > 0) + { + imageMipTailBindInfo.image = *imageSparse; + imageMipTailBindInfo.bindCount = static_cast(imageMipTailBinds.size()); + imageMipTailBindInfo.pBinds = &imageMipTailBinds[0]; + + bindSparseInfo.imageOpaqueBindCount = 1u; + bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo; + } + + // Submit sparse bind commands for execution + VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL)); + } + + // Create image to store texels copied from sparse image + imageTexelsInfo.sType = 
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; + imageTexelsInfo.pNext = DE_NULL; + imageTexelsInfo.flags = 0u; + imageTexelsInfo.imageType = imageSparseInfo.imageType; + imageTexelsInfo.format = imageSparseInfo.format; + imageTexelsInfo.extent = imageSparseInfo.extent; + imageTexelsInfo.arrayLayers = imageSparseInfo.arrayLayers; + imageTexelsInfo.mipLevels = imageSparseInfo.mipLevels; + imageTexelsInfo.samples = imageSparseInfo.samples; + imageTexelsInfo.tiling = VK_IMAGE_TILING_OPTIMAL; + imageTexelsInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; + imageTexelsInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | imageOutputUsageFlags(); + imageTexelsInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; + imageTexelsInfo.queueFamilyIndexCount = 0u; + imageTexelsInfo.pQueueFamilyIndices = DE_NULL; + + if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY) + { + imageTexelsInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; + } + + const de::UniquePtr imageTexels(new Image(deviceInterface, *m_logicalDevice, *allocator, imageTexelsInfo, MemoryRequirement::Any)); + + // Create image to store residency info copied from sparse image + imageResidencyInfo = imageTexelsInfo; + imageResidencyInfo.format = mapTextureFormat(m_residencyFormat); + + const de::UniquePtr imageResidency(new Image(deviceInterface, *m_logicalDevice, *allocator, imageResidencyInfo, MemoryRequirement::Any)); + + // Create command buffer for compute and transfer oparations + const Unique commandPool(makeCommandPool(deviceInterface, *m_logicalDevice, extractQueue.queueFamilyIndex)); + const Unique commandBuffer(makeCommandBuffer(deviceInterface, *m_logicalDevice, *commandPool)); + + // Start recording commands + beginCommandBuffer(deviceInterface, *commandBuffer); + + // Create input buffer + const VkBufferCreateInfo inputBufferCreateInfo = makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_SRC_BIT); + const de::UniquePtr inputBuffer(new Buffer(deviceInterface, *m_logicalDevice, *allocator, inputBufferCreateInfo, MemoryRequirement::HostVisible)); + + // Fill input buffer with reference data + std::vector referenceData; + referenceData.resize(imageSparseSizeInBytes); + + deUint32 bufferOffset = 0u; + for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx) + { + const deUint32 mipLevelSizeinBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx); + + for (deUint32 byteNdx = 0u; byteNdx < mipLevelSizeinBytes; ++byteNdx) + { + referenceData[bufferOffset + byteNdx] = (deUint8)(mipLevelNdx + byteNdx); + } + + bufferOffset += mipLevelSizeinBytes; + } + + deMemcpy(inputBuffer->getAllocation().getHostPtr(), &referenceData[0], imageSparseSizeInBytes); + flushMappedMemoryRange(deviceInterface, *m_logicalDevice, inputBuffer->getAllocation().getMemory(), inputBuffer->getAllocation().getOffset(), imageSparseSizeInBytes); + + { + // Prepare input buffer for data transfer operation + const VkBufferMemoryBarrier inputBufferBarrier = makeBufferMemoryBarrier + ( + VK_ACCESS_HOST_WRITE_BIT, + VK_ACCESS_TRANSFER_READ_BIT, + inputBuffer->get(), + 0u, + imageSparseSizeInBytes + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 1u, &inputBufferBarrier, 0u, DE_NULL); + } + + const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers); + + { + // Prepare 
sparse image for data transfer operation + const VkImageMemoryBarrier imageSparseTransferDstBarrier = makeImageMemoryBarrier + ( + 0u, + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? sparseQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, + sparseQueue.queueFamilyIndex != extractQueue.queueFamilyIndex ? extractQueue.queueFamilyIndex : VK_QUEUE_FAMILY_IGNORED, + *imageSparse, + fullImageSubresourceRange + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &imageSparseTransferDstBarrier); + } + + // Copy reference data from input buffer to sparse image + std::vector bufferImageCopy; + bufferImageCopy.resize(imageSparseInfo.mipLevels); + + bufferOffset = 0u; + for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx) + { + bufferImageCopy[mipLevelNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipLevelNdx), imageSparseInfo.arrayLayers, mipLevelNdx, bufferOffset); + bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx); + } + + deviceInterface.cmdCopyBufferToImage(*commandBuffer, inputBuffer->get(), *imageSparse, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, static_cast(bufferImageCopy.size()), &bufferImageCopy[0]); + + recordCommands(*allocator, *commandBuffer, imageSparseInfo, *imageSparse, imageTexels->get(), imageResidency->get()); + + const VkBufferCreateInfo bufferTexelsInfo = makeBufferCreateInfo(imageSparseSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT); + const de::UniquePtr bufferTexels(new Buffer(deviceInterface, *m_logicalDevice, *allocator, bufferTexelsInfo, MemoryRequirement::HostVisible)); + + // Copy data from texels image to buffer + deviceInterface.cmdCopyImageToBuffer(*commandBuffer, imageTexels->get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, bufferTexels->get(), static_cast(bufferImageCopy.size()), &bufferImageCopy[0]); + + const deUint32 imageResidencySizeInBytes = getImageSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, imageSparseInfo.mipLevels); + + const VkBufferCreateInfo bufferResidencyInfo = makeBufferCreateInfo(imageResidencySizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT); + const de::UniquePtr bufferResidency(new Buffer(deviceInterface, *m_logicalDevice, *allocator, bufferResidencyInfo, MemoryRequirement::HostVisible)); + + // Copy data from residency image to buffer + bufferOffset = 0u; + for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx) + { + bufferImageCopy[mipLevelNdx] = makeBufferImageCopy(mipLevelExtents(imageSparseInfo.extent, mipLevelNdx), imageSparseInfo.arrayLayers, mipLevelNdx, bufferOffset); + bufferOffset += getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_residencyFormat, mipLevelNdx); + } + + deviceInterface.cmdCopyImageToBuffer(*commandBuffer, imageResidency->get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, bufferResidency->get(), static_cast(bufferImageCopy.size()), &bufferImageCopy[0]); + + { + VkBufferMemoryBarrier bufferOutputHostReadBarriers[2]; + + bufferOutputHostReadBarriers[0] = makeBufferMemoryBarrier + ( + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_HOST_READ_BIT, + bufferTexels->get(), + 0u, + imageSparseSizeInBytes + ); + + bufferOutputHostReadBarriers[1] = makeBufferMemoryBarrier + ( + 
VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_HOST_READ_BIT, + bufferResidency->get(), + 0u, + imageResidencySizeInBytes + ); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 2u, bufferOutputHostReadBarriers, 0u, DE_NULL); + } + + // End recording commands + endCommandBuffer(deviceInterface, *commandBuffer); + + const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_TRANSFER_BIT }; + + // Submit commands for execution and wait for completion + submitCommandsAndWait(deviceInterface, *m_logicalDevice, extractQueue.queueHandle, *commandBuffer, 1u, &memoryBindSemaphore.get(), stageBits); + + // Wait for sparse queue to become idle + deviceInterface.queueWaitIdle(sparseQueue.queueHandle); + + // Retrieve data from residency buffer to host memory + const Allocation& bufferResidencyAllocation = bufferResidency->getAllocation(); + invalidateMappedMemoryRange(deviceInterface, *m_logicalDevice, bufferResidencyAllocation.getMemory(), bufferResidencyAllocation.getOffset(), imageResidencySizeInBytes); + + const deUint32* bufferResidencyData = static_cast(bufferResidencyAllocation.getHostPtr()); + + if (deMemCmp(&bufferResidencyData[0], &residencyReferenceData[0], imageResidencySizeInBytes) != 0) + return tcu::TestStatus::fail("Failed"); + + + // Retrieve data from texels buffer to host memory + const Allocation& bufferTexelsAllocation = bufferTexels->getAllocation(); + invalidateMappedMemoryRange(deviceInterface, *m_logicalDevice, bufferTexelsAllocation.getMemory(), bufferTexelsAllocation.getOffset(), imageSparseSizeInBytes); + + const deUint8* bufferTexelsData = static_cast(bufferTexelsAllocation.getHostPtr()); + + deUint32 dataOffset = 0u; + for (deUint32 mipLevelNdx = 0; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx) + { + const deUint32 mipLevelSizeInBytes = getImageMipLevelSizeInBytes(imageSparseInfo.extent, imageSparseInfo.arrayLayers, m_format, mipLevelNdx); + + if (mipLevelNdx < aspectRequirements.imageMipTailFirstLod) + { + if (mipLevelNdx % MEMORY_BLOCK_TYPE_COUNT == MEMORY_BLOCK_BOUND) + { + if (deMemCmp(&bufferTexelsData[dataOffset], &referenceData[dataOffset], mipLevelSizeInBytes) != 0) + return tcu::TestStatus::fail("Failed"); + } + else if (getPhysicalDeviceProperties(instance, physicalDevice).sparseProperties.residencyNonResidentStrict) + { + std::vector zeroData; + zeroData.assign(mipLevelSizeInBytes, 0u); + + if (deMemCmp(&bufferTexelsData[dataOffset], &zeroData[0], mipLevelSizeInBytes) != 0) + return tcu::TestStatus::fail("Failed"); + } + } + else + { + if (deMemCmp(&bufferTexelsData[dataOffset], &referenceData[dataOffset], mipLevelSizeInBytes) != 0) + return tcu::TestStatus::fail("Failed"); + } + + dataOffset += mipLevelSizeInBytes; + } + + return tcu::TestStatus::pass("Passed"); +} + +} // sparse +} // vkt diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsBase.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsBase.hpp new file mode 100644 index 0000000..f459aef --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsBase.hpp @@ -0,0 +1,176 @@ +#ifndef _VKTSPARSERESOURCESSHADERINTRINSICSBASE_HPP +#define _VKTSPARSERESOURCESSHADERINTRINSICSBASE_HPP +/*------------------------------------------------------------------------ + * Vulkan Conformance Tests + * ------------------------ + * + * Copyright (c) 2016 The Khronos Group Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + *//*! + * \file vktSparseResourcesShaderIntrinsicsBase.hpp + * \brief Sparse Resources Shader Intrinsics Base Classes + *//*--------------------------------------------------------------------*/ + +#include "tcuDefs.hpp" +#include "vktTestCase.hpp" +#include "vktTestCaseUtil.hpp" +#include "vktSparseResourcesBase.hpp" +#include "vktSparseResourcesTestsUtil.hpp" + +#include "vkDefs.hpp" +#include "vkRef.hpp" +#include "vkRefUtil.hpp" +#include "vkPlatform.hpp" +#include "vkPrograms.hpp" +#include "vkRefUtil.hpp" +#include "vkMemUtil.hpp" +#include "vkQueryUtil.hpp" +#include "vkBuilderUtil.hpp" +#include "vkTypeUtil.hpp" +#include "vkDebugReportUtil.hpp" +#include "tcuTextureUtil.hpp" + +#include "deStringUtil.hpp" +#include "deUniquePtr.hpp" +#include "deSharedPtr.hpp" + +#include +#include + +#include + +namespace vkt +{ +namespace sparse +{ + +enum +{ + MEMORY_BLOCK_BOUND = 0u, + MEMORY_BLOCK_NOT_BOUND = 1u, + MEMORY_BLOCK_TYPE_COUNT = 2u +}; + +enum +{ + MEMORY_BLOCK_BOUND_VALUE = 1u, + MEMORY_BLOCK_NOT_BOUND_VALUE = 2u +}; + +enum +{ + BINDING_IMAGE_SPARSE = 0u, + BINDING_IMAGE_TEXELS = 1u, + BINDING_IMAGE_RESIDENCY = 2u +}; + +enum SpirVFunction +{ + SPARSE_FETCH = 0u, + SPARSE_READ, + SPARSE_SAMPLE_EXPLICIT_LOD, + SPARSE_SAMPLE_IMPLICIT_LOD, + SPARSE_GATHER, + SPARSE_SPIRV_FUNCTION_TYPE_LAST +}; + +std::string getOpTypeImageComponent (const tcu::TextureFormat& format); + +std::string getOpTypeImageSparse (const ImageType imageType, + const tcu::TextureFormat& format, + const std::string& componentType, + const bool requiresSampler); + +std::string getOpTypeImageResidency (const ImageType imageType); + +class SparseShaderIntrinsicsCaseBase : public TestCase +{ +public: + SparseShaderIntrinsicsCaseBase (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : TestCase(testCtx, name, "") + , m_function(function) + , m_imageType(imageType) + , m_imageSize(imageSize) + , m_format(format) + { + } + +protected: + const SpirVFunction m_function; + const ImageType m_imageType; + const tcu::UVec3 m_imageSize; + const tcu::TextureFormat m_format; +}; + +class SparseShaderIntrinsicsInstanceBase : public SparseResourcesBaseInstance +{ +public: + SparseShaderIntrinsicsInstanceBase (Context& context, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseResourcesBaseInstance(context) + , m_function(function) + , m_imageType(imageType) + , m_imageSize(imageSize) + , m_format(format) + , m_residencyFormat(tcu::TextureFormat::R, tcu::TextureFormat::UNSIGNED_INT32) + { + } + + tcu::TestStatus iterate (void); + + virtual vk::VkImageUsageFlags imageSparseUsageFlags (void) const = 0; + virtual vk::VkImageUsageFlags imageOutputUsageFlags (void) const = 0; + + virtual vk::VkQueueFlags getQueueFlags (void) const = 0; + + virtual void 
recordCommands (vk::Allocator& allocator, + const vk::VkCommandBuffer commandBuffer, + const vk::VkImageCreateInfo& imageSparseInfo, + const vk::VkImage imageSparse, + const vk::VkImage imageTexels, + const vk::VkImage imageResidency) = 0; +protected: + const SpirVFunction m_function; + const ImageType m_imageType; + const tcu::UVec3 m_imageSize; + const tcu::TextureFormat m_format; + const tcu::TextureFormat m_residencyFormat; + + typedef de::SharedPtr< vk::Unique > SharedVkPipeline; + std::vector pipelines; + + typedef de::SharedPtr< vk::Unique > SharedVkImageView; + std::vector imageSparseViews; + std::vector imageTexelsViews; + std::vector imageResidencyViews; + + vk::Move descriptorPool; + + typedef de::SharedPtr< vk::Unique > SharedVkDescriptorSet; + std::vector descriptorSets; +}; + +} // sparse +} // vkt + +#endif // _VKTSPARSERESOURCESSHADERINTRINSICSBASE_HPP \ No newline at end of file diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsSampled.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsSampled.cpp new file mode 100644 index 0000000..502dda2 --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsSampled.cpp @@ -0,0 +1,793 @@ +/*------------------------------------------------------------------------ + * Vulkan Conformance Tests + * ------------------------ + * + * Copyright (c) 2016 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + *//* + * \file vktSparseResourcesShaderIntrinsicsSampled.cpp + * \brief Sparse Resources Shader Intrinsics for sampled images + *//*--------------------------------------------------------------------*/ + +#include "vktSparseResourcesShaderIntrinsicsSampled.hpp" + +using namespace vk; + +namespace vkt +{ +namespace sparse +{ + +void SparseShaderIntrinsicsCaseSampledBase::initPrograms (vk::SourceCollections& programCollection) const +{ + const deUint32 numLayers = getNumLayers(m_imageType, m_imageSize); + const std::string coordString = getShaderImageCoordinates(m_imageType, "%local_texCoord_x", "%local_texCoord_xy", "%local_texCoord_xyz"); + + // Create vertex shader + std::ostringstream vs; + + vs << "#version 440\n" + << "layout(location = 0) in highp vec2 vs_in_position;\n" + << "layout(location = 1) in highp vec2 vs_in_texCoord;\n" + << "\n" + << "layout(location = 0) out highp vec3 vs_out_texCoord;\n" + << "\n" + << "out gl_PerVertex {\n" + << " vec4 gl_Position;\n" + << "};\n" + << "void main (void)\n" + << "{\n" + << " gl_Position = vec4(vs_in_position, 0.0f, 1.0f);\n" + << " vs_out_texCoord = vec3(vs_in_texCoord, 0.0f);\n" + << "}\n"; + + programCollection.glslSources.add("vertex_shader") << glu::VertexSource(vs.str()); + + if (numLayers > 1u) + { + const deInt32 maxVertices = 3u * numLayers; + + // Create geometry shader + std::ostringstream gs; + + gs << "#version 440\n" + << "layout(triangles) in;\n" + << "layout(triangle_strip, max_vertices = " << static_cast(maxVertices) << ") out;\n" + << "\n" + << "in gl_PerVertex {\n" + << " vec4 gl_Position;\n" + << "} gl_in[];\n" + << "out gl_PerVertex {\n" + << " vec4 gl_Position;\n" + << "};\n" + << "layout(location = 0) in highp vec3 gs_in_texCoord[];\n" + << "\n" + << "layout(location = 0) out highp vec3 gs_out_texCoord;\n" + << "\n" + << "void main (void)\n" + << "{\n" + << " for (int layerNdx = 0; layerNdx < " << static_cast(numLayers) << "; ++layerNdx)\n" + << " {\n" + << " for (int vertexNdx = 0; vertexNdx < gl_in.length(); ++vertexNdx)\n" + << " {\n" + << " gl_Layer = layerNdx;\n" + << " gl_Position = gl_in[vertexNdx].gl_Position;\n" + << " gs_out_texCoord = vec3(gs_in_texCoord[vertexNdx].xy, float(layerNdx));\n" + << " EmitVertex();\n" + << " }\n" + << " EndPrimitive();\n" + << " }\n" + << "}\n"; + + programCollection.glslSources.add("geometry_shader") << glu::GeometrySource(gs.str()); + } + + // Create fragment shader + std::ostringstream fs; + + fs << "OpCapability Shader\n" + << "OpCapability SampledCubeArray\n" + << "OpCapability ImageCubeArray\n" + << "OpCapability SparseResidency\n" + << "OpCapability StorageImageExtendedFormats\n" + + << "%ext_import = OpExtInstImport \"GLSL.std.450\"\n" + << "OpMemoryModel Logical GLSL450\n" + << "OpEntryPoint Fragment %func_main \"main\" %varying_texCoord %output_texel %output_residency\n" + << "OpExecutionMode %func_main OriginUpperLeft\n" + << "OpSource GLSL 440\n" + + << "OpName %func_main \"main\"\n" + + << "OpName %varying_texCoord \"varying_texCoord\"\n" + + << "OpName %output_texel \"out_texel\"\n" + << "OpName %output_residency \"out_residency\"\n" + + << "OpName %type_uniformblock_lod \"LodBlock\"\n" + << "OpMemberName %type_uniformblock_lod 0 \"lod\"\n" + << "OpName %uniformblock_lod_instance \"lodInstance\"\n" + + << "OpName %uniformconst_image_sparse \"u_imageSparse\"\n" + + << "OpDecorate %varying_texCoord Location 0\n" + + << "OpDecorate %output_texel Location 0\n" + << "OpDecorate %output_residency Location 1\n" + + << "OpDecorate %type_uniformblock_lod Block\n" + 
<< "OpMemberDecorate %type_uniformblock_lod 0 Offset 0\n" + + << "OpDecorate %uniformconst_image_sparse DescriptorSet 0\n" + << "OpDecorate %uniformconst_image_sparse Binding " << BINDING_IMAGE_SPARSE << "\n" + + << "%type_void = OpTypeVoid\n" + << "%type_void_func = OpTypeFunction %type_void\n" + + << "%type_bool = OpTypeBool\n" + << "%type_int = OpTypeInt 32 1\n" + << "%type_uint = OpTypeInt 32 0\n" + << "%type_float = OpTypeFloat 32\n" + << "%type_vec2 = OpTypeVector %type_float 2\n" + << "%type_vec3 = OpTypeVector %type_float 3\n" + << "%type_vec4 = OpTypeVector %type_float 4\n" + << "%type_uniformblock_lod = OpTypeStruct %type_uint\n" + << "%type_img_comp = " << getOpTypeImageComponent(m_format) << "\n" + << "%type_img_comp_vec4 = OpTypeVector %type_img_comp 4\n" + << "%type_struct_int_img_comp_vec4 = OpTypeStruct %type_int %type_img_comp_vec4\n" + + << "%type_input_vec3 = OpTypePointer Input %type_vec3\n" + << "%type_input_float = OpTypePointer Input %type_float\n" + + << "%type_output_img_comp_vec4 = OpTypePointer Output %type_img_comp_vec4\n" + << "%type_output_uint = OpTypePointer Output %type_uint\n" + + << "%type_function_int = OpTypePointer Function %type_int\n" + << "%type_function_img_comp = OpTypePointer Function %type_img_comp\n" + << "%type_function_img_comp_vec4 = OpTypePointer Function %type_img_comp_vec4\n" + << "%type_function_int_img_comp_vec4 = OpTypePointer Function %type_struct_int_img_comp_vec4\n" + + << "%type_pushconstant_uniformblock_lod = OpTypePointer PushConstant %type_uniformblock_lod\n" + << "%type_pushconstant_uniformblock_member_lod = OpTypePointer PushConstant %type_uint\n" + + << "%type_image_sparse = " << getOpTypeImageSparse(m_imageType, m_format, "%type_img_comp", true) << "\n" + << "%type_sampled_image_sparse = OpTypeSampledImage %type_image_sparse\n" + << "%type_uniformconst_image_sparse = OpTypePointer UniformConstant %type_sampled_image_sparse\n" + + << "%varying_texCoord = OpVariable %type_input_vec3 Input\n" + + << "%output_texel = OpVariable %type_output_img_comp_vec4 Output\n" + << "%output_residency = OpVariable %type_output_uint Output\n" + + << "%uniformconst_image_sparse = OpVariable %type_uniformconst_image_sparse UniformConstant\n" + + << "%uniformblock_lod_instance = OpVariable %type_pushconstant_uniformblock_lod PushConstant\n" + + // Declare constants + << "%constant_uint_0 = OpConstant %type_uint 0\n" + << "%constant_uint_1 = OpConstant %type_uint 1\n" + << "%constant_uint_2 = OpConstant %type_uint 2\n" + << "%constant_uint_3 = OpConstant %type_uint 3\n" + << "%constant_int_0 = OpConstant %type_int 0\n" + << "%constant_int_1 = OpConstant %type_int 1\n" + << "%constant_int_2 = OpConstant %type_int 2\n" + << "%constant_int_3 = OpConstant %type_int 3\n" + << "%constant_texel_resident = OpConstant %type_uint " << MEMORY_BLOCK_BOUND_VALUE << "\n" + << "%constant_texel_not_resident = OpConstant %type_uint " << MEMORY_BLOCK_NOT_BOUND_VALUE << "\n" + + // Call main function + << "%func_main = OpFunction %type_void None %type_void_func\n" + << "%label_func_main = OpLabel\n" + + << "%local_image_sparse = OpLoad %type_sampled_image_sparse %uniformconst_image_sparse\n" + + << "%local_texCoord_x = OpCompositeExtract %type_float %varying_texCoord 0\n" + << "%local_texCoord_y = OpCompositeExtract %type_float %varying_texCoord 1\n" + << "%local_texCoord_z = OpCompositeExtract %type_float %varying_texCoord 2\n" + + << "%local_texCoord_xy = OpCompositeConstruct %type_vec2 %local_texCoord_x %local_texCoord_y\n" + << "%local_texCoord_xyz = 
OpCompositeConstruct %type_vec3 %local_texCoord_x %local_texCoord_y %local_texCoord_z\n" + + << "%access_uniformblock_member_uint_lod = OpAccessChain %type_pushconstant_uniformblock_member_lod %uniformblock_lod_instance %constant_int_0\n" + << "%local_uniformblock_member_uint_lod = OpLoad %type_uint %access_uniformblock_member_uint_lod\n" + << "%local_uniformblock_member_float_lod = OpConvertUToF %type_float %local_uniformblock_member_uint_lod\n" + + << sparseImageOpString("%local_sparse_op_result", "%type_struct_int_img_comp_vec4", "%local_image_sparse", coordString, "%local_uniformblock_member_float_lod") << "\n" + + // Load texel value + << "%local_img_comp_vec4 = OpCompositeExtract %type_img_comp_vec4 %local_sparse_op_result 1\n" + + << "OpStore %output_texel %local_img_comp_vec4\n" + + // Load residency code + << "%local_residency_code = OpCompositeExtract %type_int %local_sparse_op_result 0\n" + + // Check if loaded texel is placed in resident memory + << "%local_texel_resident = OpImageSparseTexelsResident %type_bool %local_residency_code\n" + << "OpSelectionMerge %branch_texel_resident None\n" + << "OpBranchConditional %local_texel_resident %label_texel_resident %label_texel_not_resident\n" + << "%label_texel_resident = OpLabel\n" + + // Loaded texel is in resident memory + << "OpStore %output_residency %constant_texel_resident\n" + + << "OpBranch %branch_texel_resident\n" + << "%label_texel_not_resident = OpLabel\n" + + // Loaded texel is not in resident memory + << "OpStore %output_residency %constant_texel_not_resident\n" + + << "OpBranch %branch_texel_resident\n" + << "%branch_texel_resident = OpLabel\n" + + << "OpReturn\n" + << "OpFunctionEnd\n"; + + programCollection.spirvAsmSources.add("fragment_shader") << fs.str(); +} + +std::string SparseCaseOpImageSparseSampleExplicitLod::sparseImageOpString (const std::string& resultVariable, + const std::string& resultType, + const std::string& image, + const std::string& coord, + const std::string& miplevel) const +{ + std::ostringstream src; + + src << resultVariable << " = OpImageSparseSampleExplicitLod " << resultType << " " << image << " " << coord << " Lod " << miplevel << "\n"; + + return src.str(); +} + +std::string SparseCaseOpImageSparseSampleImplicitLod::sparseImageOpString (const std::string& resultVariable, + const std::string& resultType, + const std::string& image, + const std::string& coord, + const std::string& miplevel) const +{ + DE_UNREF(miplevel); + + std::ostringstream src; + + src << resultVariable << " = OpImageSparseSampleImplicitLod " << resultType << " " << image << " " << coord << "\n"; + + return src.str(); +} + +std::string SparseCaseOpImageSparseGather::sparseImageOpString (const std::string& resultVariable, + const std::string& resultType, + const std::string& image, + const std::string& coord, + const std::string& miplevel) const +{ + DE_UNREF(miplevel); + + std::ostringstream src; + + src << "%local_sparse_gather_result_x = OpImageSparseGather " << resultType << " " << image << " " << coord << " %constant_int_0\n"; + src << "%local_sparse_gather_result_y = OpImageSparseGather " << resultType << " " << image << " " << coord << " %constant_int_1\n"; + src << "%local_sparse_gather_result_z = OpImageSparseGather " << resultType << " " << image << " " << coord << " %constant_int_2\n"; + src << "%local_sparse_gather_result_w = OpImageSparseGather " << resultType << " " << image << " " << coord << " %constant_int_3\n"; + + src << "%local_gather_residency_code = OpCompositeExtract %type_int 
%local_sparse_gather_result_x 0\n"; + + src << "%local_gather_texels_x = OpCompositeExtract %type_img_comp_vec4 %local_sparse_gather_result_x 1\n"; + src << "%local_gather_texels_y = OpCompositeExtract %type_img_comp_vec4 %local_sparse_gather_result_y 1\n"; + src << "%local_gather_texels_z = OpCompositeExtract %type_img_comp_vec4 %local_sparse_gather_result_z 1\n"; + src << "%local_gather_texels_w = OpCompositeExtract %type_img_comp_vec4 %local_sparse_gather_result_w 1\n"; + + src << "%local_gather_primary_texel_x = OpCompositeExtract %type_img_comp %local_gather_texels_x 3\n"; + src << "%local_gather_primary_texel_y = OpCompositeExtract %type_img_comp %local_gather_texels_y 3\n"; + src << "%local_gather_primary_texel_z = OpCompositeExtract %type_img_comp %local_gather_texels_z 3\n"; + src << "%local_gather_primary_texel_w = OpCompositeExtract %type_img_comp %local_gather_texels_w 3\n"; + + src << "%local_gather_primary_texel = OpCompositeConstruct %type_img_comp_vec4 %local_gather_primary_texel_x %local_gather_primary_texel_y %local_gather_primary_texel_z %local_gather_primary_texel_w\n"; + src << resultVariable << " = OpCompositeConstruct " << resultType << " %local_gather_residency_code %local_gather_primary_texel\n"; + + return src.str(); +} + +class SparseShaderIntrinsicsInstanceSampledBase : public SparseShaderIntrinsicsInstanceBase +{ +public: + SparseShaderIntrinsicsInstanceSampledBase (Context& context, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsInstanceBase(context, function, imageType, imageSize, format) {} + + VkImageUsageFlags imageSparseUsageFlags (void) const; + VkImageUsageFlags imageOutputUsageFlags (void) const; + + VkQueueFlags getQueueFlags (void) const; + + void recordCommands (vk::Allocator& allocator, + const VkCommandBuffer commandBuffer, + const VkImageCreateInfo& imageSparseInfo, + const VkImage imageSparse, + const VkImage imageTexels, + const VkImage imageResidency); + + virtual VkImageSubresourceRange sampledImageRangeToBind(const VkImageCreateInfo& imageSparseInfo, const deUint32 mipLevel) const = 0; + +private: + + typedef de::SharedPtr< vk::Unique > SharedVkFramebuffer; + + de::SharedPtr vertexBuffer; + std::vector framebuffers; + Move renderPass; + Move sampler; +}; + +VkImageUsageFlags SparseShaderIntrinsicsInstanceSampledBase::imageSparseUsageFlags (void) const +{ + return VK_IMAGE_USAGE_SAMPLED_BIT; +} + +VkImageUsageFlags SparseShaderIntrinsicsInstanceSampledBase::imageOutputUsageFlags (void) const +{ + return VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT; +} + +VkQueueFlags SparseShaderIntrinsicsInstanceSampledBase::getQueueFlags (void) const +{ + return VK_QUEUE_GRAPHICS_BIT; +} + +void SparseShaderIntrinsicsInstanceSampledBase::recordCommands (vk::Allocator& allocator, + const VkCommandBuffer commandBuffer, + const VkImageCreateInfo& imageSparseInfo, + const VkImage imageSparse, + const VkImage imageTexels, + const VkImage imageResidency) +{ + const InstanceInterface& instance = m_context.getInstanceInterface(); + const DeviceInterface& deviceInterface = m_context.getDeviceInterface(); + const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice(); + const VkPhysicalDeviceProperties deviceProperties = getPhysicalDeviceProperties(instance, physicalDevice); + + if (imageSparseInfo.extent.width > deviceProperties.limits.maxFramebufferWidth || + imageSparseInfo.extent.height > deviceProperties.limits.maxFramebufferHeight || + 
imageSparseInfo.arrayLayers > deviceProperties.limits.maxFramebufferLayers) + { + TCU_THROW(NotSupportedError, "Image size exceeds allowed framebuffer dimensions"); + } + + // Check if device supports image format for sampled images + if (!checkImageFormatFeatureSupport(instance, physicalDevice, imageSparseInfo.format, VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) + TCU_THROW(NotSupportedError, "Device does not support image format for sampled images"); + + // Check if device supports image format for color attachment + if (!checkImageFormatFeatureSupport(instance, physicalDevice, imageSparseInfo.format, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) + TCU_THROW(NotSupportedError, "Device does not support image format for color attachment"); + + // Make sure device supports VK_FORMAT_R32_UINT format for color attachment + if (!checkImageFormatFeatureSupport(instance, physicalDevice, mapTextureFormat(m_residencyFormat), VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) + TCU_THROW(TestError, "Device does not support VK_FORMAT_R32_UINT format for color attachment"); + + // Create buffer storing vertex data + std::vector vertexData; + + vertexData.push_back(tcu::Vec2(-1.0f,-1.0f)); + vertexData.push_back(tcu::Vec2( 0.0f, 0.0f)); + + vertexData.push_back(tcu::Vec2(-1.0f, 1.0f)); + vertexData.push_back(tcu::Vec2( 0.0f, 1.0f)); + + vertexData.push_back(tcu::Vec2( 1.0f,-1.0f)); + vertexData.push_back(tcu::Vec2( 1.0f, 0.0f)); + + vertexData.push_back(tcu::Vec2( 1.0f, 1.0f)); + vertexData.push_back(tcu::Vec2( 1.0f, 1.0f)); + + const VkFormat vertexFormatPosition = VK_FORMAT_R32G32_SFLOAT; + const VkFormat vertexFormatTexCoord = VK_FORMAT_R32G32_SFLOAT; + + const deUint32 vertexSizePosition = tcu::getPixelSize(mapVkFormat(vertexFormatPosition)); + const deUint32 vertexSizeTexCoord = tcu::getPixelSize(mapVkFormat(vertexFormatTexCoord)); + + const VkDeviceSize vertexBufferStartOffset = 0ull; + const deUint32 vertexBufferOffsetPosition = 0ull; + const deUint32 vertexBufferOffsetTexCoord = vertexSizePosition; + + const deUint32 vertexDataStride = vertexSizePosition + vertexSizeTexCoord; + const VkDeviceSize vertexDataSizeInBytes = sizeInBytes(vertexData); + + vertexBuffer = de::SharedPtr(new Buffer(deviceInterface, *m_logicalDevice, allocator, makeBufferCreateInfo(vertexDataSizeInBytes, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT), MemoryRequirement::HostVisible)); + const Allocation& vertexBufferAllocation = vertexBuffer->getAllocation(); + + deMemcpy(vertexBufferAllocation.getHostPtr(), &vertexData[0], static_cast(vertexDataSizeInBytes)); + flushMappedMemoryRange(deviceInterface, *m_logicalDevice, vertexBufferAllocation.getMemory(), vertexBufferAllocation.getOffset(), vertexDataSizeInBytes); + + // Create render pass + const VkAttachmentDescription texelsAttachmentDescription = + { + (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags; + imageSparseInfo.format, // VkFormat format; + VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples; + VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp; + VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp; + VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp; + VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp; + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout; + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout finalLayout; + }; + + const VkAttachmentDescription residencyAttachmentDescription = + { + (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags; + 
mapTextureFormat(m_residencyFormat), // VkFormat format; + VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples; + VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp; + VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp; + VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp; + VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp; + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout; + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout finalLayout; + }; + + const VkAttachmentDescription colorAttachmentsDescription[] = { texelsAttachmentDescription, residencyAttachmentDescription }; + + const VkAttachmentReference texelsAttachmentReference = + { + 0u, // deUint32 attachment; + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout; + }; + + const VkAttachmentReference residencyAttachmentReference = + { + 1u, // deUint32 attachment; + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout; + }; + + const VkAttachmentReference colorAttachmentsReference[] = { texelsAttachmentReference, residencyAttachmentReference }; + + const VkAttachmentReference depthAttachmentReference = + { + VK_ATTACHMENT_UNUSED, // deUint32 attachment; + VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout layout; + }; + + const VkSubpassDescription subpassDescription = + { + (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags; + VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint; + 0u, // deUint32 inputAttachmentCount; + DE_NULL, // const VkAttachmentReference* pInputAttachments; + 2u, // deUint32 colorAttachmentCount; + colorAttachmentsReference, // const VkAttachmentReference* pColorAttachments; + DE_NULL, // const VkAttachmentReference* pResolveAttachments; + &depthAttachmentReference, // const VkAttachmentReference* pDepthStencilAttachment; + 0u, // deUint32 preserveAttachmentCount; + DE_NULL // const deUint32* pPreserveAttachments; + }; + + const VkRenderPassCreateInfo renderPassInfo = + { + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags; + 2u, // deUint32 attachmentCount; + colorAttachmentsDescription, // const VkAttachmentDescription* pAttachments; + 1u, // deUint32 subpassCount; + &subpassDescription, // const VkSubpassDescription* pSubpasses; + 0u, // deUint32 dependencyCount; + DE_NULL // const VkSubpassDependency* pDependencies; + }; + + renderPass = createRenderPass(deviceInterface, *m_logicalDevice, &renderPassInfo); + + // Create descriptor set layout + DescriptorSetLayoutBuilder descriptorLayerBuilder; + + descriptorLayerBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT); + + const Unique descriptorSetLayout(descriptorLayerBuilder.build(deviceInterface, *m_logicalDevice)); + + // Create descriptor pool + DescriptorPoolBuilder descriptorPoolBuilder; + + descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, imageSparseInfo.mipLevels); + + descriptorPool = descriptorPoolBuilder.build(deviceInterface, *m_logicalDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, imageSparseInfo.mipLevels); + + // Create sampler object + const tcu::Sampler samplerObject(tcu::Sampler::REPEAT_GL, tcu::Sampler::REPEAT_GL, tcu::Sampler::REPEAT_GL, tcu::Sampler::NEAREST_MIPMAP_NEAREST, tcu::Sampler::NEAREST); + const VkSamplerCreateInfo samplerCreateInfo = mapSampler(samplerObject, m_format); + sampler = 
createSampler(deviceInterface, *m_logicalDevice, &samplerCreateInfo); + + // Create pipeline layout + const VkPushConstantRange lodConstantRange = + { + VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlags stageFlags; + 0u, // deUint32 offset; + sizeof(deUint32), // deUint32 size; + }; + + const VkPipelineLayoutCreateInfo pipelineLayoutParams = + { + VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + 0u, // VkPipelineLayoutCreateFlags flags; + 1u, // deUint32 setLayoutCount; + &descriptorSetLayout.get(), // const VkDescriptorSetLayout* pSetLayouts; + 1u, // deUint32 pushConstantRangeCount; + &lodConstantRange, // const VkPushConstantRange* pPushConstantRanges; + }; + + const Unique pipelineLayout(createPipelineLayout(deviceInterface, *m_logicalDevice, &pipelineLayoutParams)); + + // Create graphics pipeline + const VkVertexInputBindingDescription vertexBinding = + { + 0u, // deUint32 binding; + vertexDataStride, // deUint32 stride; + VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate inputRate; + }; + + const VkVertexInputAttributeDescription vertexAttributePosition = + { + 0u, // deUint32 location; + 0u, // deUint32 binding; + vertexFormatPosition, // VkFormat format; + vertexBufferOffsetPosition, // deUint32 offset; + }; + + const VkVertexInputAttributeDescription vertexAttributeTexCoord = + { + 1u, // deUint32 location; + 0u, // deUint32 binding; + vertexFormatTexCoord, // VkFormat format; + vertexBufferOffsetTexCoord, // deUint32 offset; + }; + + { + GraphicsPipelineBuilder graphicPipelineBuilder; + + graphicPipelineBuilder.addVertexBinding(vertexBinding); + graphicPipelineBuilder.addVertexAttribute(vertexAttributePosition); + graphicPipelineBuilder.addVertexAttribute(vertexAttributeTexCoord); + graphicPipelineBuilder.setPrimitiveTopology(vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP); + graphicPipelineBuilder.addDynamicState(VK_DYNAMIC_STATE_VIEWPORT); + graphicPipelineBuilder.addDynamicState(VK_DYNAMIC_STATE_SCISSOR); + graphicPipelineBuilder.setAttachmentsCount(2u); + graphicPipelineBuilder.setShader(deviceInterface, *m_logicalDevice, VK_SHADER_STAGE_VERTEX_BIT, m_context.getBinaryCollection().get("vertex_shader"), DE_NULL); + graphicPipelineBuilder.setShader(deviceInterface, *m_logicalDevice, VK_SHADER_STAGE_FRAGMENT_BIT, m_context.getBinaryCollection().get("fragment_shader"), DE_NULL); + + if (imageSparseInfo.arrayLayers > 1u) + { + requireFeatures(instance, physicalDevice, FEATURE_GEOMETRY_SHADER); + graphicPipelineBuilder.setShader(deviceInterface, *m_logicalDevice, VK_SHADER_STAGE_GEOMETRY_BIT, m_context.getBinaryCollection().get("geometry_shader"), DE_NULL); + } + + pipelines.push_back(makeVkSharedPtr(graphicPipelineBuilder.build(deviceInterface, *m_logicalDevice, *pipelineLayout, *renderPass))); + } + + const VkPipeline graphicsPipeline = **pipelines[0]; + + { + const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers); + + VkImageMemoryBarrier imageShaderAccessBarriers[3]; + + imageShaderAccessBarriers[0] = makeImageMemoryBarrier + ( + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_SHADER_READ_BIT, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, + imageSparse, + fullImageSubresourceRange + ); + + imageShaderAccessBarriers[1] = makeImageMemoryBarrier + ( + 0u, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + 
imageTexels, + fullImageSubresourceRange + ); + + imageShaderAccessBarriers[2] = makeImageMemoryBarrier + ( + 0u, + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + imageResidency, + fullImageSubresourceRange + ); + + deviceInterface.cmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 3u, imageShaderAccessBarriers); + } + + imageSparseViews.resize(imageSparseInfo.mipLevels); + imageTexelsViews.resize(imageSparseInfo.mipLevels); + imageResidencyViews.resize(imageSparseInfo.mipLevels); + framebuffers.resize(imageSparseInfo.mipLevels); + descriptorSets.resize(imageSparseInfo.mipLevels); + + std::vector clearValues; + clearValues.push_back(makeClearValueColor(tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f))); + clearValues.push_back(makeClearValueColor(tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f))); + + for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx) + { + const vk::VkExtent3D mipLevelSize = mipLevelExtents(imageSparseInfo.extent, mipLevelNdx); + + const vk::VkRect2D renderArea = + { + makeOffset2D(0u, 0u), + makeExtent2D(mipLevelSize.width, mipLevelSize.height), + }; + + const VkViewport viewport = makeViewport + ( + 0.0f, 0.0f, + static_cast(mipLevelSize.width), static_cast(mipLevelSize.height), + 0.0f, 1.0f + ); + + const VkImageSubresourceRange mipLevelRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevelNdx, 1u, 0u, imageSparseInfo.arrayLayers); + + // Create color attachments image views + imageTexelsViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, *m_logicalDevice, imageTexels, mapImageViewType(m_imageType), imageSparseInfo.format, mipLevelRange)); + imageResidencyViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, *m_logicalDevice, imageResidency, mapImageViewType(m_imageType), mapTextureFormat(m_residencyFormat), mipLevelRange)); + + const VkImageView attachmentsViews[] = { **imageTexelsViews[mipLevelNdx], **imageResidencyViews[mipLevelNdx] }; + + // Create framebuffer + const VkFramebufferCreateInfo framebufferInfo = + { + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkFramebufferCreateFlags)0, // VkFramebufferCreateFlags flags; + *renderPass, // VkRenderPass renderPass; + 2u, // uint32_t attachmentCount; + attachmentsViews, // const VkImageView* pAttachments; + mipLevelSize.width, // uint32_t width; + mipLevelSize.height, // uint32_t height; + imageSparseInfo.arrayLayers, // uint32_t layers; + }; + + framebuffers[mipLevelNdx] = makeVkSharedPtr(createFramebuffer(deviceInterface, *m_logicalDevice, &framebufferInfo)); + + // Create descriptor set + descriptorSets[mipLevelNdx] = makeVkSharedPtr(makeDescriptorSet(deviceInterface, *m_logicalDevice, *descriptorPool, *descriptorSetLayout)); + const VkDescriptorSet descriptorSet = **descriptorSets[mipLevelNdx]; + + // Update descriptor set + const VkImageSubresourceRange sparseImageSubresourceRange = sampledImageRangeToBind(imageSparseInfo, mipLevelNdx); + + imageSparseViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, *m_logicalDevice, imageSparse, mapImageViewType(m_imageType), imageSparseInfo.format, sparseImageSubresourceRange)); + + const VkDescriptorImageInfo imageSparseDescInfo = makeDescriptorImageInfo(*sampler, **imageSparseViews[mipLevelNdx], VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); + + DescriptorSetUpdateBuilder descriptorUpdateBuilder; + + 
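+	// Write the per-mip-level sparse image view, together with the sampler, into the combined-image-sampler binding read by the fragment shader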
descriptorUpdateBuilder.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(BINDING_IMAGE_SPARSE), VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, &imageSparseDescInfo); + descriptorUpdateBuilder.update(deviceInterface, *m_logicalDevice); + + // Begin render pass + beginRenderPass(deviceInterface, commandBuffer, *renderPass, **framebuffers[mipLevelNdx], renderArea, clearValues); + + // Bind graphics pipeline + deviceInterface.cmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, graphicsPipeline); + + // Bind descriptor set + deviceInterface.cmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL); + + // Bind vertex buffer + deviceInterface.cmdBindVertexBuffers(commandBuffer, 0u, 1u, &vertexBuffer->get(), &vertexBufferStartOffset); + + // Bind Viewport + deviceInterface.cmdSetViewport(commandBuffer, 0u, 1u, &viewport); + + // Bind Scissor Rectangle + deviceInterface.cmdSetScissor(commandBuffer, 0u, 1u, &renderArea); + + // Update push constants + deviceInterface.cmdPushConstants(commandBuffer, *pipelineLayout, VK_SHADER_STAGE_FRAGMENT_BIT, 0u, sizeof(deUint32), &mipLevelNdx); + + // Draw full screen quad + deviceInterface.cmdDraw(commandBuffer, 4u, 1u, 0u, 0u); + + // End render pass + endRenderPass(deviceInterface, commandBuffer); + } + + { + const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers); + + VkImageMemoryBarrier imageOutputTransferSrcBarriers[2]; + + imageOutputTransferSrcBarriers[0] = makeImageMemoryBarrier + ( + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + VK_ACCESS_TRANSFER_READ_BIT, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + imageTexels, + fullImageSubresourceRange + ); + + imageOutputTransferSrcBarriers[1] = makeImageMemoryBarrier + ( + VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, + VK_ACCESS_TRANSFER_READ_BIT, + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + imageResidency, + fullImageSubresourceRange + ); + + deviceInterface.cmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 2u, imageOutputTransferSrcBarriers); + } +} + +class SparseShaderIntrinsicsInstanceSampledExplicit : public SparseShaderIntrinsicsInstanceSampledBase +{ +public: + SparseShaderIntrinsicsInstanceSampledExplicit (Context& context, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsInstanceSampledBase(context, function, imageType, imageSize, format) {} + + VkImageSubresourceRange sampledImageRangeToBind(const VkImageCreateInfo& imageSparseInfo, const deUint32 mipLevel) const; +}; + +VkImageSubresourceRange SparseShaderIntrinsicsInstanceSampledExplicit::sampledImageRangeToBind (const VkImageCreateInfo& imageSparseInfo, const deUint32 mipLevel) const +{ + DE_UNREF(mipLevel); + + return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers); +} + +TestInstance* SparseShaderIntrinsicsCaseSampledExplicit::createInstance (Context& context) const +{ + return new SparseShaderIntrinsicsInstanceSampledExplicit(context, m_function, m_imageType, m_imageSize, m_format); +} + +class SparseShaderIntrinsicsInstanceSampledImplicit : public SparseShaderIntrinsicsInstanceSampledBase +{ +public: 
+ SparseShaderIntrinsicsInstanceSampledImplicit (Context& context, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsInstanceSampledBase(context, function, imageType, imageSize, format) {} + + VkImageSubresourceRange sampledImageRangeToBind(const VkImageCreateInfo& imageSparseInfo, const deUint32 mipLevel) const; +}; + +VkImageSubresourceRange SparseShaderIntrinsicsInstanceSampledImplicit::sampledImageRangeToBind (const VkImageCreateInfo& imageSparseInfo, const deUint32 mipLevel) const +{ + return makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevel, 1u, 0u, imageSparseInfo.arrayLayers); +} + +TestInstance* SparseShaderIntrinsicsCaseSampledImplicit::createInstance (Context& context) const +{ + return new SparseShaderIntrinsicsInstanceSampledImplicit(context, m_function, m_imageType, m_imageSize, m_format); +} + +} // sparse +} // vkt diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsSampled.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsSampled.hpp new file mode 100644 index 0000000..f898214 --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsSampled.hpp @@ -0,0 +1,126 @@ +#ifndef _VKTSPARSERESOURCESSHADERINTRINSICSSAMPLED_HPP +#define _VKTSPARSERESOURCESSHADERINTRINSICSSAMPLED_HPP +/*------------------------------------------------------------------------ + * Vulkan Conformance Tests + * ------------------------ + * + * Copyright (c) 2016 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + *//*! 
+ * \file vktSparseResourcesShaderIntrinsicsSampled.hpp + * \brief Sparse Resources Shader Intrinsics for sampled images + *//*--------------------------------------------------------------------*/ + +#include "vktSparseResourcesShaderIntrinsicsBase.hpp" + +namespace vkt +{ +namespace sparse +{ + +class SparseShaderIntrinsicsCaseSampledBase : public SparseShaderIntrinsicsCaseBase +{ +public: + SparseShaderIntrinsicsCaseSampledBase (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsCaseBase(testCtx, name, function, imageType, imageSize, format) {} + + void initPrograms (vk::SourceCollections& programCollection) const; + + virtual std::string sparseImageOpString (const std::string& resultVariable, + const std::string& resultType, + const std::string& image, + const std::string& coord, + const std::string& miplevel) const = 0; +}; + +class SparseShaderIntrinsicsCaseSampledExplicit : public SparseShaderIntrinsicsCaseSampledBase +{ +public: + SparseShaderIntrinsicsCaseSampledExplicit (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsCaseSampledBase(testCtx, name, function, imageType, imageSize, format) {} + + TestInstance* createInstance (Context& context) const; +}; + +class SparseCaseOpImageSparseSampleExplicitLod : public SparseShaderIntrinsicsCaseSampledExplicit +{ +public: + SparseCaseOpImageSparseSampleExplicitLod (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsCaseSampledExplicit(testCtx, name, function, imageType, imageSize, format) {} + + std::string sparseImageOpString(const std::string& resultVariable, const std::string& resultType, const std::string& image, const std::string& coord, const std::string& miplevel) const; +}; + +class SparseShaderIntrinsicsCaseSampledImplicit : public SparseShaderIntrinsicsCaseSampledBase +{ +public: + SparseShaderIntrinsicsCaseSampledImplicit (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsCaseSampledBase(testCtx, name, function, imageType, imageSize, format) {} + + TestInstance* createInstance (Context& context) const; +}; + +class SparseCaseOpImageSparseSampleImplicitLod : public SparseShaderIntrinsicsCaseSampledImplicit +{ +public: + SparseCaseOpImageSparseSampleImplicitLod (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsCaseSampledImplicit(testCtx, name, function, imageType, imageSize, format) {} + + std::string sparseImageOpString(const std::string& resultVariable, const std::string& resultType, const std::string& image, const std::string& coord, const std::string& miplevel) const; +}; + +class SparseCaseOpImageSparseGather : public SparseShaderIntrinsicsCaseSampledImplicit +{ +public: + SparseCaseOpImageSparseGather (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + 
const tcu::TextureFormat& format) + : SparseShaderIntrinsicsCaseSampledImplicit(testCtx, name, function, imageType, imageSize, format) {} + + std::string sparseImageOpString(const std::string& resultVariable, const std::string& resultType, const std::string& image, const std::string& coord, const std::string& miplevel) const; +}; + +} // sparse +} // vkt + +#endif // _VKTSPARSERESOURCESSHADERINTRINSICSSAMPLED_HPP \ No newline at end of file diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsStorage.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsStorage.cpp new file mode 100644 index 0000000..c00cc44 --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsStorage.cpp @@ -0,0 +1,582 @@ +/*------------------------------------------------------------------------ + * Vulkan Conformance Tests + * ------------------------ + * + * Copyright (c) 2016 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + *//* + * \file vktSparseResourcesShaderIntrinsicsStorage.cpp + * \brief Sparse Resources Shader Intrinsics for storage images + *//*--------------------------------------------------------------------*/ + +#include "vktSparseResourcesShaderIntrinsicsStorage.hpp" + +using namespace vk; + +namespace vkt +{ +namespace sparse +{ + +tcu::UVec3 computeWorkGroupSize (const tcu::UVec3& gridSize) +{ + const deUint32 maxComputeWorkGroupInvocations = 128u; + const tcu::UVec3 maxComputeWorkGroupSize = tcu::UVec3(128u, 128u, 64u); + + const deUint32 xWorkGroupSize = std::min(std::min(gridSize.x(), maxComputeWorkGroupSize.x()), maxComputeWorkGroupInvocations); + const deUint32 yWorkGroupSize = std::min(std::min(gridSize.y(), maxComputeWorkGroupSize.y()), maxComputeWorkGroupInvocations / xWorkGroupSize); + const deUint32 zWorkGroupSize = std::min(std::min(gridSize.z(), maxComputeWorkGroupSize.z()), maxComputeWorkGroupInvocations / (xWorkGroupSize*yWorkGroupSize)); + + return tcu::UVec3(xWorkGroupSize, yWorkGroupSize, zWorkGroupSize); +} + +void SparseShaderIntrinsicsCaseStorage::initPrograms (vk::SourceCollections& programCollection) const +{ + const std::string imageTypeStr = getShaderImageType(m_format, m_imageType); + const std::string formatDataStr = getShaderImageDataType(m_format); + const std::string formatQualStr = getShaderImageFormatQualifier(m_format); + + const std::string coordString = getShaderImageCoordinates(m_imageType, + "%local_int_GlobalInvocationID_x", + "%local_ivec2_GlobalInvocationID_xy", + "%local_ivec3_GlobalInvocationID_xyz"); + // Create compute program + std::ostringstream src; + + src << "OpCapability Shader\n" + << "OpCapability ImageCubeArray\n" + << "OpCapability SparseResidency\n" + << "OpCapability StorageImageExtendedFormats\n" + + << "%ext_import = OpExtInstImport \"GLSL.std.450\"\n" + << "OpMemoryModel Logical GLSL450\n" + << "OpEntryPoint GLCompute %func_main \"main\" %input_GlobalInvocationID\n" + << 
"OpExecutionMode %func_main LocalSize 1 1 1\n" + << "OpSource GLSL 440\n" + + << "OpName %func_main \"main\"\n" + + << "OpName %input_GlobalInvocationID \"gl_GlobalInvocationID\"\n" + << "OpName %input_WorkGroupSize \"gl_WorkGroupSize\"\n" + + << "OpName %uniform_image_sparse \"u_imageSparse\"\n" + << "OpName %uniform_image_texels \"u_imageTexels\"\n" + << "OpName %uniform_image_residency \"u_imageResidency\"\n" + + << "OpDecorate %input_GlobalInvocationID BuiltIn GlobalInvocationId\n" + + << "OpDecorate %input_WorkGroupSize BuiltIn WorkgroupSize\n" + + << "OpDecorate %constant_uint_grid_x SpecId 1\n" + << "OpDecorate %constant_uint_grid_y SpecId 2\n" + << "OpDecorate %constant_uint_grid_z SpecId 3\n" + + << "OpDecorate %constant_uint_work_group_size_x SpecId 4\n" + << "OpDecorate %constant_uint_work_group_size_y SpecId 5\n" + << "OpDecorate %constant_uint_work_group_size_z SpecId 6\n" + + << "OpDecorate %uniform_image_sparse DescriptorSet 0\n" + << "OpDecorate %uniform_image_sparse Binding " << BINDING_IMAGE_SPARSE << "\n" + + << "OpDecorate %uniform_image_texels DescriptorSet 0\n" + << "OpDecorate %uniform_image_texels Binding " << BINDING_IMAGE_TEXELS << "\n" + << "OpDecorate %uniform_image_texels NonReadable\n" + + << "OpDecorate %uniform_image_residency DescriptorSet 0\n" + << "OpDecorate %uniform_image_residency Binding " << BINDING_IMAGE_RESIDENCY << "\n" + << "OpDecorate %uniform_image_residency NonReadable\n" + + // Declare data types + << "%type_bool = OpTypeBool\n" + << "%type_int = OpTypeInt 32 1\n" + << "%type_uint = OpTypeInt 32 0\n" + << "%type_ivec2 = OpTypeVector %type_int 2\n" + << "%type_ivec3 = OpTypeVector %type_int 3\n" + << "%type_uvec3 = OpTypeVector %type_uint 3\n" + << "%type_uvec4 = OpTypeVector %type_uint 4\n" + << "%type_img_comp = " << getOpTypeImageComponent(m_format) << "\n" + << "%type_img_comp_vec4 = OpTypeVector %type_img_comp 4\n" + << "%type_struct_int_img_comp_vec4 = OpTypeStruct %type_int %type_img_comp_vec4\n" + + << "%type_input_uint = OpTypePointer Input %type_uint\n" + << "%type_input_uvec3 = OpTypePointer Input %type_uvec3\n" + + << "%type_function_int = OpTypePointer Function %type_int\n" + << "%type_function_img_comp_vec4 = OpTypePointer Function %type_img_comp_vec4\n" + + << "%type_void = OpTypeVoid\n" + << "%type_void_func = OpTypeFunction %type_void\n" + + // Sparse image type declaration + << sparseImageTypeDecl("%type_image_sparse", "%type_img_comp") + << "%type_uniformconst_image_sparse = OpTypePointer UniformConstant %type_image_sparse\n" + + // Texels image type declaration + << "%type_image_texels = " << getOpTypeImageSparse(m_imageType, m_format, "%type_img_comp", false) << "\n" + << "%type_uniformconst_image_texels = OpTypePointer UniformConstant %type_image_texels\n" + + // Residency image type declaration + << "%type_image_residency = " << getOpTypeImageResidency(m_imageType) << "\n" + << "%type_uniformconst_image_residency = OpTypePointer UniformConstant %type_image_residency\n" + + // Declare sparse image variable + << "%uniform_image_sparse = OpVariable %type_uniformconst_image_sparse UniformConstant\n" + + // Declare output image variable for storing texels + << "%uniform_image_texels = OpVariable %type_uniformconst_image_texels UniformConstant\n" + + // Declare output image variable for storing residency information + << "%uniform_image_residency = OpVariable %type_uniformconst_image_residency UniformConstant\n" + + // Declare input variables + << "%input_GlobalInvocationID = OpVariable %type_input_uvec3 Input\n" + + << 
"%constant_uint_grid_x = OpSpecConstant %type_uint 1\n" + << "%constant_uint_grid_y = OpSpecConstant %type_uint 1\n" + << "%constant_uint_grid_z = OpSpecConstant %type_uint 1\n" + + << "%constant_uint_work_group_size_x = OpSpecConstant %type_uint 1\n" + << "%constant_uint_work_group_size_y = OpSpecConstant %type_uint 1\n" + << "%constant_uint_work_group_size_z = OpSpecConstant %type_uint 1\n" + << "%input_WorkGroupSize = OpSpecConstantComposite %type_uvec3 %constant_uint_work_group_size_x %constant_uint_work_group_size_y %constant_uint_work_group_size_z\n" + + // Declare constants + << "%constant_uint_0 = OpConstant %type_uint 0\n" + << "%constant_uint_1 = OpConstant %type_uint 1\n" + << "%constant_uint_2 = OpConstant %type_uint 2\n" + << "%constant_int_0 = OpConstant %type_int 0\n" + << "%constant_int_1 = OpConstant %type_int 1\n" + << "%constant_int_2 = OpConstant %type_int 2\n" + << "%constant_bool_true = OpConstantTrue %type_bool\n" + << "%constant_uint_resident = OpConstant %type_uint " << MEMORY_BLOCK_BOUND_VALUE << "\n" + << "%constant_uvec4_resident = OpConstantComposite %type_uvec4 %constant_uint_resident %constant_uint_resident %constant_uint_resident %constant_uint_resident\n" + << "%constant_uint_not_resident = OpConstant %type_uint " << MEMORY_BLOCK_NOT_BOUND_VALUE << "\n" + << "%constant_uvec4_not_resident = OpConstantComposite %type_uvec4 %constant_uint_not_resident %constant_uint_not_resident %constant_uint_not_resident %constant_uint_not_resident\n" + + // Call main function + << "%func_main = OpFunction %type_void None %type_void_func\n" + << "%label_func_main = OpLabel\n" + + // Load GlobalInvocationID.xyz into local variables + << "%access_GlobalInvocationID_x = OpAccessChain %type_input_uint %input_GlobalInvocationID %constant_uint_0\n" + << "%local_uint_GlobalInvocationID_x = OpLoad %type_uint %access_GlobalInvocationID_x\n" + << "%local_int_GlobalInvocationID_x = OpBitcast %type_int %local_uint_GlobalInvocationID_x\n" + + << "%access_GlobalInvocationID_y = OpAccessChain %type_input_uint %input_GlobalInvocationID %constant_uint_1\n" + << "%local_uint_GlobalInvocationID_y = OpLoad %type_uint %access_GlobalInvocationID_y\n" + << "%local_int_GlobalInvocationID_y = OpBitcast %type_int %local_uint_GlobalInvocationID_y\n" + + << "%access_GlobalInvocationID_z = OpAccessChain %type_input_uint %input_GlobalInvocationID %constant_uint_2\n" + << "%local_uint_GlobalInvocationID_z = OpLoad %type_uint %access_GlobalInvocationID_z\n" + << "%local_int_GlobalInvocationID_z = OpBitcast %type_int %local_uint_GlobalInvocationID_z\n" + + << "%local_ivec2_GlobalInvocationID_xy = OpCompositeConstruct %type_ivec2 %local_int_GlobalInvocationID_x %local_int_GlobalInvocationID_y\n" + << "%local_ivec3_GlobalInvocationID_xyz = OpCompositeConstruct %type_ivec3 %local_int_GlobalInvocationID_x %local_int_GlobalInvocationID_y %local_int_GlobalInvocationID_z\n" + + << "%comparison_range_x = OpULessThan %type_bool %local_uint_GlobalInvocationID_x %constant_uint_grid_x\n" + << "OpSelectionMerge %label_out_range_x None\n" + << "OpBranchConditional %comparison_range_x %label_in_range_x %label_out_range_x\n" + << "%label_in_range_x = OpLabel\n" + + << "%comparison_range_y = OpULessThan %type_bool %local_uint_GlobalInvocationID_y %constant_uint_grid_y\n" + << "OpSelectionMerge %label_out_range_y None\n" + << "OpBranchConditional %comparison_range_y %label_in_range_y %label_out_range_y\n" + << "%label_in_range_y = OpLabel\n" + + << "%comparison_range_z = OpULessThan %type_bool 
%local_uint_GlobalInvocationID_z %constant_uint_grid_z\n" + << "OpSelectionMerge %label_out_range_z None\n" + << "OpBranchConditional %comparison_range_z %label_in_range_z %label_out_range_z\n" + << "%label_in_range_z = OpLabel\n" + + // Load sparse image + << "%local_image_sparse = OpLoad %type_image_sparse %uniform_image_sparse\n" + + // Call OpImageSparse* + << sparseImageOpString("%local_sparse_op_result", "%type_struct_int_img_comp_vec4", "%local_image_sparse", coordString, "%constant_int_0") << "\n" + + // Load the texel from the sparse image to local variable for OpImageSparse* + << "%local_img_comp_vec4 = OpCompositeExtract %type_img_comp_vec4 %local_sparse_op_result 1\n" + + // Load residency code for OpImageSparse* + << "%local_residency_code = OpCompositeExtract %type_int %local_sparse_op_result 0\n" + // End Call OpImageSparse* + + // Load texels image + << "%local_image_texels = OpLoad %type_image_texels %uniform_image_texels\n" + + // Write the texel to output image via OpImageWrite + << "OpImageWrite %local_image_texels " << coordString << " %local_img_comp_vec4\n" + + // Load residency info image + << "%local_image_residency = OpLoad %type_image_residency %uniform_image_residency\n" + + // Check if loaded texel is placed in resident memory + << "%local_texel_resident = OpImageSparseTexelsResident %type_bool %local_residency_code\n" + << "OpSelectionMerge %branch_texel_resident None\n" + << "OpBranchConditional %local_texel_resident %label_texel_resident %label_texel_not_resident\n" + << "%label_texel_resident = OpLabel\n" + + // Loaded texel is in resident memory + << "OpImageWrite %local_image_residency " << coordString << " %constant_uvec4_resident\n" + + << "OpBranch %branch_texel_resident\n" + << "%label_texel_not_resident = OpLabel\n" + + // Loaded texel is not in resident memory + << "OpImageWrite %local_image_residency " << coordString << " %constant_uvec4_not_resident\n" + + << "OpBranch %branch_texel_resident\n" + << "%branch_texel_resident = OpLabel\n" + + << "OpBranch %label_out_range_z\n" + << "%label_out_range_z = OpLabel\n" + + << "OpBranch %label_out_range_y\n" + << "%label_out_range_y = OpLabel\n" + + << "OpBranch %label_out_range_x\n" + << "%label_out_range_x = OpLabel\n" + + << "OpReturn\n" + << "OpFunctionEnd\n"; + + programCollection.spirvAsmSources.add("compute") << src.str(); +} + +std::string SparseCaseOpImageSparseFetch::sparseImageTypeDecl (const std::string& imageType, const std::string& componentType) const +{ + std::ostringstream src; + + src << imageType << " = " << getOpTypeImageSparse(m_imageType, m_format, componentType, true) << "\n"; + + return src.str(); +} + +std::string SparseCaseOpImageSparseFetch::sparseImageOpString (const std::string& resultVariable, + const std::string& resultType, + const std::string& image, + const std::string& coord, + const std::string& mipLevel) const +{ + std::ostringstream src; + + src << resultVariable << " = OpImageSparseFetch " << resultType << " " << image << " " << coord << " Lod " << mipLevel << "\n"; + + return src.str(); +} + +std::string SparseCaseOpImageSparseRead::sparseImageTypeDecl (const std::string& imageType, const std::string& componentType) const +{ + std::ostringstream src; + + src << imageType << " = " << getOpTypeImageSparse(m_imageType, m_format, componentType, false) << "\n"; + + return src.str(); +} + +std::string SparseCaseOpImageSparseRead::sparseImageOpString (const std::string& resultVariable, + const std::string& resultType, + const std::string& image, + const std::string& coord, 
+ const std::string& mipLevel) const +{ + DE_UNREF(mipLevel); + + std::ostringstream src; + + src << resultVariable << " = OpImageSparseRead " << resultType << " " << image << " " << coord << "\n"; + + return src.str(); +} + +class SparseShaderIntrinsicsInstanceStorage : public SparseShaderIntrinsicsInstanceBase +{ +public: + SparseShaderIntrinsicsInstanceStorage (Context& context, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsInstanceBase(context, function, imageType, imageSize, format) {} + + VkImageUsageFlags imageOutputUsageFlags (void) const; + + VkQueueFlags getQueueFlags (void) const; + + void recordCommands (vk::Allocator& allocator, + const VkCommandBuffer commandBuffer, + const VkImageCreateInfo& imageSparseInfo, + const VkImage imageSparse, + const VkImage imageTexels, + const VkImage imageResidency); + + virtual VkDescriptorType imageSparseDescType (void) const = 0; +}; + +VkImageUsageFlags SparseShaderIntrinsicsInstanceStorage::imageOutputUsageFlags (void) const +{ + return VK_IMAGE_USAGE_STORAGE_BIT; +} + +VkQueueFlags SparseShaderIntrinsicsInstanceStorage::getQueueFlags (void) const +{ + return VK_QUEUE_COMPUTE_BIT; +} + +void SparseShaderIntrinsicsInstanceStorage::recordCommands (vk::Allocator& allocator, + const VkCommandBuffer commandBuffer, + const VkImageCreateInfo& imageSparseInfo, + const VkImage imageSparse, + const VkImage imageTexels, + const VkImage imageResidency) +{ + const InstanceInterface& instance = m_context.getInstanceInterface(); + const DeviceInterface& deviceInterface = m_context.getDeviceInterface(); + const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice(); + + DE_UNREF(allocator); + + // Check if device supports image format for storage image + if (!checkImageFormatFeatureSupport(instance, physicalDevice, imageSparseInfo.format, VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) + TCU_THROW(NotSupportedError, "Device does not support image format for storage image"); + + // Make sure device supports VK_FORMAT_R32_UINT format for storage image + if (!checkImageFormatFeatureSupport(instance, physicalDevice, mapTextureFormat(m_residencyFormat), VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) + TCU_THROW(TestError, "Device does not support VK_FORMAT_R32_UINT format for storage image"); + + pipelines.resize(imageSparseInfo.mipLevels); + descriptorSets.resize(imageSparseInfo.mipLevels); + imageSparseViews.resize(imageSparseInfo.mipLevels); + imageTexelsViews.resize(imageSparseInfo.mipLevels); + imageResidencyViews.resize(imageSparseInfo.mipLevels); + + // Create descriptor set layout + DescriptorSetLayoutBuilder descriptorLayerBuilder; + + descriptorLayerBuilder.addSingleBinding(imageSparseDescType(), VK_SHADER_STAGE_COMPUTE_BIT); + descriptorLayerBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT); + descriptorLayerBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT); + + const Unique descriptorSetLayout(descriptorLayerBuilder.build(deviceInterface, *m_logicalDevice)); + + // Create pipeline layout + const Unique pipelineLayout(makePipelineLayout(deviceInterface, *m_logicalDevice, *descriptorSetLayout)); + + // Create descriptor pool + DescriptorPoolBuilder descriptorPoolBuilder; + + descriptorPoolBuilder.addType(imageSparseDescType(), imageSparseInfo.mipLevels); + descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, imageSparseInfo.mipLevels); + 
descriptorPoolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, imageSparseInfo.mipLevels); + + descriptorPool = descriptorPoolBuilder.build(deviceInterface, *m_logicalDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, imageSparseInfo.mipLevels); + + const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, imageSparseInfo.mipLevels, 0u, imageSparseInfo.arrayLayers); + + { + VkImageMemoryBarrier imageShaderAccessBarriers[3]; + + imageShaderAccessBarriers[0] = makeImageMemoryBarrier + ( + VK_ACCESS_TRANSFER_WRITE_BIT, + VK_ACCESS_SHADER_READ_BIT, + VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, + VK_IMAGE_LAYOUT_GENERAL, + imageSparse, + fullImageSubresourceRange + ); + + imageShaderAccessBarriers[1] = makeImageMemoryBarrier + ( + 0u, + VK_ACCESS_SHADER_WRITE_BIT, + VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_GENERAL, + imageTexels, + fullImageSubresourceRange + ); + + imageShaderAccessBarriers[2] = makeImageMemoryBarrier + ( + 0u, + VK_ACCESS_SHADER_WRITE_BIT, + VK_IMAGE_LAYOUT_UNDEFINED, + VK_IMAGE_LAYOUT_GENERAL, + imageResidency, + fullImageSubresourceRange + ); + + deviceInterface.cmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 3u, imageShaderAccessBarriers); + } + + const VkSpecializationMapEntry specializationMapEntries[6] = + { + { 1u, 0u * sizeof(deUint32), sizeof(deUint32) }, // GridSize.x + { 2u, 1u * sizeof(deUint32), sizeof(deUint32) }, // GridSize.y + { 3u, 2u * sizeof(deUint32), sizeof(deUint32) }, // GridSize.z + { 4u, 3u * sizeof(deUint32), sizeof(deUint32) }, // WorkGroupSize.x + { 5u, 4u * sizeof(deUint32), sizeof(deUint32) }, // WorkGroupSize.y + { 6u, 5u * sizeof(deUint32), sizeof(deUint32) }, // WorkGroupSize.z + }; + + Unique shaderModule(createShaderModule(deviceInterface, *m_logicalDevice, m_context.getBinaryCollection().get("compute"), 0u)); + + for (deUint32 mipLevelNdx = 0u; mipLevelNdx < imageSparseInfo.mipLevels; ++mipLevelNdx) + { + const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize, mipLevelNdx); + const tcu::UVec3 workGroupSize = computeWorkGroupSize(gridSize); + const tcu::UVec3 specializationData[2] = { gridSize, workGroupSize }; + + const VkSpecializationInfo specializationInfo = + { + sizeof(specializationMapEntries) / sizeof(VkSpecializationMapEntry), // mapEntryCount + specializationMapEntries, // pMapEntries + sizeof(specializationData), // dataSize + specializationData, // pData + }; + + // Create and bind compute pipeline + pipelines[mipLevelNdx] = makeVkSharedPtr(makeComputePipeline(deviceInterface, *m_logicalDevice, *pipelineLayout, *shaderModule, &specializationInfo)); + const VkPipeline computePipeline = **pipelines[mipLevelNdx]; + + deviceInterface.cmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, computePipeline); + + // Create descriptor set + descriptorSets[mipLevelNdx] = makeVkSharedPtr(makeDescriptorSet(deviceInterface, *m_logicalDevice, *descriptorPool, *descriptorSetLayout)); + const VkDescriptorSet descriptorSet = **descriptorSets[mipLevelNdx]; + + // Bind resources + const VkImageSubresourceRange mipLevelRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, mipLevelNdx, 1u, 0u, imageSparseInfo.arrayLayers); + + imageSparseViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, *m_logicalDevice, imageSparse, mapImageViewType(m_imageType), imageSparseInfo.format, mipLevelRange)); + const VkDescriptorImageInfo imageSparseDescInfo = 
makeDescriptorImageInfo(DE_NULL, **imageSparseViews[mipLevelNdx], VK_IMAGE_LAYOUT_GENERAL); + + imageTexelsViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, *m_logicalDevice, imageTexels, mapImageViewType(m_imageType), imageSparseInfo.format, mipLevelRange)); + const VkDescriptorImageInfo imageTexelsDescInfo = makeDescriptorImageInfo(DE_NULL, **imageTexelsViews[mipLevelNdx], VK_IMAGE_LAYOUT_GENERAL); + + imageResidencyViews[mipLevelNdx] = makeVkSharedPtr(makeImageView(deviceInterface, *m_logicalDevice, imageResidency, mapImageViewType(m_imageType), mapTextureFormat(m_residencyFormat), mipLevelRange)); + const VkDescriptorImageInfo imageResidencyDescInfo = makeDescriptorImageInfo(DE_NULL, **imageResidencyViews[mipLevelNdx], VK_IMAGE_LAYOUT_GENERAL); + + DescriptorSetUpdateBuilder descriptorUpdateBuilder; + descriptorUpdateBuilder.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(BINDING_IMAGE_SPARSE), imageSparseDescType(), &imageSparseDescInfo); + descriptorUpdateBuilder.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(BINDING_IMAGE_TEXELS), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &imageTexelsDescInfo); + descriptorUpdateBuilder.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(BINDING_IMAGE_RESIDENCY), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &imageResidencyDescInfo); + + descriptorUpdateBuilder.update(deviceInterface, *m_logicalDevice); + + deviceInterface.cmdBindDescriptorSets(commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL); + + const deUint32 xWorkGroupCount = gridSize.x() / workGroupSize.x() + (gridSize.x() % workGroupSize.x() ? 1u : 0u); + const deUint32 yWorkGroupCount = gridSize.y() / workGroupSize.y() + (gridSize.y() % workGroupSize.y() ? 1u : 0u); + const deUint32 zWorkGroupCount = gridSize.z() / workGroupSize.z() + (gridSize.z() % workGroupSize.z() ? 
1u : 0u); + const tcu::UVec3 maxWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u); + + if (maxWorkGroupCount.x() < xWorkGroupCount || + maxWorkGroupCount.y() < yWorkGroupCount || + maxWorkGroupCount.z() < zWorkGroupCount) + { + TCU_THROW(NotSupportedError, "Image size exceeds compute invocations limit"); + } + + deviceInterface.cmdDispatch(commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount); + } + + { + VkImageMemoryBarrier imageOutputTransferSrcBarriers[2]; + + imageOutputTransferSrcBarriers[0] = makeImageMemoryBarrier + ( + VK_ACCESS_SHADER_WRITE_BIT, + VK_ACCESS_TRANSFER_READ_BIT, + VK_IMAGE_LAYOUT_GENERAL, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + imageTexels, + fullImageSubresourceRange + ); + + imageOutputTransferSrcBarriers[1] = makeImageMemoryBarrier + ( + VK_ACCESS_SHADER_WRITE_BIT, + VK_ACCESS_TRANSFER_READ_BIT, + VK_IMAGE_LAYOUT_GENERAL, + VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + imageResidency, + fullImageSubresourceRange + ); + + deviceInterface.cmdPipelineBarrier(commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 2u, imageOutputTransferSrcBarriers); + } +} + +class SparseShaderIntrinsicsInstanceFetch : public SparseShaderIntrinsicsInstanceStorage +{ +public: + SparseShaderIntrinsicsInstanceFetch (Context& context, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsInstanceStorage(context, function, imageType, imageSize, format) {} + + VkImageUsageFlags imageSparseUsageFlags (void) const; + VkDescriptorType imageSparseDescType (void) const; +}; + +VkImageUsageFlags SparseShaderIntrinsicsInstanceFetch::imageSparseUsageFlags (void) const +{ + return VK_IMAGE_USAGE_SAMPLED_BIT; +} + +VkDescriptorType SparseShaderIntrinsicsInstanceFetch::imageSparseDescType (void) const +{ + return VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; +} + +TestInstance* SparseCaseOpImageSparseFetch::createInstance (Context& context) const +{ + return new SparseShaderIntrinsicsInstanceFetch(context, m_function, m_imageType, m_imageSize, m_format); +} + +class SparseShaderIntrinsicsInstanceRead : public SparseShaderIntrinsicsInstanceStorage +{ +public: + SparseShaderIntrinsicsInstanceRead (Context& context, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsInstanceStorage(context, function, imageType, imageSize, format) {} + + VkImageUsageFlags imageSparseUsageFlags (void) const; + VkDescriptorType imageSparseDescType (void) const; +}; + +VkImageUsageFlags SparseShaderIntrinsicsInstanceRead::imageSparseUsageFlags (void) const +{ + return VK_IMAGE_USAGE_STORAGE_BIT; +} + +VkDescriptorType SparseShaderIntrinsicsInstanceRead::imageSparseDescType (void) const +{ + return VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; +} + +TestInstance* SparseCaseOpImageSparseRead::createInstance (Context& context) const +{ + return new SparseShaderIntrinsicsInstanceRead(context, m_function, m_imageType, m_imageSize, m_format); +} + +} // sparse +} // vkt diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsStorage.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsStorage.hpp new file mode 100644 index 0000000..47f1b7c --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesShaderIntrinsicsStorage.hpp @@ -0,0 +1,95 @@ +#ifndef 
_VKTSPARSERESOURCESSHADERINTRINSICSSTORAGE_HPP +#define _VKTSPARSERESOURCESSHADERINTRINSICSSTORAGE_HPP +/*------------------------------------------------------------------------ + * Vulkan Conformance Tests + * ------------------------ + * + * Copyright (c) 2016 The Khronos Group Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + *//*! + * \file vktSparseResourcesShaderIntrinsicsStorage.hpp + * \brief Sparse Resources Shader Intrinsics for storage images + *//*--------------------------------------------------------------------*/ + +#include "vktSparseResourcesShaderIntrinsicsBase.hpp" + +namespace vkt +{ +namespace sparse +{ + +class SparseShaderIntrinsicsCaseStorage : public SparseShaderIntrinsicsCaseBase +{ +public: + SparseShaderIntrinsicsCaseStorage (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsCaseBase(testCtx, name, function, imageType, imageSize, format) {} + + void initPrograms (vk::SourceCollections& programCollection) const; + + virtual std::string sparseImageTypeDecl(const std::string& imageType, + const std::string& componentType) const = 0; + + virtual std::string sparseImageOpString (const std::string& resultVariable, + const std::string& resultType, + const std::string& image, + const std::string& coord, + const std::string& mipLevel) const = 0; +}; + +class SparseCaseOpImageSparseFetch : public SparseShaderIntrinsicsCaseStorage +{ +public: + SparseCaseOpImageSparseFetch (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsCaseStorage(testCtx, name, function, imageType, imageSize, format) {} + + TestInstance* createInstance(Context& context) const; + + std::string sparseImageTypeDecl(const std::string& imageType, const std::string& componentType) const; + + std::string sparseImageOpString(const std::string& resultVariable, const std::string& resultType, const std::string& image, const std::string& coord, const std::string& mipLevel) const; +}; + +class SparseCaseOpImageSparseRead : public SparseShaderIntrinsicsCaseStorage +{ +public: + SparseCaseOpImageSparseRead (tcu::TestContext& testCtx, + const std::string& name, + const SpirVFunction function, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseShaderIntrinsicsCaseStorage(testCtx, name, function, imageType, imageSize, format) {} + + TestInstance* createInstance(Context& context) const; + + std::string sparseImageTypeDecl(const std::string& imageType, const std::string& componentType) const; + + std::string sparseImageOpString(const std::string& resultVariable, const std::string& resultType, const std::string& image, const std::string& coord, const std::string& mipLevel) const; +}; + +} // sparse +} // vkt + +#endif // 
_VKTSPARSERESOURCESSHADERINTRINSICSSTORAGE_HPP \ No newline at end of file diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp index afcc95d..319f92d 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp @@ -29,6 +29,7 @@ #include "vktSparseResourcesMipmapSparseResidency.hpp" #include "vktSparseResourcesBufferMemoryAliasing.hpp" #include "vktSparseResourcesImageMemoryAliasing.hpp" +#include "vktSparseResourcesShaderIntrinsics.hpp" #include "deUniquePtr.hpp" namespace vkt @@ -47,7 +48,8 @@ tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx) sparseTests->addChild(createMipmapSparseResidencyTests(testCtx)); sparseTests->addChild(createBufferSparseMemoryAliasingTests(testCtx)); sparseTests->addChild(createImageSparseMemoryAliasingTests(testCtx)); - + sparseTests->addChild(createSparseResourcesShaderIntrinsicsTests(testCtx)); + return sparseTests.release(); } diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp index d7b3f1d..c5f4d0c 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp @@ -313,10 +313,11 @@ Move makePipelineLayout (const DeviceInterface& vk, return createPipelineLayout(vk, device, &pipelineLayoutParams); } -Move makeComputePipeline (const DeviceInterface& vk, - const VkDevice device, - const VkPipelineLayout pipelineLayout, - const VkShaderModule shaderModule) +Move makeComputePipeline (const DeviceInterface& vk, + const VkDevice device, + const VkPipelineLayout pipelineLayout, + const VkShaderModule shaderModule, + const VkSpecializationInfo* specializationInfo) { const VkPipelineShaderStageCreateInfo pipelineShaderStageParams = { @@ -326,7 +327,7 @@ Move makeComputePipeline (const DeviceInterface& vk, VK_SHADER_STAGE_COMPUTE_BIT, // VkShaderStageFlagBits stage; shaderModule, // VkShaderModule module; "main", // const char* pName; - DE_NULL, // const VkSpecializationInfo* pSpecializationInfo; + specializationInfo, // const VkSpecializationInfo* pSpecializationInfo; }; const VkComputePipelineCreateInfo pipelineCreateInfo = { @@ -716,6 +717,33 @@ std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format) return std::string() + orderPart + typePart; } +std::string getShaderImageCoordinates (const ImageType imageType, + const std::string& x, + const std::string& xy, + const std::string& xyz) +{ + switch (imageType) + { + case IMAGE_TYPE_1D: + case IMAGE_TYPE_BUFFER: + return x; + + case IMAGE_TYPE_1D_ARRAY: + case IMAGE_TYPE_2D: + return xy; + + case IMAGE_TYPE_2D_ARRAY: + case IMAGE_TYPE_3D: + case IMAGE_TYPE_CUBE: + case IMAGE_TYPE_CUBE_ARRAY: + return xyz; + + default: + DE_ASSERT(0); + return ""; + } +} + VkExtent3D mipLevelExtents (const VkExtent3D& baseExtents, const deUint32 mipLevel) { VkExtent3D result; @@ -811,5 +839,470 @@ VkSparseMemoryBind makeSparseMemoryBind (const vk::DeviceInterface& vk, return memoryBind; } +void beginRenderPass (const DeviceInterface& vk, + const VkCommandBuffer commandBuffer, + const VkRenderPass renderPass, + const VkFramebuffer framebuffer, + const VkRect2D& renderArea, + const std::vector& clearValues) +{ + + const 
VkRenderPassBeginInfo renderPassBeginInfo = { + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + renderPass, // VkRenderPass renderPass; + framebuffer, // VkFramebuffer framebuffer; + renderArea, // VkRect2D renderArea; + static_cast(clearValues.size()), // deUint32 clearValueCount; + &clearValues[0], // const VkClearValue* pClearValues; + }; + + vk.cmdBeginRenderPass(commandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); +} + +void beginRenderPassWithRasterizationDisabled (const DeviceInterface& vk, + const VkCommandBuffer commandBuffer, + const VkRenderPass renderPass, + const VkFramebuffer framebuffer) +{ + const VkRect2D renderArea = {{ 0, 0 }, { 0, 0 }}; + + const VkRenderPassBeginInfo renderPassBeginInfo = { + VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + renderPass, // VkRenderPass renderPass; + framebuffer, // VkFramebuffer framebuffer; + renderArea, // VkRect2D renderArea; + 0u, // uint32_t clearValueCount; + DE_NULL, // const VkClearValue* pClearValues; + }; + + vk.cmdBeginRenderPass(commandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE); +} + +void endRenderPass (const DeviceInterface& vk, + const VkCommandBuffer commandBuffer) +{ + vk.cmdEndRenderPass(commandBuffer); +} + +Move makeRenderPass (const DeviceInterface& vk, + const VkDevice device, + const VkFormat colorFormat) +{ + const VkAttachmentDescription colorAttachmentDescription = + { + (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags; + colorFormat, // VkFormat format; + VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples; + VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp; + VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp; + VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp; + VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp; + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout; + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout finalLayout; + }; + + const VkAttachmentReference colorAttachmentReference = + { + 0u, // deUint32 attachment; + VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout; + }; + + const VkAttachmentReference depthAttachmentReference = + { + VK_ATTACHMENT_UNUSED, // deUint32 attachment; + VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout layout; + }; + + const VkSubpassDescription subpassDescription = + { + (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags; + VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint; + 0u, // deUint32 inputAttachmentCount; + DE_NULL, // const VkAttachmentReference* pInputAttachments; + 1u, // deUint32 colorAttachmentCount; + &colorAttachmentReference, // const VkAttachmentReference* pColorAttachments; + DE_NULL, // const VkAttachmentReference* pResolveAttachments; + &depthAttachmentReference, // const VkAttachmentReference* pDepthStencilAttachment; + 0u, // deUint32 preserveAttachmentCount; + DE_NULL // const deUint32* pPreserveAttachments; + }; + + const VkRenderPassCreateInfo renderPassInfo = + { + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags; + 1u, // deUint32 attachmentCount; + &colorAttachmentDescription, // const VkAttachmentDescription* pAttachments; + 1u, // deUint32 subpassCount; + &subpassDescription, // const VkSubpassDescription* pSubpasses; + 0u, // 
deUint32 dependencyCount; + DE_NULL // const VkSubpassDependency* pDependencies; + }; + + return createRenderPass(vk, device, &renderPassInfo); +} + +Move makeRenderPassWithoutAttachments (const DeviceInterface& vk, + const VkDevice device) +{ + const VkAttachmentReference unusedAttachment = + { + VK_ATTACHMENT_UNUSED, // deUint32 attachment; + VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout layout; + }; + + const VkSubpassDescription subpassDescription = + { + (VkSubpassDescriptionFlags)0, // VkSubpassDescriptionFlags flags; + VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint; + 0u, // deUint32 inputAttachmentCount; + DE_NULL, // const VkAttachmentReference* pInputAttachments; + 0u, // deUint32 colorAttachmentCount; + DE_NULL, // const VkAttachmentReference* pColorAttachments; + DE_NULL, // const VkAttachmentReference* pResolveAttachments; + &unusedAttachment, // const VkAttachmentReference* pDepthStencilAttachment; + 0u, // deUint32 preserveAttachmentCount; + DE_NULL // const deUint32* pPreserveAttachments; + }; + + const VkRenderPassCreateInfo renderPassInfo = + { + VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags; + 0u, // deUint32 attachmentCount; + DE_NULL, // const VkAttachmentDescription* pAttachments; + 1u, // deUint32 subpassCount; + &subpassDescription, // const VkSubpassDescription* pSubpasses; + 0u, // deUint32 dependencyCount; + DE_NULL // const VkSubpassDependency* pDependencies; + }; + + return createRenderPass(vk, device, &renderPassInfo); +} + +Move makeFramebuffer (const DeviceInterface& vk, + const VkDevice device, + const VkRenderPass renderPass, + const VkImageView colorAttachment, + const deUint32 width, + const deUint32 height, + const deUint32 layers) +{ + const VkFramebufferCreateInfo framebufferInfo = { + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkFramebufferCreateFlags)0, // VkFramebufferCreateFlags flags; + renderPass, // VkRenderPass renderPass; + 1u, // uint32_t attachmentCount; + &colorAttachment, // const VkImageView* pAttachments; + width, // uint32_t width; + height, // uint32_t height; + layers, // uint32_t layers; + }; + + return createFramebuffer(vk, device, &framebufferInfo); +} + +Move makeFramebufferWithoutAttachments (const DeviceInterface& vk, + const VkDevice device, + const VkRenderPass renderPass) +{ + const VkFramebufferCreateInfo framebufferInfo = { + VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkFramebufferCreateFlags)0, // VkFramebufferCreateFlags flags; + renderPass, // VkRenderPass renderPass; + 0u, // uint32_t attachmentCount; + DE_NULL, // const VkImageView* pAttachments; + 0u, // uint32_t width; + 0u, // uint32_t height; + 0u, // uint32_t layers; + }; + + return createFramebuffer(vk, device, &framebufferInfo); +} + +GraphicsPipelineBuilder& GraphicsPipelineBuilder::setShader (const DeviceInterface& vk, + const VkDevice device, + const VkShaderStageFlagBits stage, + const ProgramBinary& binary, + const VkSpecializationInfo* specInfo) +{ + VkShaderModule module; + switch (stage) + { + case (VK_SHADER_STAGE_VERTEX_BIT): + DE_ASSERT(m_vertexShaderModule.get() == DE_NULL); + m_vertexShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0); + module = *m_vertexShaderModule; + break; + + case (VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT): + 
DE_ASSERT(m_tessControlShaderModule.get() == DE_NULL); + m_tessControlShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0); + module = *m_tessControlShaderModule; + break; + + case (VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT): + DE_ASSERT(m_tessEvaluationShaderModule.get() == DE_NULL); + m_tessEvaluationShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0); + module = *m_tessEvaluationShaderModule; + break; + + case (VK_SHADER_STAGE_GEOMETRY_BIT): + DE_ASSERT(m_geometryShaderModule.get() == DE_NULL); + m_geometryShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0); + module = *m_geometryShaderModule; + break; + + case (VK_SHADER_STAGE_FRAGMENT_BIT): + DE_ASSERT(m_fragmentShaderModule.get() == DE_NULL); + m_fragmentShaderModule = createShaderModule(vk, device, binary, (VkShaderModuleCreateFlags)0); + module = *m_fragmentShaderModule; + break; + + default: + DE_FATAL("Invalid shader stage"); + return *this; + } + + const VkPipelineShaderStageCreateInfo pipelineShaderStageInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineShaderStageCreateFlags)0, // VkPipelineShaderStageCreateFlags flags; + stage, // VkShaderStageFlagBits stage; + module, // VkShaderModule module; + "main", // const char* pName; + specInfo, // const VkSpecializationInfo* pSpecializationInfo; + }; + + m_shaderStageFlags |= stage; + m_shaderStages.push_back(pipelineShaderStageInfo); + + return *this; +} + +template +inline const T* dataPointer (const std::vector& vec) +{ + return (vec.size() != 0 ? &vec[0] : DE_NULL); +} + +Move GraphicsPipelineBuilder::build (const DeviceInterface& vk, + const VkDevice device, + const VkPipelineLayout pipelineLayout, + const VkRenderPass renderPass) +{ + const VkPipelineVertexInputStateCreateInfo vertexInputStateInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags; + static_cast(m_vertexInputBindings.size()), // uint32_t vertexBindingDescriptionCount; + dataPointer(m_vertexInputBindings), // const VkVertexInputBindingDescription* pVertexBindingDescriptions; + static_cast(m_vertexInputAttributes.size()), // uint32_t vertexAttributeDescriptionCount; + dataPointer(m_vertexInputAttributes), // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions; + }; + + const bool isTessellationEnabled = (m_shaderStageFlags & VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT) != 0; + + const VkPipelineInputAssemblyStateCreateInfo pipelineInputAssemblyStateInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineInputAssemblyStateCreateFlags)0, // VkPipelineInputAssemblyStateCreateFlags flags; + isTessellationEnabled ? 
VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : m_primitiveTopology, // VkPrimitiveTopology topology; + VK_FALSE, // VkBool32 primitiveRestartEnable; + }; + + const VkPipelineTessellationStateCreateInfo pipelineTessellationStateInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineTessellationStateCreateFlags)0, // VkPipelineTessellationStateCreateFlags flags; + m_patchControlPoints, // uint32_t patchControlPoints; + }; + + const VkViewport viewport = makeViewport + ( + 0.0f, 0.0f, + static_cast(m_renderSize.x()), static_cast(m_renderSize.y()), + 0.0f, 1.0f + ); + + const VkRect2D scissor = + { + makeOffset2D(0, 0), + makeExtent2D(m_renderSize.x(), m_renderSize.y()), + }; + + const VkPipelineViewportStateCreateInfo pipelineViewportStateInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags; + 1u, // uint32_t viewportCount; + &viewport, // const VkViewport* pViewports; + 1u, // uint32_t scissorCount; + &scissor, // const VkRect2D* pScissors; + }; + + const bool isRasterizationDisabled = ((m_shaderStageFlags & VK_SHADER_STAGE_FRAGMENT_BIT) == 0); + + const VkPipelineRasterizationStateCreateInfo pipelineRasterizationStateInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineRasterizationStateCreateFlags)0, // VkPipelineRasterizationStateCreateFlags flags; + VK_FALSE, // VkBool32 depthClampEnable; + isRasterizationDisabled, // VkBool32 rasterizerDiscardEnable; + VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode; + m_cullModeFlags, // VkCullModeFlags cullMode; + m_frontFace, // VkFrontFace frontFace; + VK_FALSE, // VkBool32 depthBiasEnable; + 0.0f, // float depthBiasConstantFactor; + 0.0f, // float depthBiasClamp; + 0.0f, // float depthBiasSlopeFactor; + 1.0f, // float lineWidth; + }; + + const VkPipelineMultisampleStateCreateInfo pipelineMultisampleStateInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineMultisampleStateCreateFlags)0, // VkPipelineMultisampleStateCreateFlags flags; + VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples; + VK_FALSE, // VkBool32 sampleShadingEnable; + 0.0f, // float minSampleShading; + DE_NULL, // const VkSampleMask* pSampleMask; + VK_FALSE, // VkBool32 alphaToCoverageEnable; + VK_FALSE, // VkBool32 alphaToOneEnable; + }; + + const VkStencilOpState stencilOpState = makeStencilOpState + ( + VK_STENCIL_OP_KEEP, // stencil fail + VK_STENCIL_OP_KEEP, // depth & stencil pass + VK_STENCIL_OP_KEEP, // depth only fail + VK_COMPARE_OP_NEVER, // compare op + 0u, // compare mask + 0u, // write mask + 0u // reference + ); + + const VkPipelineDepthStencilStateCreateInfo pipelineDepthStencilStateInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineDepthStencilStateCreateFlags)0, // VkPipelineDepthStencilStateCreateFlags flags; + VK_FALSE, // VkBool32 depthTestEnable; + VK_FALSE, // VkBool32 depthWriteEnable; + VK_COMPARE_OP_LESS, // VkCompareOp depthCompareOp; + VK_FALSE, // VkBool32 depthBoundsTestEnable; + VK_FALSE, // VkBool32 stencilTestEnable; + stencilOpState, // VkStencilOpState front; + stencilOpState, // VkStencilOpState back; + 0.0f, // float 
minDepthBounds; + 1.0f, // float maxDepthBounds; + }; + + const VkColorComponentFlags colorComponentsAll = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT; + + std::vector colorBlendAttachmentsStates; + + for (deUint32 attachmentNdx = 0; attachmentNdx < m_attachmentsCount; ++attachmentNdx) + { + const VkPipelineColorBlendAttachmentState colorBlendAttachmentState = + { + m_blendEnable, // VkBool32 blendEnable; + VK_BLEND_FACTOR_SRC_ALPHA, // VkBlendFactor srcColorBlendFactor; + VK_BLEND_FACTOR_ONE, // VkBlendFactor dstColorBlendFactor; + VK_BLEND_OP_ADD, // VkBlendOp colorBlendOp; + VK_BLEND_FACTOR_SRC_ALPHA, // VkBlendFactor srcAlphaBlendFactor; + VK_BLEND_FACTOR_ONE, // VkBlendFactor dstAlphaBlendFactor; + VK_BLEND_OP_ADD, // VkBlendOp alphaBlendOp; + colorComponentsAll, // VkColorComponentFlags colorWriteMask; + }; + + colorBlendAttachmentsStates.push_back(colorBlendAttachmentState); + } + + const VkPipelineColorBlendStateCreateInfo pipelineColorBlendStateInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineColorBlendStateCreateFlags)0, // VkPipelineColorBlendStateCreateFlags flags; + VK_FALSE, // VkBool32 logicOpEnable; + VK_LOGIC_OP_COPY, // VkLogicOp logicOp; + static_cast(colorBlendAttachmentsStates.size()), // deUint32 attachmentCount; + dataPointer(colorBlendAttachmentsStates), // const VkPipelineColorBlendAttachmentState* pAttachments; + { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4]; + }; + + const bool hasDynamicState = static_cast(m_dynamicStates.size()) > 0u; + + const VkPipelineDynamicStateCreateInfo dynamicStateCreateInfo = + { + VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineDynamicStateCreateFlags)0, // VkPipelineDynamicStateCreateFlags flags; + static_cast(m_dynamicStates.size()), // deUint32 dynamicStateCount; + dataPointer(m_dynamicStates), // const VkDynamicState* pDynamicStates; + }; + + const VkGraphicsPipelineCreateInfo graphicsPipelineInfo = + { + VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + (VkPipelineCreateFlags)0, // VkPipelineCreateFlags flags; + static_cast(m_shaderStages.size()), // deUint32 stageCount; + dataPointer(m_shaderStages), // const VkPipelineShaderStageCreateInfo* pStages; + &vertexInputStateInfo, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState; + &pipelineInputAssemblyStateInfo, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState; + (isTessellationEnabled ? &pipelineTessellationStateInfo : DE_NULL), // const VkPipelineTessellationStateCreateInfo* pTessellationState; + (isRasterizationDisabled ? DE_NULL : &pipelineViewportStateInfo), // const VkPipelineViewportStateCreateInfo* pViewportState; + &pipelineRasterizationStateInfo, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState; + (isRasterizationDisabled ? DE_NULL : &pipelineMultisampleStateInfo), // const VkPipelineMultisampleStateCreateInfo* pMultisampleState; + (isRasterizationDisabled ? DE_NULL : &pipelineDepthStencilStateInfo), // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState; + (isRasterizationDisabled ? DE_NULL : &pipelineColorBlendStateInfo), // const VkPipelineColorBlendStateCreateInfo* pColorBlendState; + (hasDynamicState ? 
&dynamicStateCreateInfo : DE_NULL), // const VkPipelineDynamicStateCreateInfo* pDynamicState; + pipelineLayout, // VkPipelineLayout layout; + renderPass, // VkRenderPass renderPass; + 0u, // deUint32 subpass; + DE_NULL, // VkPipeline basePipelineHandle; + 0u, // deInt32 basePipelineIndex; + }; + + return createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineInfo); +} + +void requireFeatures (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const FeatureFlags flags) +{ + const VkPhysicalDeviceFeatures features = getPhysicalDeviceFeatures(vki, physDevice); + + if (((flags & FEATURE_TESSELLATION_SHADER) != 0) && !features.tessellationShader) + throw tcu::NotSupportedError("Tessellation shader not supported"); + + if (((flags & FEATURE_GEOMETRY_SHADER) != 0) && !features.geometryShader) + throw tcu::NotSupportedError("Geometry shader not supported"); + + if (((flags & FEATURE_SHADER_FLOAT_64) != 0) && !features.shaderFloat64) + throw tcu::NotSupportedError("Double-precision floats not supported"); + + if (((flags & FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS) != 0) && !features.vertexPipelineStoresAndAtomics) + throw tcu::NotSupportedError("SSBO and image writes not supported in vertex pipeline"); + + if (((flags & FEATURE_FRAGMENT_STORES_AND_ATOMICS) != 0) && !features.fragmentStoresAndAtomics) + throw tcu::NotSupportedError("SSBO and image writes not supported in fragment shader"); + + if (((flags & FEATURE_SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE) != 0) && !features.shaderTessellationAndGeometryPointSize) + throw tcu::NotSupportedError("Tessellation and geometry shaders don't support PointSize built-in"); +} + } // sparse } // vkt diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp index 3ac4162..88d967d 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp @@ -51,17 +51,10 @@ enum ImageType IMAGE_TYPE_LAST }; -vk::VkImageType mapImageType (const ImageType imageType); -vk::VkImageViewType mapImageViewType (const ImageType imageType); -std::string getImageTypeName (const ImageType imageType); -std::string getShaderImageType (const tcu::TextureFormat& format, const ImageType imageType); -std::string getShaderImageDataType (const tcu::TextureFormat& format); -std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format); - class Buffer { public: - Buffer (const vk::DeviceInterface& vk, + Buffer (const vk::DeviceInterface& deviceInterface, const vk::VkDevice device, vk::Allocator& allocator, const vk::VkBufferCreateInfo& bufferCreateInfo, @@ -82,7 +75,7 @@ private: class Image { public: - Image (const vk::DeviceInterface& vk, + Image (const vk::DeviceInterface& deviceInterface, const vk::VkDevice device, vk::Allocator& allocator, const vk::VkImageCreateInfo& imageCreateInfo, @@ -100,19 +93,122 @@ private: Image& operator= (const Image&); }; -tcu::UVec3 getShaderGridSize (const ImageType imageType, - const tcu::UVec3& imageSize, - const deUint32 mipLevel = 0); //!< Size used for addresing image in a shader -tcu::UVec3 getLayerSize (const ImageType imageType, const tcu::UVec3& imageSize); //!< Size of a single layer -deUint32 getNumLayers (const ImageType imageType, const tcu::UVec3& imageSize); //!< Number of array layers (for array and cube types) -deUint32 getNumPixels (const ImageType imageType, 
const tcu::UVec3& imageSize); //!< Number of texels in an image -deUint32 getDimensions (const ImageType imageType); //!< Coordinate dimension used for addressing (e.g. 3 (x,y,z) for 2d array) -deUint32 getLayerDimensions (const ImageType imageType); //!< Coordinate dimension used for addressing a single layer (e.g. 2 (x,y) for 2d array) +class GraphicsPipelineBuilder +{ +public: + GraphicsPipelineBuilder (void) : m_renderSize (0, 0) + , m_shaderStageFlags (0u) + , m_cullModeFlags (vk::VK_CULL_MODE_NONE) + , m_frontFace (vk::VK_FRONT_FACE_COUNTER_CLOCKWISE) + , m_patchControlPoints (1u) + , m_attachmentsCount (1u) + , m_blendEnable (false) + , m_primitiveTopology (vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST) {} + + GraphicsPipelineBuilder& setRenderSize (const tcu::IVec2& size) { m_renderSize = size; return *this; } + GraphicsPipelineBuilder& setShader (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkShaderStageFlagBits stage, const vk::ProgramBinary& binary, const vk::VkSpecializationInfo* specInfo); + GraphicsPipelineBuilder& setPatchControlPoints (const deUint32 controlPoints) { m_patchControlPoints = controlPoints; return *this; } + GraphicsPipelineBuilder& setAttachmentsCount (const deUint32 attachmentsCount) { m_attachmentsCount = attachmentsCount; return *this; } + GraphicsPipelineBuilder& setCullModeFlags (const vk::VkCullModeFlags cullModeFlags) { m_cullModeFlags = cullModeFlags; return *this; } + GraphicsPipelineBuilder& setFrontFace (const vk::VkFrontFace frontFace) { m_frontFace = frontFace; return *this; } + GraphicsPipelineBuilder& setBlend (const bool enable) { m_blendEnable = enable; return *this; } + + //! Applies only to pipelines without tessellation shaders. + GraphicsPipelineBuilder& setPrimitiveTopology (const vk::VkPrimitiveTopology topology) { m_primitiveTopology = topology; return *this; } + + GraphicsPipelineBuilder& addVertexBinding (const vk::VkVertexInputBindingDescription vertexBinding) { m_vertexInputBindings.push_back(vertexBinding); return *this; } + GraphicsPipelineBuilder& addVertexAttribute (const vk::VkVertexInputAttributeDescription vertexAttribute) { m_vertexInputAttributes.push_back(vertexAttribute); return *this; } + GraphicsPipelineBuilder& addDynamicState (const vk::VkDynamicState dynamicState) { m_dynamicStates.push_back(dynamicState); return *this; } + + vk::Move build (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkPipelineLayout pipelineLayout, const vk::VkRenderPass renderPass); -bool isImageSizeSupported (const vk::InstanceInterface& instance, - const vk::VkPhysicalDevice physicalDevice, - const ImageType imageType, - const tcu::UVec3& imageSize); +private: + tcu::IVec2 m_renderSize; + vk::Move m_vertexShaderModule; + vk::Move m_fragmentShaderModule; + vk::Move m_geometryShaderModule; + vk::Move m_tessControlShaderModule; + vk::Move m_tessEvaluationShaderModule; + std::vector m_shaderStages; + std::vector m_vertexInputBindings; + std::vector m_vertexInputAttributes; + std::vector m_dynamicStates; + vk::VkShaderStageFlags m_shaderStageFlags; + vk::VkCullModeFlags m_cullModeFlags; + vk::VkFrontFace m_frontFace; + deUint32 m_patchControlPoints; + deUint32 m_attachmentsCount; + bool m_blendEnable; + vk::VkPrimitiveTopology m_primitiveTopology; + + GraphicsPipelineBuilder (const GraphicsPipelineBuilder&); + GraphicsPipelineBuilder& operator= (const GraphicsPipelineBuilder&); +}; + +enum FeatureFlagBits +{ + FEATURE_TESSELLATION_SHADER = 1u << 0, + FEATURE_GEOMETRY_SHADER = 1u << 1, + 
FEATURE_SHADER_FLOAT_64 = 1u << 2, + FEATURE_VERTEX_PIPELINE_STORES_AND_ATOMICS = 1u << 3, + FEATURE_FRAGMENT_STORES_AND_ATOMICS = 1u << 4, + FEATURE_SHADER_TESSELLATION_AND_GEOMETRY_POINT_SIZE = 1u << 5, +}; +typedef deUint32 FeatureFlags; + +// Image helper functions +vk::VkImageType mapImageType (const ImageType imageType); +vk::VkImageViewType mapImageViewType (const ImageType imageType); +std::string getImageTypeName (const ImageType imageType); +std::string getShaderImageType (const tcu::TextureFormat& format, + const ImageType imageType); +std::string getShaderImageDataType (const tcu::TextureFormat& format); +std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format); +std::string getShaderImageCoordinates (const ImageType imageType, + const std::string& x, + const std::string& xy, + const std::string& xyz); +//!< Size used for addresing image in a compute shader +tcu::UVec3 getShaderGridSize (const ImageType imageType, + const tcu::UVec3& imageSize, + const deUint32 mipLevel = 0); +//!< Size of a single image layer +tcu::UVec3 getLayerSize (const ImageType imageType, + const tcu::UVec3& imageSize); +//!< Number of array layers (for array and cube types) +deUint32 getNumLayers (const ImageType imageType, + const tcu::UVec3& imageSize); +//!< Number of texels in an image +deUint32 getNumPixels (const ImageType imageType, + const tcu::UVec3& imageSize); +//!< Coordinate dimension used for addressing (e.g. 3 (x,y,z) for 2d array) +deUint32 getDimensions (const ImageType imageType); +//!< Coordinate dimension used for addressing a single layer (e.g. 2 (x,y) for 2d array) +deUint32 getLayerDimensions (const ImageType imageType); +//!< Helper function for checking if requested image size does not exceed device limits +bool isImageSizeSupported (const vk::InstanceInterface& instance, + const vk::VkPhysicalDevice physicalDevice, + const ImageType imageType, + const tcu::UVec3& imageSize); + +vk::VkExtent3D mipLevelExtents (const vk::VkExtent3D& baseExtents, + const deUint32 mipLevel); + +tcu::UVec3 mipLevelExtents (const tcu::UVec3& baseExtents, + const deUint32 mipLevel); + +deUint32 getImageMaxMipLevels (const vk::VkImageFormatProperties& imageFormatProperties, + const vk::VkExtent3D& extent); + +deUint32 getImageMipLevelSizeInBytes (const vk::VkExtent3D& baseExtents, + const deUint32 layersCount, + const tcu::TextureFormat& format, + const deUint32 mipmapLevel); + +deUint32 getImageSizeInBytes (const vk::VkExtent3D& baseExtents, + const deUint32 layersCount, + const tcu::TextureFormat& format, + const deUint32 mipmapLevelsCount = 1u); vk::Move makeCommandPool (const vk::DeviceInterface& vk, const vk::VkDevice device, @@ -129,7 +225,8 @@ vk::Move makePipelineLayout (const vk::DeviceInterface& vk::Move makeComputePipeline (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkPipelineLayout pipelineLayout, - const vk::VkShaderModule shaderModule); + const vk::VkShaderModule shaderModule, + const vk::VkSpecializationInfo* specializationInfo = 0); vk::Move makeBufferView (const vk::DeviceInterface& vk, const vk::VkDevice device, @@ -211,25 +308,6 @@ void submitCommandsAndWait (const vk::DeviceInterface& vk, const deUint32 signalSemaphoreCount = 0, const vk::VkSemaphore* pSignalSemaphores = DE_NULL); -vk::VkExtent3D mipLevelExtents (const vk::VkExtent3D& baseExtents, - const deUint32 mipLevel); - -tcu::UVec3 mipLevelExtents (const tcu::UVec3& baseExtents, - const deUint32 mipLevel); - -deUint32 getImageMaxMipLevels (const vk::VkImageFormatProperties& 
imageFormatProperties, - const vk::VkExtent3D& extent); - -deUint32 getImageMipLevelSizeInBytes (const vk::VkExtent3D& baseExtents, - const deUint32 layersCount, - const tcu::TextureFormat& format, - const deUint32 mipmapLevel); - -deUint32 getImageSizeInBytes (const vk::VkExtent3D& baseExtents, - const deUint32 layersCount, - const tcu::TextureFormat& format, - const deUint32 mipmapLevelsCount = 1u); - vk::VkSparseImageMemoryBind makeSparseImageMemoryBind (const vk::DeviceInterface& vk, const vk::VkDevice device, const vk::VkDeviceSize allocationSize, @@ -243,12 +321,57 @@ vk::VkSparseMemoryBind makeSparseMemoryBind (const vk::DeviceInterface& vk const vk::VkDeviceSize allocationSize, const deUint32 memoryType, const vk::VkDeviceSize resourceOffset); + +vk::Move makeRenderPass (const vk::DeviceInterface& vk, + const vk::VkDevice device, + const vk::VkFormat colorFormat); + +vk::Move makeRenderPassWithoutAttachments(const vk::DeviceInterface& vk, + const vk::VkDevice device); + +vk::Move makeFramebuffer (const vk::DeviceInterface& vk, + const vk::VkDevice device, + const vk::VkRenderPass renderPass, + const vk::VkImageView colorAttachment, + const deUint32 width, + const deUint32 height, + const deUint32 layers); + +vk::Move makeFramebufferWithoutAttachments (const vk::DeviceInterface& vk, + const vk::VkDevice device, + const vk::VkRenderPass renderPass); + +void beginRenderPass (const vk::DeviceInterface& vk, + const vk::VkCommandBuffer commandBuffer, + const vk::VkRenderPass renderPass, + const vk::VkFramebuffer framebuffer, + const vk::VkRect2D& renderArea, + const std::vector& clearValues); + +void beginRenderPassWithRasterizationDisabled(const vk::DeviceInterface& vk, + const vk::VkCommandBuffer commandBuffer, + const vk::VkRenderPass renderPass, + const vk::VkFramebuffer framebuffer); + +void endRenderPass (const vk::DeviceInterface& vk, + const vk::VkCommandBuffer commandBuffer); + +void requireFeatures (const vk::InstanceInterface& vki, + const vk::VkPhysicalDevice physicalDevice, + const FeatureFlags flags); + template inline de::SharedPtr > makeVkSharedPtr (vk::Move vkMove) { return de::SharedPtr >(new vk::Unique(vkMove)); } +template +inline std::size_t sizeInBytes(const std::vector& vec) +{ + return vec.size() * sizeof(vec[0]); +} + } // sparse } // vkt diff --git a/external/vulkancts/mustpass/1.0.1/com.drawelements.deqp.vk.xml b/external/vulkancts/mustpass/1.0.1/com.drawelements.deqp.vk.xml index 6619323..83cfce2 100644 --- a/external/vulkancts/mustpass/1.0.1/com.drawelements.deqp.vk.xml +++ b/external/vulkancts/mustpass/1.0.1/com.drawelements.deqp.vk.xml @@ -308690,6 +308690,1126 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/external/vulkancts/mustpass/1.0.1/vk-default.txt b/external/vulkancts/mustpass/1.0.1/vk-default.txt index b227aaa..fa06c87 100644 --- a/external/vulkancts/mustpass/1.0.1/vk-default.txt +++ b/external/vulkancts/mustpass/1.0.1/vk-default.txt @@ -96338,6 +96338,318 @@ dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.256_256_16 dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.128_128_8 dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.503_137_3 dEQP-VK.sparse_resources.image_sparse_memory_aliasing.3d.rgba8ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r32i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r16i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r16i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r16i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r16i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r8i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r8i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r8i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.r8i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba32ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba32ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba16ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba16ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba8ui.512_256_1 
+dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba8ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_fetch.rgba8ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r32i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r32i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r32i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r32i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r16i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r16i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r16i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r16i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r8i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r8i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r8i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.r8i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba32ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba32ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba16ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba16ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba8ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_fetch.rgba8ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r32i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r32i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r32i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r32i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r16i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r16i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r16i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r16i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r8i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r8i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r8i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.r8i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba32ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba32ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba16ui.128_128_1 
+dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba16ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba16ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba8ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba8ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_read.rgba8ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r32i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r32i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r32i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r32i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r16i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r16i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r16i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r16i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r8i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r8i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r8i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.r8i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba32ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba32ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba16ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba16ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba8ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_read.rgba8ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r32i.256_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r32i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r32i.137_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r32i.11_11_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r16i.256_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r16i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r16i.137_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r16i.11_11_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r8i.256_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r8i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r8i.137_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.r8i.11_11_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba32ui.256_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba32ui.137_137_1 
+dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba32ui.11_11_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba16ui.256_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba16ui.137_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba16ui.11_11_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba8ui.256_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba8ui.137_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_sparse_read.rgba8ui.11_11_1 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r32i.256_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r32i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r32i.137_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r32i.11_11_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r16i.256_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r16i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r16i.137_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r16i.11_11_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r8i.256_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r8i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r8i.137_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.r8i.11_11_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba32ui.256_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba32ui.137_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba32ui.11_11_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba16ui.256_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba16ui.137_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba16ui.11_11_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba8ui.256_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba8ui.137_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.cube_array_sparse_read.rgba8ui.11_11_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r32i.256_256_16 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r32i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r32i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r32i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r16i.256_256_16 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r16i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r16i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r16i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r8i.256_256_16 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r8i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r8i.503_137_3 
+dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.r8i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba32ui.256_256_16 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba32ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba32ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba16ui.256_256_16 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba16ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba16ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba8ui.256_256_16 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba8ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.3d_sparse_read.rgba8ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r32i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r32i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r32i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r32i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r16i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r16i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r16i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r16i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r8i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r8i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r8i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.r8i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba32ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba32ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba16ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba16ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba8ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba8ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_explicit_lod.rgba8ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r32i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r32i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r32i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r32i.11_37_3 
+dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r16i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r16i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r16i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r16i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r8i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r8i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r8i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.r8i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba32ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba32ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba16ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba16ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba8ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_explicit_lod.rgba8ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r32i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r32i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r32i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r32i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r16i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r16i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r16i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r16i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r8i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r8i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r8i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.r8i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba32ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba32ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba16ui.128_128_1 
+dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba16ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba16ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba8ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba8ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_sample_implicit_lod.rgba8ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r32i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r32i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r32i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r32i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r16i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r16i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r16i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r16i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r8i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r8i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r8i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.r8i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba32ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba32ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba16ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba16ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba8ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_sample_implicit_lod.rgba8ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r32i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r32i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r32i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r32i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r16i.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r16i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r16i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r16i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r8i.512_256_1 
+dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r8i.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r8i.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.r8i.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba32ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba32ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba32ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba32ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba16ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba16ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba16ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba16ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba8ui.512_256_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba8ui.128_128_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba8ui.503_137_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_sparse_gather.rgba8ui.11_37_1 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r32i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r32i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r32i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r32i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r16i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r16i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r16i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r16i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r8i.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r8i.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r8i.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.r8i.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba32ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba32ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba32ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba32ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba16ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba16ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba16ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba16ui.11_37_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba8ui.512_256_6 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba8ui.128_128_8 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba8ui.503_137_3 +dEQP-VK.sparse_resources.shader_intrinsics.2d_array_sparse_gather.rgba8ui.11_37_3 dEQP-VK.tessellation.limits.max_tessellation_generation_level dEQP-VK.tessellation.limits.max_tessellation_patch_size dEQP-VK.tessellation.limits.max_tessellation_control_per_vertex_input_components -- 2.7.4
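
Note (illustrative only, not part of the patch): the sketch below shows one way the render pass, framebuffer and GraphicsPipelineBuilder helpers added to vktSparseResourcesTestsUtil could be driven when recording a simple draw into a color target, similar to what the graphics-path cases do. The function name recordDrawToColorTarget, its parameter list and the shader binaries are hypothetical; only the helper signatures come from this patch, and makeClearValueColorF32 is assumed from the existing vkTypeUtil utilities. The color image is assumed to already be in VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, matching the initial layout used by makeRenderPass.

#include "vktSparseResourcesTestsUtil.hpp"
#include "vkRef.hpp"
#include "vkTypeUtil.hpp"

namespace vkt
{
namespace sparse
{

// Hypothetical helper: builds a one-attachment render pass and framebuffer,
// assembles a vertex+fragment pipeline with GraphicsPipelineBuilder and
// records a 4-vertex triangle-strip draw into the given command buffer.
void recordDrawToColorTarget (const vk::DeviceInterface&	vk,
							  const vk::VkDevice			device,
							  const vk::VkCommandBuffer		cmdBuffer,
							  const vk::VkPipelineLayout	pipelineLayout,
							  const vk::ProgramBinary&		vertexBinary,
							  const vk::ProgramBinary&		fragmentBinary,
							  const vk::VkImageView			colorView,
							  const vk::VkFormat			colorFormat,
							  const tcu::IVec2&				renderSize)
{
	const vk::Unique<vk::VkRenderPass>	renderPass	(makeRenderPass(vk, device, colorFormat));
	const vk::Unique<vk::VkFramebuffer>	framebuffer	(makeFramebuffer(vk, device, *renderPass, colorView,
														(deUint32)renderSize.x(), (deUint32)renderSize.y(), 1u));

	const vk::Unique<vk::VkPipeline>	pipeline	(GraphicsPipelineBuilder()
		.setRenderSize			(renderSize)
		.setPrimitiveTopology	(vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP)
		.setShader				(vk, device, vk::VK_SHADER_STAGE_VERTEX_BIT,   vertexBinary,   DE_NULL)
		.setShader				(vk, device, vk::VK_SHADER_STAGE_FRAGMENT_BIT, fragmentBinary, DE_NULL)
		.build					(vk, device, pipelineLayout, *renderPass));

	// Clear to black; the render pass uses VK_ATTACHMENT_LOAD_OP_CLEAR, so one clear value is required.
	const vk::VkRect2D						renderArea	= { { 0, 0 }, { (deUint32)renderSize.x(), (deUint32)renderSize.y() } };
	const std::vector<vk::VkClearValue>		clearValues	(1u, vk::makeClearValueColorF32(0.0f, 0.0f, 0.0f, 0.0f));

	beginRenderPass(vk, cmdBuffer, *renderPass, *framebuffer, renderArea, clearValues);
	vk.cmdBindPipeline(cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
	vk.cmdDraw(cmdBuffer, 4u, 1u, 0u, 0u);
	endRenderPass(vk, cmdBuffer);
}

} // sparse
} // vkt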