From 029550532951eec8a34b752f1ea6957a4f1c69d9 Mon Sep 17 00:00:00 2001 From: Kantoch Date: Wed, 24 Feb 2016 10:31:26 +0100 Subject: [PATCH] Sparse Image Residency Test --- .../modules/vulkan/sparse_resources/CMakeLists.txt | 2 + .../vktSparseResourcesImageSparseResidency.cpp | 816 +++++++++++++++++++++ .../vktSparseResourcesImageSparseResidency.hpp | 46 ++ .../sparse_resources/vktSparseResourcesTests.cpp | 2 + .../vktSparseResourcesTestsUtil.cpp | 18 + .../vktSparseResourcesTestsUtil.hpp | 1 + 6 files changed, 885 insertions(+) create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.cpp create mode 100644 external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.hpp diff --git a/external/vulkancts/modules/vulkan/sparse_resources/CMakeLists.txt b/external/vulkancts/modules/vulkan/sparse_resources/CMakeLists.txt index 065def1..acde67e 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/CMakeLists.txt +++ b/external/vulkancts/modules/vulkan/sparse_resources/CMakeLists.txt @@ -1,6 +1,8 @@ include_directories(..) set(DEQP_VK_IMAGE_SRCS + vktSparseResourcesImageSparseResidency.cpp + vktSparseResourcesImageSparseResidency.hpp vktSparseResourcesBufferSparseResidency.cpp vktSparseResourcesBufferSparseResidency.hpp vktSparseResourcesImageSparseBinding.cpp diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.cpp new file mode 100644 index 0000000..e81fe0c --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.cpp @@ -0,0 +1,816 @@ +/*------------------------------------------------------------------------ +* Vulkan Conformance Tests +* ------------------------ +* +* Copyright (c) 2016 The Khronos Group Inc. +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and/or associated documentation files (the +* "Materials"), to deal in the Materials without restriction, including +* without limitation the rights to use, copy, modify, merge, publish, +* distribute, sublicense, and/or sell copies of the Materials, and to +* permit persons to whom the Materials are furnished to do so, subject to +* the following conditions: +* +* The above copyright notice(s) and this permission notice shall be included +* in all copies or substantial portions of the Materials. +* +* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +* +*//*! 
+* \file vktSparseResourcesImageSparseResidency.cpp +* \brief Sparse partially resident images tests +*//*--------------------------------------------------------------------*/ + +#include "vktSparseResourcesBufferSparseBinding.hpp" +#include "vktSparseResourcesTestsUtil.hpp" +#include "vktSparseResourcesBase.hpp" +#include "vktTestCaseUtil.hpp" + +#include "vkDefs.hpp" +#include "vkRef.hpp" +#include "vkRefUtil.hpp" +#include "vkPlatform.hpp" +#include "vkPrograms.hpp" +#include "vkMemUtil.hpp" +#include "vkBuilderUtil.hpp" +#include "vkImageUtil.hpp" +#include "vkQueryUtil.hpp" +#include "vkTypeUtil.hpp" + +#include "deUniquePtr.hpp" +#include "deStringUtil.hpp" + +#include +#include + +using namespace vk; + +namespace vkt +{ +namespace sparse +{ +namespace +{ + +const std::string getCoordStr (const ImageType imageType, + const std::string& x, + const std::string& y, + const std::string& z) +{ + switch (imageType) + { + case IMAGE_TYPE_1D: + case IMAGE_TYPE_BUFFER: + return x; + + case IMAGE_TYPE_1D_ARRAY: + case IMAGE_TYPE_2D: + return "ivec2(" + x + "," + y + ")"; + + case IMAGE_TYPE_2D_ARRAY: + case IMAGE_TYPE_3D: + case IMAGE_TYPE_CUBE: + case IMAGE_TYPE_CUBE_ARRAY: + return "ivec3(" + x + "," + y + "," + z + ")"; + + default: + DE_ASSERT(false); + return ""; + } +} + +deUint32 getNumUsedChannels (const tcu::TextureFormat& format) +{ + switch (format.order) + { + case tcu::TextureFormat::R: return 1; + case tcu::TextureFormat::A: return 1; + case tcu::TextureFormat::I: return 1; + case tcu::TextureFormat::L: return 1; + case tcu::TextureFormat::LA: return 2; + case tcu::TextureFormat::RG: return 2; + case tcu::TextureFormat::RA: return 2; + case tcu::TextureFormat::RGB: return 3; + case tcu::TextureFormat::RGBA: return 4; + case tcu::TextureFormat::ARGB: return 4; + case tcu::TextureFormat::BGR: return 3; + case tcu::TextureFormat::BGRA: return 4; + case tcu::TextureFormat::sR: return 1; + case tcu::TextureFormat::sRG: return 2; + case tcu::TextureFormat::sRGB: return 3; + case tcu::TextureFormat::sRGBA: return 4; + case tcu::TextureFormat::sBGR: return 3; + case tcu::TextureFormat::sBGRA: return 4; + case tcu::TextureFormat::D: return 1; + case tcu::TextureFormat::S: return 1; + case tcu::TextureFormat::DS: return 2; + default: + DE_ASSERT(DE_FALSE); + return 0; + } +} + +tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor) +{ + tcu::UVec3 result; + + result.x() = extent.width / divisor.width + ((extent.width % divisor.width) ? 1u : 0u); + result.y() = extent.height / divisor.height + ((extent.height % divisor.height) ? 1u : 0u); + result.z() = extent.depth / divisor.depth + ((extent.depth % divisor.depth) ? 
1u : 0u); + + return result; +} + +tcu::UVec3 computeWorkGroupSize (const tcu::UVec3& gridSize) +{ + const deUint32 maxComputeWorkGroupInvocations = 128u; + const tcu::UVec3 maxComputeWorkGroupSize = tcu::UVec3(128u, 128u, 64u); + + const deUint32 xWorkGroupSize = std::min(std::min(gridSize.x(), maxComputeWorkGroupSize.x()), maxComputeWorkGroupInvocations); + const deUint32 yWorkGroupSize = std::min(std::min(gridSize.y(), maxComputeWorkGroupSize.y()), maxComputeWorkGroupInvocations / xWorkGroupSize); + const deUint32 zWorkGroupSize = std::min(std::min(gridSize.z(), maxComputeWorkGroupSize.z()), maxComputeWorkGroupInvocations / (xWorkGroupSize*yWorkGroupSize)); + + return tcu::UVec3(xWorkGroupSize, yWorkGroupSize, zWorkGroupSize); +} + +class ImageSparseResidencyCase : public TestCase +{ +public: + ImageSparseResidencyCase (tcu::TestContext& testCtx, + const std::string& name, + const std::string& description, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format, + const glu::GLSLVersion glslVersion); + + void initPrograms (SourceCollections& sourceCollections) const; + TestInstance* createInstance (Context& context) const; + +private: + const ImageType m_imageType; + const tcu::UVec3 m_imageSize; + const tcu::TextureFormat m_format; + const glu::GLSLVersion m_glslVersion; +}; + +ImageSparseResidencyCase::ImageSparseResidencyCase (tcu::TestContext& testCtx, + const std::string& name, + const std::string& description, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format, + const glu::GLSLVersion glslVersion) + : TestCase (testCtx, name, description) + , m_imageType (imageType) + , m_imageSize (imageSize) + , m_format (format) + , m_glslVersion (glslVersion) +{ +} + + +void ImageSparseResidencyCase::initPrograms (SourceCollections& sourceCollections) const +{ + // Create compute program + const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion); + const std::string imageTypeStr = getShaderImageType(m_format, m_imageType); + const std::string formatQualifierStr = getShaderImageFormatQualifier(m_format); + const std::string formatDataStr = getShaderImageDataType(m_format); + const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize); + const tcu::UVec3 workGroupSize = computeWorkGroupSize(gridSize); + + std::ostringstream src; + src << versionDecl << "\n" + << "layout (local_size_x = " << workGroupSize.x() << ", local_size_y = " << workGroupSize.y() << ", local_size_z = " << workGroupSize.z() << ") in; \n" + << "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform highp " << imageTypeStr << " u_image;\n" + << "void main (void)\n" + << "{\n" + << " if( gl_GlobalInvocationID.x < " << gridSize.x() << " ) \n" + << " if( gl_GlobalInvocationID.y < " << gridSize.y() << " ) \n" + << " if( gl_GlobalInvocationID.z < " << gridSize.z() << " ) \n" + << " {\n" + << " imageStore(u_image, " << getCoordStr(m_imageType, "gl_GlobalInvocationID.x", "gl_GlobalInvocationID.y", "gl_GlobalInvocationID.z") << "," + << formatDataStr << "( int(gl_GlobalInvocationID.x) % 127, int(gl_GlobalInvocationID.y) % 127, int(gl_GlobalInvocationID.z) % 127, 1));\n" + << " }\n" + << "}\n"; + + sourceCollections.glslSources.add("comp") << glu::ComputeSource(src.str()); +} + +class ImageSparseResidencyInstance : public SparseResourcesBaseInstance +{ +public: + ImageSparseResidencyInstance(Context& context, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format); + 
+ tcu::TestStatus iterate (void); + +private: + const ImageType m_imageType; + const tcu::UVec3 m_imageSize; + const tcu::TextureFormat m_format; +}; + +ImageSparseResidencyInstance::ImageSparseResidencyInstance (Context& context, + const ImageType imageType, + const tcu::UVec3& imageSize, + const tcu::TextureFormat& format) + : SparseResourcesBaseInstance (context) + , m_imageType (imageType) + , m_imageSize (imageSize) + , m_format (format) +{ +} + +tcu::TestStatus ImageSparseResidencyInstance::iterate (void) +{ + const InstanceInterface& instance = m_context.getInstanceInterface(); + const DeviceInterface& deviceInterface = m_context.getDeviceInterface(); + const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice(); + const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instance, physicalDevice); + + switch (mapImageType(m_imageType)) + { + case VK_IMAGE_TYPE_2D: + { + if (deviceFeatures.sparseResidencyImage2D == false) + return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Sparse residency for 2D Image not supported"); + } + break; + case VK_IMAGE_TYPE_3D: + { + if (deviceFeatures.sparseResidencyImage3D == false) + return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Sparse residency for 3D Image not supported"); + } + break; + default: + return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Image type not supported"); + }; + + // Check if the image format supports sparse operations + const std::vector<VkSparseImageFormatProperties> sparseImageFormatPropVec = + getPhysicalDeviceSparseImageFormatProperties(instance, physicalDevice, mapTextureFormat(m_format), mapImageType(m_imageType), + VK_SAMPLE_COUNT_1_BIT, VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_STORAGE_BIT, VK_IMAGE_TILING_OPTIMAL); + + if (sparseImageFormatPropVec.size() == 0) + { + return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "The image format does not support sparse operations"); + } + + const VkPhysicalDeviceProperties deviceProperties = getPhysicalDeviceProperties(instance, physicalDevice); + + if (isImageSizeSupported(m_imageType, m_imageSize, deviceProperties.limits) == false) + { + return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Image size not supported for device"); + } + + QueueRequirementsVec queueRequirements; + queueRequirements.push_back(QueueRequirements(VK_QUEUE_SPARSE_BINDING_BIT, 1u)); + queueRequirements.push_back(QueueRequirements(VK_QUEUE_COMPUTE_BIT, 1u)); + + // Create logical device supporting both sparse and compute queues + if (!createDeviceSupportingQueues(queueRequirements)) + { + return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Could not create device supporting sparse and compute queue"); + } + + const VkPhysicalDeviceMemoryProperties deviceMemoryProperties = getPhysicalDeviceMemoryProperties(instance, physicalDevice); + + // Create memory allocator for logical device + const de::UniquePtr<Allocator> allocator(new SimpleAllocator(deviceInterface, *m_logicalDevice, deviceMemoryProperties)); + + // Create queue supporting sparse binding operations + const Queue& sparseQueue = getQueue(VK_QUEUE_SPARSE_BINDING_BIT, 0); + + // Create queue supporting compute and transfer operations + const Queue& computeQueue = getQueue(VK_QUEUE_COMPUTE_BIT, 0); + + VkImageCreateInfo imageCreateInfo; + + imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO; //VkStructureType sType; + imageCreateInfo.pNext = DE_NULL; //const void* pNext; + imageCreateInfo.flags = VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT; //VkImageCreateFlags flags; + imageCreateInfo.imageType = mapImageType(m_imageType); //VkImageType imageType;
+ imageCreateInfo.format = mapTextureFormat(m_format); //VkFormat format; + imageCreateInfo.extent = makeExtent3D(getLayerSize(m_imageType, m_imageSize)); //VkExtent3D extent; + imageCreateInfo.mipLevels = 1u; //deUint32 mipLevels; + imageCreateInfo.arrayLayers = getNumLayers(m_imageType, m_imageSize); //deUint32 arrayLayers; + imageCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT; //VkSampleCountFlagBits samples; + imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; //VkImageTiling tiling; + imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; //VkImageLayout initialLayout; + imageCreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | + VK_IMAGE_USAGE_STORAGE_BIT; //VkImageUsageFlags usage; + imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE; //VkSharingMode sharingMode; + imageCreateInfo.queueFamilyIndexCount = 0u; //deUint32 queueFamilyIndexCount; + imageCreateInfo.pQueueFamilyIndices = DE_NULL; //const deUint32* pQueueFamilyIndices; + + if (m_imageType == IMAGE_TYPE_CUBE || m_imageType == IMAGE_TYPE_CUBE_ARRAY) + { + imageCreateInfo.flags |= VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; + } + + const deUint32 queueFamilyIndices[] = { sparseQueue.queueFamilyIndex, computeQueue.queueFamilyIndex }; + + if (sparseQueue.queueFamilyIndex != computeQueue.queueFamilyIndex) + { + imageCreateInfo.sharingMode = VK_SHARING_MODE_CONCURRENT; //VkSharingMode sharingMode; + imageCreateInfo.queueFamilyIndexCount = 2u; //deUint32 queueFamilyIndexCount; + imageCreateInfo.pQueueFamilyIndices = queueFamilyIndices; //const deUint32* pQueueFamilyIndices; + } + + // Create sparse image + const Unique<VkImage> sparseImage(createImage(deviceInterface, *m_logicalDevice, &imageCreateInfo)); + + // Get image general memory requirements + const VkMemoryRequirements imageMemoryRequirements = getImageMemoryRequirements(deviceInterface, *m_logicalDevice, *sparseImage); + + if (imageMemoryRequirements.size > deviceProperties.limits.sparseAddressSpaceSize) + { + return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Required memory size for sparse resource exceeds device limits"); + } + + DE_ASSERT((imageMemoryRequirements.size % imageMemoryRequirements.alignment) == 0); + + // Get image sparse memory requirements + deUint32 sparseMemoryReqCount = 0; + + deviceInterface.getImageSparseMemoryRequirements(*m_logicalDevice, *sparseImage, &sparseMemoryReqCount, DE_NULL); + + DE_ASSERT(sparseMemoryReqCount != 0); + + std::vector<VkSparseImageMemoryRequirements> sparseImageMemoryRequirements; + sparseImageMemoryRequirements.resize(sparseMemoryReqCount); + + deviceInterface.getImageSparseMemoryRequirements(*m_logicalDevice, *sparseImage, &sparseMemoryReqCount, &sparseImageMemoryRequirements[0]); + + // Make sure the image type includes color aspect + deUint32 colorAspectIndex = NO_MATCH_FOUND; + + for (deUint32 memoryReqNdx = 0; memoryReqNdx < sparseMemoryReqCount; ++memoryReqNdx) + { + if (sparseImageMemoryRequirements[memoryReqNdx].formatProperties.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) + { + colorAspectIndex = memoryReqNdx; + break; + } + } + + if (colorAspectIndex == NO_MATCH_FOUND) + { + return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Not supported image aspect - the test currently supports only VK_IMAGE_ASPECT_COLOR_BIT"); + } + + const VkSparseImageMemoryRequirements aspectRequirements = sparseImageMemoryRequirements[colorAspectIndex]; + const VkImageAspectFlags aspectMask = aspectRequirements.formatProperties.aspectMask; + const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity; +
DE_ASSERT((aspectRequirements.imageMipTailSize % imageMemoryRequirements.alignment) == 0); + + typedef de::SharedPtr< Unique<VkDeviceMemory> > DeviceMemoryUniquePtr; + + std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds; + std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds; + std::vector<DeviceMemoryUniquePtr> deviceMemUniquePtrVec; + const deUint32 memoryType = findMatchingMemoryType(deviceMemoryProperties, imageMemoryRequirements, MemoryRequirement::Any); + + if (memoryType == NO_MATCH_FOUND) + { + return tcu::TestStatus(QP_TEST_RESULT_FAIL, "No matching memory type found"); + } + + // Bind device memory for each aspect + for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx) + { + for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx) + { + const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, mipLevelNdx); + const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity); + const tcu::UVec3 lastBlockExtent = tcu::UVec3(mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width, + mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height, + mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth ); + + for (deUint32 z = 0; z < numSparseBinds.z(); ++z) + for (deUint32 y = 0; y < numSparseBinds.y(); ++y) + for (deUint32 x = 0; x < numSparseBinds.x(); ++x) + { + const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z(); + + if (linearIndex % 2 == 1) + { + continue; + } + + const VkMemoryAllocateInfo allocInfo = + { + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + imageMemoryRequirements.alignment, // VkDeviceSize allocationSize; + memoryType, // deUint32 memoryTypeIndex; + }; + + VkDeviceMemory deviceMemory = 0; + VK_CHECK(deviceInterface.allocateMemory(*m_logicalDevice, &allocInfo, DE_NULL, &deviceMemory)); + + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(deviceMemory), Deleter<VkDeviceMemory>(deviceInterface, *m_logicalDevice, DE_NULL)))); + + VkOffset3D offset; + offset.x = x*imageGranularity.width; + offset.y = y*imageGranularity.height; + offset.z = z*imageGranularity.depth; + + VkExtent3D extent; + extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width; + extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height; + extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth;
+ + VkSparseImageMemoryBind imageMemoryBind; + imageMemoryBind.subresource.aspectMask = aspectMask; + imageMemoryBind.subresource.mipLevel = mipLevelNdx; + imageMemoryBind.subresource.arrayLayer = layerNdx; + imageMemoryBind.memory = deviceMemory; + imageMemoryBind.memoryOffset = 0u; + imageMemoryBind.flags = 0u; + imageMemoryBind.offset = offset; + imageMemoryBind.extent = extent; + + imageResidencyMemoryBinds.push_back(imageMemoryBind); + } + } + + if (!(aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels) + { + const VkMemoryAllocateInfo allocInfo = + { + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + aspectRequirements.imageMipTailSize, // VkDeviceSize allocationSize; + memoryType, // deUint32 memoryTypeIndex; + }; + + VkDeviceMemory deviceMemory = 0; + VK_CHECK(deviceInterface.allocateMemory(*m_logicalDevice, &allocInfo, DE_NULL, &deviceMemory)); + + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(deviceMemory), Deleter<VkDeviceMemory>(deviceInterface, *m_logicalDevice, DE_NULL)))); + + VkSparseMemoryBind imageMipTailMemoryBind; + + imageMipTailMemoryBind.resourceOffset = aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride; + imageMipTailMemoryBind.size = aspectRequirements.imageMipTailSize; + imageMipTailMemoryBind.memory = deviceMemory; + imageMipTailMemoryBind.memoryOffset = 0u; + imageMipTailMemoryBind.flags = 0u; + + imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind); + } + } + + if ((aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) && aspectRequirements.imageMipTailFirstLod < imageCreateInfo.mipLevels) + { + const VkMemoryAllocateInfo allocInfo = + { + VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // VkStructureType sType; + DE_NULL, // const void* pNext; + aspectRequirements.imageMipTailSize, // VkDeviceSize allocationSize; + memoryType, // deUint32 memoryTypeIndex; + }; + + VkDeviceMemory deviceMemory = 0; + VK_CHECK(deviceInterface.allocateMemory(*m_logicalDevice, &allocInfo, DE_NULL, &deviceMemory)); + + deviceMemUniquePtrVec.push_back(makeVkSharedPtr(Move<VkDeviceMemory>(check<VkDeviceMemory>(deviceMemory), Deleter<VkDeviceMemory>(deviceInterface, *m_logicalDevice, DE_NULL)))); + + VkSparseMemoryBind imageMipTailMemoryBind; + + imageMipTailMemoryBind.resourceOffset = aspectRequirements.imageMipTailOffset; + imageMipTailMemoryBind.size = aspectRequirements.imageMipTailSize; + imageMipTailMemoryBind.memory = deviceMemory; + imageMipTailMemoryBind.memoryOffset = 0u; + imageMipTailMemoryBind.flags = 0u; + + imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind); + } + + const Unique<VkSemaphore> imageMemoryBindSemaphore(makeSemaphore(deviceInterface, *m_logicalDevice)); + + VkBindSparseInfo bindSparseInfo = + { + VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType; + DE_NULL, //const void* pNext; + 0u, //deUint32 waitSemaphoreCount; + DE_NULL, //const VkSemaphore* pWaitSemaphores; + 0u, //deUint32 bufferBindCount; + DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds; + 0u, //deUint32 imageOpaqueBindCount; + DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds; + 0u, //deUint32 imageBindCount; + DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds; + 1u, //deUint32 signalSemaphoreCount; + &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores; + }; + + VkSparseImageMemoryBindInfo imageResidencyBindInfo; +
VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo; + + if (imageResidencyMemoryBinds.size() > 0) + { + imageResidencyBindInfo.image = *sparseImage; + imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size()); + imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0]; + + bindSparseInfo.imageBindCount = 1u; + bindSparseInfo.pImageBinds = &imageResidencyBindInfo; + } + + if (imageMipTailMemoryBinds.size() > 0) + { + imageMipTailBindInfo.image = *sparseImage; + imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size()); + imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0]; + + bindSparseInfo.imageOpaqueBindCount = 1u; + bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo; + } + + // Submit sparse bind commands for execution + VK_CHECK(deviceInterface.queueBindSparse(sparseQueue.queueHandle, 1u, &bindSparseInfo, DE_NULL)); + + // Create command buffer for compute and transfer operations + const Unique<VkCommandPool> commandPool(makeCommandPool(deviceInterface, *m_logicalDevice, computeQueue.queueFamilyIndex)); + const Unique<VkCommandBuffer> commandBuffer(makeCommandBuffer(deviceInterface, *m_logicalDevice, *commandPool)); + + // Start recording commands + beginCommandBuffer(deviceInterface, *commandBuffer); + + // Create descriptor set layout + const Unique<VkDescriptorSetLayout> descriptorSetLayout( + DescriptorSetLayoutBuilder() + .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT) + .build(deviceInterface, *m_logicalDevice)); + + // Create and bind compute pipeline + const Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, *m_logicalDevice, m_context.getBinaryCollection().get("comp"), DE_NULL)); + const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(deviceInterface, *m_logicalDevice, *descriptorSetLayout)); + const Unique<VkPipeline> computePipeline(makeComputePipeline(deviceInterface, *m_logicalDevice, *pipelineLayout, *shaderModule)); + + deviceInterface.cmdBindPipeline(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *computePipeline); + + // Create and bind descriptor set + const Unique<VkDescriptorPool> descriptorPool( + DescriptorPoolBuilder() + .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1u) + .build(deviceInterface, *m_logicalDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u)); + + const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(deviceInterface, *m_logicalDevice, *descriptorPool, *descriptorSetLayout)); + + const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, getNumLayers(m_imageType, m_imageSize)); + const Unique<VkImageView> imageView(makeImageView(deviceInterface, *m_logicalDevice, *sparseImage, mapImageViewType(m_imageType), mapTextureFormat(m_format), subresourceRange)); + const VkDescriptorImageInfo sparseImageInfo = makeDescriptorImageInfo(DE_NULL, *imageView, VK_IMAGE_LAYOUT_GENERAL); + + DescriptorSetUpdateBuilder() + .writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &sparseImageInfo) + .update(deviceInterface, *m_logicalDevice); + + deviceInterface.cmdBindDescriptorSets(*commandBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL); + + const VkImageMemoryBarrier sparseImageLayoutChangeBarrier + = makeImageMemoryBarrier( + 0u, VK_ACCESS_SHADER_WRITE_BIT, + VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, + *sparseImage, subresourceRange); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageLayoutChangeBarrier);
+ + const tcu::UVec3 gridSize = getShaderGridSize(m_imageType, m_imageSize); + const tcu::UVec3 workGroupSize = computeWorkGroupSize(gridSize); + + const deUint32 xWorkGroupCount = gridSize.x() / workGroupSize.x() + (gridSize.x() % workGroupSize.x() ? 1u : 0u); + const deUint32 yWorkGroupCount = gridSize.y() / workGroupSize.y() + (gridSize.y() % workGroupSize.y() ? 1u : 0u); + const deUint32 zWorkGroupCount = gridSize.z() / workGroupSize.z() + (gridSize.z() % workGroupSize.z() ? 1u : 0u); + + const tcu::UVec3 maxComputeWorkGroupCount = tcu::UVec3(65535u, 65535u, 65535u); + + if (maxComputeWorkGroupCount.x() < xWorkGroupCount || + maxComputeWorkGroupCount.y() < yWorkGroupCount || + maxComputeWorkGroupCount.z() < zWorkGroupCount) + { + return tcu::TestStatus(QP_TEST_RESULT_NOT_SUPPORTED, "Image size is not supported"); + } + + deviceInterface.cmdDispatch(*commandBuffer, xWorkGroupCount, yWorkGroupCount, zWorkGroupCount); + + const VkImageMemoryBarrier sparseImageTransferBarrier + = makeImageMemoryBarrier( + VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, + VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, + *sparseImage, subresourceRange); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0u, 0u, DE_NULL, 0u, DE_NULL, 1u, &sparseImageTransferBarrier); + + const deUint32 imageSizeInBytes = getNumPixels(m_imageType, m_imageSize) * tcu::getPixelSize(m_format); + const VkBufferCreateInfo outputBufferCreateInfo = makeBufferCreateInfo(imageSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT); + + const de::UniquePtr<Buffer> outputBuffer(new Buffer(deviceInterface, *m_logicalDevice, *allocator, outputBufferCreateInfo, MemoryRequirement::HostVisible)); + + const VkBufferImageCopy bufferImageCopy = makeBufferImageCopy(imageCreateInfo.extent, imageCreateInfo.arrayLayers); + + deviceInterface.cmdCopyImageToBuffer(*commandBuffer, *sparseImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, outputBuffer->get(), 1u, &bufferImageCopy); + + const VkBufferMemoryBarrier outputBufferHostReadBarrier + = makeBufferMemoryBarrier( + VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, + outputBuffer->get(), 0u, imageSizeInBytes); + + deviceInterface.cmdPipelineBarrier(*commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, 0u, 0u, DE_NULL, 1u, &outputBufferHostReadBarrier, 0u, DE_NULL); + + // End recording commands + endCommandBuffer(deviceInterface, *commandBuffer); + + // The stage at which execution is going to wait for finish of sparse binding operations + const VkPipelineStageFlags stageBits[] = { VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT }; + + // Submit commands for execution and wait for completion + submitCommandsAndWait(deviceInterface, *m_logicalDevice, computeQueue.queueHandle, *commandBuffer, 1u, &imageMemoryBindSemaphore.get(), stageBits); + + // Retrieve data from buffer to host memory + const Allocation& allocation = outputBuffer->getAllocation(); + + invalidateMappedMemoryRange(deviceInterface, *m_logicalDevice, allocation.getMemory(), allocation.getOffset(), imageSizeInBytes); + + const deUint8* outputData = static_cast<const deUint8*>(allocation.getHostPtr()); + tcu::TestStatus testStatus = tcu::TestStatus::pass("Passed"); + + const tcu::ConstPixelBufferAccess pixelBuffer = tcu::ConstPixelBufferAccess(m_format, gridSize.x(), gridSize.y(), gridSize.z(), outputData); + + // Validate results + if( aspectRequirements.imageMipTailFirstLod > 0u ) + { + const VkExtent3D mipExtent = 
mipLevelExtents(imageCreateInfo.extent, 0u); + const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity); + const tcu::UVec3 lastBlockExtent = tcu::UVec3( mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width, + mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height, + mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth); + + for (deUint32 layerNdx = 0; layerNdx < imageCreateInfo.arrayLayers; ++layerNdx) + { + for (deUint32 z = 0; z < numSparseBinds.z(); ++z) + for (deUint32 y = 0; y < numSparseBinds.y(); ++y) + for (deUint32 x = 0; x < numSparseBinds.x(); ++x) + { + VkExtent3D offset; + offset.width = x*imageGranularity.width; + offset.height = y*imageGranularity.height; + offset.depth = z*imageGranularity.depth + layerNdx*numSparseBinds.z()*imageGranularity.depth; + + VkExtent3D extent; + extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width; + extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height; + extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth; + + const deUint32 linearIndex = x + y*numSparseBinds.x() + z*numSparseBinds.x()*numSparseBinds.y() + layerNdx*numSparseBinds.x()*numSparseBinds.y()*numSparseBinds.z(); + + if (linearIndex % 2 == 0) + { + for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ) + for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY) + for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX) + { + const tcu::UVec4 referenceValue = tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u); + const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ); + + if (memcmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format))) + { + testStatus = tcu::TestStatus::fail("Failed"); + goto verificationFinished; + } + } + } + else + { + if (deviceProperties.sparseProperties.residencyNonResidentStrict) + { + for (deUint32 offsetZ = offset.depth; offsetZ < offset.depth + extent.depth; ++offsetZ) + for (deUint32 offsetY = offset.height; offsetY < offset.height + extent.height; ++offsetY) + for (deUint32 offsetX = offset.width; offsetX < offset.width + extent.width; ++offsetX) + { + const tcu::UVec4 referenceValue = tcu::UVec4(0u, 0u, 0u, 0u); + const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ); + + if (memcmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format))) + { + testStatus = tcu::TestStatus::fail("Failed"); + goto verificationFinished; + } + } + } + } + } + } + } + else + { + const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, 0u); + + for (deUint32 offsetZ = 0u; offsetZ < mipExtent.depth * imageCreateInfo.arrayLayers; ++offsetZ) + for (deUint32 offsetY = 0u; offsetY < mipExtent.height; ++offsetY) + for (deUint32 offsetX = 0u; offsetX < mipExtent.width; ++offsetX) + { + const tcu::UVec4 referenceValue = tcu::UVec4(offsetX % 127u, offsetY % 127u, offsetZ % 127u, 1u); + const tcu::UVec4 outputValue = pixelBuffer.getPixelUint(offsetX, offsetY, offsetZ); + + if (memcmp(&outputValue, &referenceValue, sizeof(deUint32) * getNumUsedChannels(m_format))) + { + testStatus = tcu::TestStatus::fail("Failed"); + goto verificationFinished; + } + } + } + + 
verificationFinished: + + // Wait for sparse queue to become idle + deviceInterface.queueWaitIdle(sparseQueue.queueHandle); + + return testStatus; +} + +TestInstance* ImageSparseResidencyCase::createInstance (Context& context) const +{ + return new ImageSparseResidencyInstance(context, m_imageType, m_imageSize, m_format); +} + +} // anonymous ns + +tcu::TestCaseGroup* createImageSparseResidencyTests (tcu::TestContext& testCtx) +{ + de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "image_sparse_residency", "Image Sparse Residency")); + + static const deUint32 sizeCountPerImageType = 3u; + + struct ImageParameters + { + ImageType imageType; + tcu::UVec3 imageSizes[sizeCountPerImageType]; + }; + + static const ImageParameters imageParametersArray[] = + { + { IMAGE_TYPE_2D, { tcu::UVec3(512u, 256u, 1u), tcu::UVec3(1024u, 128u, 1u), tcu::UVec3(11u, 137u, 1u) } }, + { IMAGE_TYPE_2D_ARRAY, { tcu::UVec3(512u, 256u, 6u), tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u) } }, + { IMAGE_TYPE_CUBE, { tcu::UVec3(512u, 256u, 1u), tcu::UVec3(1024u, 128u, 1u), tcu::UVec3(11u, 137u, 1u) } }, + { IMAGE_TYPE_CUBE_ARRAY, { tcu::UVec3(512u, 256u, 6u), tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u) } }, + { IMAGE_TYPE_3D, { tcu::UVec3(512u, 256u, 16u), tcu::UVec3(1024u, 128u, 8u), tcu::UVec3(11u, 137u, 3u) } } + }; + + static const tcu::TextureFormat formats[] = + { + tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT32), + tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT16), + tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT8), + tcu::TextureFormat(tcu::TextureFormat::RG, tcu::TextureFormat::SIGNED_INT32), + tcu::TextureFormat(tcu::TextureFormat::RG, tcu::TextureFormat::SIGNED_INT16), + tcu::TextureFormat(tcu::TextureFormat::RG, tcu::TextureFormat::SIGNED_INT8), + tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT32), + tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT16), + tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8) + }; + + for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray); ++imageTypeNdx) + { + const ImageType imageType = imageParametersArray[imageTypeNdx].imageType; + de::MovePtr<tcu::TestCaseGroup> imageTypeGroup(new tcu::TestCaseGroup(testCtx, getImageTypeName(imageType).c_str(), "")); + + for (deInt32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); ++formatNdx) + { + const tcu::TextureFormat& format = formats[formatNdx]; + de::MovePtr<tcu::TestCaseGroup> formatGroup(new tcu::TestCaseGroup(testCtx, getShaderImageFormatQualifier(format).c_str(), "")); + + for (deInt32 imageSizeNdx = 0; imageSizeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray[imageTypeNdx].imageSizes); ++imageSizeNdx) + { + const tcu::UVec3 imageSize = imageParametersArray[imageTypeNdx].imageSizes[imageSizeNdx]; + + std::ostringstream stream; + stream << imageSize.x() << "_" << imageSize.y() << "_" << imageSize.z(); + + formatGroup->addChild(new ImageSparseResidencyCase(testCtx, stream.str(), "", imageType, imageSize, format, glu::GLSL_VERSION_440)); + } + imageTypeGroup->addChild(formatGroup.release()); + } + testGroup->addChild(imageTypeGroup.release()); + } + + return testGroup.release(); +} + +} // sparse +} // vkt diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.hpp new file mode 100644 index 
0000000..791b236 --- /dev/null +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesImageSparseResidency.hpp @@ -0,0 +1,46 @@ +#ifndef _VKTSPARSERESOURCESIMAGESPARSERESIDENCY_HPP +#define _VKTSPARSERESOURCESIMAGESPARSERESIDENCY_HPP +/*------------------------------------------------------------------------ +* Vulkan Conformance Tests +* ------------------------ +* +* Copyright (c) 2016 The Khronos Group Inc. +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and/or associated documentation files (the +* "Materials"), to deal in the Materials without restriction, including +* without limitation the rights to use, copy, modify, merge, publish, +* distribute, sublicense, and/or sell copies of the Materials, and to +* permit persons to whom the Materials are furnished to do so, subject to +* the following conditions: +* +* The above copyright notice(s) and this permission notice shall be included +* in all copies or substantial portions of the Materials. +* +* THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. +* +*//*! +* \file vktSparseResourcesImageSparseResidency.hpp +* \brief Sparse partially resident images tests +*//*--------------------------------------------------------------------*/ + +#include "tcuDefs.hpp" +#include "vktTestCase.hpp" + +namespace vkt +{ +namespace sparse +{ + +tcu::TestCaseGroup* createImageSparseResidencyTests(tcu::TestContext& testCtx); + +} // sparse +} // vkt + +#endif // _VKTSPARSERESOURCESIMAGESPARSERESIDENCY_HPP diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp index 4003db5..12e64e5 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTests.cpp @@ -32,6 +32,7 @@ #include "vktSparseResourcesBufferSparseBinding.hpp" #include "vktSparseResourcesImageSparseBinding.hpp" #include "vktSparseResourcesBufferSparseResidency.hpp" +#include "vktSparseResourcesImageSparseResidency.hpp" #include "deUniquePtr.hpp" namespace vkt @@ -46,6 +47,7 @@ tcu::TestCaseGroup* createTests (tcu::TestContext& testCtx) sparseTests->addChild(createBufferSparseBindingTests(testCtx)); sparseTests->addChild(createImageSparseBindingTests(testCtx)); sparseTests->addChild(createBufferSparseResidencyTests(testCtx)); + sparseTests->addChild(createImageSparseResidencyTests(testCtx)); return sparseTests.release(); } diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp index 3b6d89d..d6bd8f9 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.cpp @@ -634,6 +634,24 @@ std::string getShaderImageType (const tcu::TextureFormat& format, const ImageTyp return formatPart + "image" + imageTypePart; } + +std::string 
getShaderImageDataType(const tcu::TextureFormat& format) +{ + switch (tcu::getTextureChannelClass(format.type)) + { + case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER: + return "uvec4"; + case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER: + return "ivec4"; + case tcu::TEXTURECHANNELCLASS_FLOATING_POINT: + return "vec4"; + default: + DE_ASSERT(false); + return ""; + } +} + + std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format) { const char* orderPart; diff --git a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp index a7f0a12..98f5dd1 100644 --- a/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp +++ b/external/vulkancts/modules/vulkan/sparse_resources/vktSparseResourcesTestsUtil.hpp @@ -62,6 +62,7 @@ vk::VkImageType mapImageType (const ImageType imageType); vk::VkImageViewType mapImageViewType (const ImageType imageType); std::string getImageTypeName (const ImageType imageType); std::string getShaderImageType (const tcu::TextureFormat& format, const ImageType imageType); +std::string getShaderImageDataType (const tcu::TextureFormat& format); std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format); class Buffer -- 2.7.4
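Editor's note on the per-block binding arithmetic used by the test above: one device-memory allocation is bound per sparse block, so the block count per mip level is the granularity-aligned ceiling division of the mip extent, and the last block along each axis may be partial. Below is a minimal standalone sketch of that arithmetic only; the Extent3D struct and helper names are illustrative stand-ins, not part of the patch or of the Vulkan API.

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-in for VkExtent3D.
    struct Extent3D { std::uint32_t width, height, depth; };

    // Blocks needed along one axis: ceiling division by the sparse block granularity,
    // mirroring alignedDivide() in the patch.
    static std::uint32_t blockCount (const std::uint32_t size, const std::uint32_t granularity)
    {
        return size / granularity + ((size % granularity) ? 1u : 0u);
    }

    // Extent of the last (possibly partial) block along one axis,
    // mirroring the lastBlockExtent computation in the binding loop.
    static std::uint32_t lastBlockSize (const std::uint32_t size, const std::uint32_t granularity)
    {
        return (size % granularity) ? (size % granularity) : granularity;
    }

    int main (void)
    {
        const Extent3D mipExtent   = { 11u, 137u, 1u };   // one of the odd-sized test images
        const Extent3D granularity = { 128u, 128u, 1u };  // example 2D block granularity

        assert(blockCount(mipExtent.width,  granularity.width)  == 1u);
        assert(blockCount(mipExtent.height, granularity.height) == 2u);
        assert(lastBlockSize(mipExtent.height, granularity.height) == 9u); // 137 % 128
        return 0;
    }

In the test itself, only even-indexed blocks receive backing memory; odd-indexed blocks are left unbound so that, on devices reporting residencyNonResidentStrict, reads from them are expected to return zero.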