--- /dev/null
- DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TextureChannelClass::TEXTURECHANNELCLASS_FLOATING_POINT);
+ /*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Mobica Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Image load/store Tests
+ *//*--------------------------------------------------------------------*/
+
+ #include "vktImageLoadStoreTests.hpp"
+ #include "vktTestCaseUtil.hpp"
+ #include "vktImageTestsUtil.hpp"
+ #include "vktImageTexture.hpp"
+
+ #include "vkDefs.hpp"
+ #include "vkRef.hpp"
+ #include "vkRefUtil.hpp"
+ #include "vkPlatform.hpp"
+ #include "vkPrograms.hpp"
+ #include "vkMemUtil.hpp"
+ #include "vkBuilderUtil.hpp"
+ #include "vkQueryUtil.hpp"
+ #include "vkImageUtil.hpp"
+
+ #include "deUniquePtr.hpp"
+ #include "deStringUtil.hpp"
+
+ #include "tcuImageCompare.hpp"
+ #include "tcuTexture.hpp"
+ #include "tcuTextureUtil.hpp"
+ #include "tcuFloat.hpp"
+
+ #include <string>
+ #include <vector>
+
+ using namespace vk;
+
+ namespace vkt
+ {
+ namespace image
+ {
+ namespace
+ {
+
+ //! Build a VkImageCreateInfo for the test texture: optimal tiling, single mip level,
+ //! one sample, exclusive sharing; cube(-array) textures request the cube-compatible flag.
+ inline VkImageCreateInfo makeImageCreateInfo (const Texture& texture, const VkFormat format, const VkImageUsageFlags usage)
+ {
+ const VkImageCreateInfo imageParams =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ (isCube(texture) ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u), // VkImageCreateFlags flags;
+ mapImageType(texture.type()), // VkImageType imageType;
+ format, // VkFormat format;
+ makeExtent3D(texture.layerSize()), // VkExtent3D extent;
+ 1u, // deUint32 mipLevels;
+ texture.numLayers(), // deUint32 arrayLayers;
+ VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
+ VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
+ usage, // VkImageUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 0u, // deUint32 queueFamilyIndexCount;
+ DE_NULL, // const deUint32* pQueueFamilyIndices;
+ VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
+ };
+ return imageParams;
+ }
+
+ //! Copy region covering one layer-sized extent across all layers of the texture.
+ inline VkBufferImageCopy makeBufferImageCopy (const Texture& texture)
+ {
+ return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
+ }
+
+ //! Map a (possibly layered) image type to the type of a single bound layer:
+ //! arrays collapse to their element type, a cube face is a 2D image, and
+ //! 3D/buffer images (not layered here) map to themselves.
+ ImageType getImageTypeForSingleLayer (const ImageType imageType)
+ {
+ switch (imageType)
+ {
+ case IMAGE_TYPE_1D:
+ case IMAGE_TYPE_1D_ARRAY:
+ return IMAGE_TYPE_1D;
+
+ case IMAGE_TYPE_2D:
+ case IMAGE_TYPE_2D_ARRAY:
+ case IMAGE_TYPE_CUBE:
+ case IMAGE_TYPE_CUBE_ARRAY:
+ // A single layer for cube is a 2d face
+ return IMAGE_TYPE_2D;
+
+ case IMAGE_TYPE_3D:
+ return IMAGE_TYPE_3D;
+
+ case IMAGE_TYPE_BUFFER:
+ return IMAGE_TYPE_BUFFER;
+
+ default:
+ DE_FATAL("Internal test error");
+ return IMAGE_TYPE_LAST;
+ }
+ }
+
+ //! Scale applied to the integer test pattern before storing, so the values fit the
+ //! destination format's range: UNORM maps to [0, 1]; SNORM, combined with the -1
+ //! bias from computeStoreColorBias(), maps to [-1, 1]; other formats store unscaled.
+ float computeStoreColorScale (const VkFormat format, const tcu::IVec3 imageSize)
+ {
+ const int maxImageDimension = de::max(imageSize.x(), de::max(imageSize.y(), imageSize.z()));
+ // NOTE(review): div is 0.0 for a 1x1x1 image, making the scale infinite -- confirm callers never use 1x1x1 textures.
+ const float div = static_cast<float>(maxImageDimension - 1);
+
+ if (isUnormFormat(format))
+ return 1.0f / div;
+ else if (isSnormFormat(format))
+ return 2.0f / div;
+ else
+ return 1.0f;
+ }
+
+ //! SNORM formats get a -1.0 bias so the scaled pattern spans [-1, 1]; others get none.
+ inline float computeStoreColorBias (const VkFormat format)
+ {
+ return isSnormFormat(format) ? -1.0f : 0.0f;
+ }
+
+ //! True for signed or unsigned integer (non-normalized) formats.
+ inline bool isIntegerFormat (const VkFormat format)
+ {
+ return isIntFormat(format) || isUintFormat(format);
+ }
+
+ //! Return a view of one layer of the access: an array layer, a cube face, or a 3D
+ //! depth slice. Non-layered types return the whole access and require layer == 0.
+ tcu::ConstPixelBufferAccess getLayerOrSlice (const Texture& texture, const tcu::ConstPixelBufferAccess access, const int layer)
+ {
+ switch (texture.type())
+ {
+ case IMAGE_TYPE_1D:
+ case IMAGE_TYPE_2D:
+ case IMAGE_TYPE_BUFFER:
+ // Not layered
+ DE_ASSERT(layer == 0);
+ return access;
+
+ case IMAGE_TYPE_1D_ARRAY:
+ return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);
+
+ case IMAGE_TYPE_2D_ARRAY:
+ case IMAGE_TYPE_CUBE:
+ case IMAGE_TYPE_CUBE_ARRAY:
+ case IMAGE_TYPE_3D: // 3d texture is treated as if depth was the layers
+ return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);
+
+ default:
+ DE_FATAL("Internal test error");
+ return tcu::ConstPixelBufferAccess();
+ }
+ }
+
+ //! Lower-cased format name with the "VK_FORMAT_" prefix stripped, used for test case names.
+ std::string getFormatCaseName (const VkFormat format)
+ {
+ const std::string fullName = getFormatName(format);
+
+ DE_ASSERT(de::beginsWith(fullName, "VK_FORMAT_"));
+
+ // substr(10) drops the 10-character "VK_FORMAT_" prefix asserted above.
+ return de::toLower(fullName.substr(10));
+ }
+
+ //! \return true if all layers match in both pixel buffers
+ //! Layers are compared one at a time (cube faces and 3D slices count as layers),
+ //! each comparison is logged individually.
+ bool comparePixelBuffers (tcu::TestLog& log,
+ const Texture& texture,
+ const VkFormat format,
+ const tcu::ConstPixelBufferAccess reference,
+ const tcu::ConstPixelBufferAccess result)
+ {
+ DE_ASSERT(reference.getFormat() == result.getFormat());
+ DE_ASSERT(reference.getSize() == result.getSize());
+
+ const bool intFormat = isIntegerFormat(format);
+ const bool is3d = (texture.type() == IMAGE_TYPE_3D);
+ const int numLayersOrSlices = (is3d ? texture.size().z() : texture.numLayers());
+ const int numCubeFaces = 6;
+
+ int passedLayers = 0;
+ for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
+ {
+ const std::string comparisonName = "Comparison" + de::toString(layerNdx);
+ const std::string comparisonDesc = "Image Comparison, " +
+ (isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " + de::toString(layerNdx / numCubeFaces) :
+ is3d ? "slice " + de::toString(layerNdx) : "layer " + de::toString(layerNdx));
+
+ const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
+ const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);
+
+ // Integer data is compared exactly; floating-point data allows a small absolute threshold.
+ bool ok = false;
+ if (intFormat)
+ ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
+ else
+ ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
+
+ if (ok)
+ ++passedLayers;
+ }
+ return passedLayers == numLayersOrSlices;
+ }
+
+ //! Zero out invalid pixels in the image (denormalized, infinite, NaN values).
+ //! Used when reference data will be reinterpreted through a different float format,
+ //! where such bit patterns do not have a single well-defined value to compare against.
+ void replaceBadFloatReinterpretValues (const tcu::PixelBufferAccess access)
+ {
- const DeviceInterface& vk = m_context.getDeviceInterface();
++ DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT);
+
+ for (int z = 0; z < access.getDepth(); ++z)
+ for (int y = 0; y < access.getHeight(); ++y)
+ for (int x = 0; x < access.getWidth(); ++x)
+ {
+ const tcu::Vec4 color(access.getPixel(x, y, z));
+ tcu::Vec4 newColor = color;
+
+ // Clear each channel whose float bit pattern is denormal, infinite, or NaN.
+ for (int i = 0; i < 4; ++i)
+ {
+ const tcu::Float32 f(color[i]);
+ if (f.isDenorm() || f.isInf() || f.isNaN())
+ newColor[i] = 0.0f;
+ }
+
+ if (newColor != color)
+ access.setPixel(newColor, x, y, z);
+ }
+ }
+
+ //! Build the reference image: each texel channel is an XOR pattern of its coordinates,
+ //! scaled and biased for normalized storage formats.
+ //! \param readFormat format the image will later be accessed through; when it is a float
+ //!        format differing from imageFormat, texels whose reinterpreted bits would be
+ //!        denorm/inf/NaN are zeroed so the later comparison stays well-defined.
+ tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, const VkFormat readFormat)
+ {
+ // Generate a reference image data using the storage format
+
+ tcu::TextureLevel reference(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
+ const tcu::PixelBufferAccess access = reference.getAccess();
+
+ const float storeColorScale = computeStoreColorScale(imageFormat, imageSize);
+ const float storeColorBias = computeStoreColorBias(imageFormat);
+
+ const bool intFormat = isIntegerFormat(imageFormat);
+ const int xMax = imageSize.x() - 1;
+ const int yMax = imageSize.y() - 1;
+
+ for (int z = 0; z < imageSize.z(); ++z)
+ for (int y = 0; y < imageSize.y(); ++y)
+ for (int x = 0; x < imageSize.x(); ++x)
+ {
+ const tcu::IVec4 color(x^y^z, (xMax - x)^y^z, x^(yMax - y)^z, (xMax - x)^(yMax - y)^z);
+
+ if (intFormat)
+ access.setPixel(color, x, y, z);
+ else
+ access.setPixel(color.asFloat()*storeColorScale + storeColorBias, x, y, z);
+ }
+
+ // If the image is to be accessed as a float texture, get rid of invalid values
+
+ if (isFloatFormat(readFormat) && imageFormat != readFormat)
+ replaceBadFloatReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
+
+ return reference;
+ }
+
+ inline tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat)
+ {
+ return generateReferenceImage(imageSize, imageFormat, imageFormat);
+ }
+
+ //! Mirror the image in place along the x axis, slice by slice. Integer formats are
+ //! swapped via exact integer reads/writes so no precision is lost through float conversion.
+ void flipHorizontally (const tcu::PixelBufferAccess access)
+ {
+ const int xMax = access.getWidth() - 1;
+ const int halfWidth = access.getWidth() / 2;
+
+ if (isIntegerFormat(mapTextureFormat(access.getFormat())))
+ for (int z = 0; z < access.getDepth(); z++)
+ for (int y = 0; y < access.getHeight(); y++)
+ for (int x = 0; x < halfWidth; x++)
+ {
+ const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
+ access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
+ access.setPixel(temp, x, y, z);
+ }
+ else
+ for (int z = 0; z < access.getDepth(); z++)
+ for (int y = 0; y < access.getHeight(); y++)
+ for (int x = 0; x < halfWidth; x++)
+ {
+ const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
+ access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
+ access.setPixel(temp, x, y, z);
+ }
+ }
+
+ //! Sanity check used by the shader generators: a non-identity scale/bias is only
+ //! meaningful for normalized (UNORM/SNORM) formats.
+ inline bool colorScaleAndBiasAreValid (const VkFormat format, const float colorScale, const float colorBias)
+ {
+ // Only normalized (fixed-point) formats may have scale/bias
+ const bool integerOrFloatFormat = isIntFormat(format) || isUintFormat(format) || isFloatFormat(format);
+ return !integerOrFloatFormat || (colorScale == 1.0f && colorBias == 0.0f);
+ }
+
+ //! Formats are treated as compatible for reinterpretation when identical or when their
+ //! texel sizes match (pixel size is used here as the compatibility criterion).
+ inline bool formatsAreCompatible (const VkFormat format0, const VkFormat format1)
+ {
+ return format0 == format1 || mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize();
+ }
+
+ void commandImageWriteBarrierBetweenShaderInvocations (Context& context, const VkCommandBuffer cmdBuffer, const VkImage image, const Texture& texture)
+ {
+ const DeviceInterface& vk = context.getDeviceInterface();
+
+ const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
+ const VkImageMemoryBarrier shaderWriteBarrier = makeImageMemoryBarrier(
+ VK_ACCESS_SHADER_WRITE_BIT, 0u,
+ VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
+ image, fullImageSubresourceRange);
+
+ const void* const barriers[] = { &shaderWriteBarrier };
+
+ vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(barriers), barriers);
+ }
+
+ void commandBufferWriteBarrierBeforeHostRead (Context& context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer, const VkDeviceSize bufferSizeBytes)
+ {
+ const DeviceInterface& vk = context.getDeviceInterface();
+
+ const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
+ VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
+ buffer, 0ull, bufferSizeBytes);
+
+ const void* const barriers[] = { &shaderWriteBarrier };
+
+ vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(barriers), barriers);
+ }
+
+ //! Copy all layers of an image to a buffer.
+ //! Waits for shader writes, transitions the image GENERAL -> TRANSFER_SRC_OPTIMAL,
+ //! records the copy, then makes the buffer contents visible to host reads.
+ void commandCopyImageToBuffer (Context& context,
+ const VkCommandBuffer cmdBuffer,
+ const VkImage image,
+ const VkBuffer buffer,
+ const VkDeviceSize bufferSizeBytes,
+ const Texture& texture)
+ {
+ const DeviceInterface& vk = context.getDeviceInterface();
+
+ const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
+ const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
+ VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ image, fullImageSubresourceRange);
+
+ const void* const barriersBeforeCopy[] = { &prepareForTransferBarrier };
+
+ const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);
+
+ const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
+ VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
+ buffer, 0ull, bufferSizeBytes);
+
+ // Fixed encoding corruption: "&copy" had been mangled into the copyright-sign character
+ // (HTML entity substitution) here and in the cmdCopyImageToBuffer call below.
+ const void* const barriersAfterCopy[] = { &copyBarrier };
+
+ vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(barriersBeforeCopy), barriersBeforeCopy);
+ vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, &copyRegion);
+ vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(barriersAfterCopy), barriersAfterCopy);
+ }
+
+ //! Minimum chunk size is determined by the offset alignment requirements.
+ //! Returns the smallest multiple of minUniformBufferOffsetAlignment that is
+ //! >= minimumRequiredChunkSizeBytes (and at least one alignment unit).
+ VkDeviceSize getOptimalUniformBufferChunkSize (Context& context, VkDeviceSize minimumRequiredChunkSizeBytes)
+ {
+ const VkPhysicalDeviceProperties properties = getPhysicalDeviceProperties(context.getInstanceInterface(), context.getPhysicalDevice());
+ const VkDeviceSize alignment = properties.limits.minUniformBufferOffsetAlignment;
+
+ if (minimumRequiredChunkSizeBytes > alignment)
+ // Round up to the alignment. The previous formula, alignment + (size / alignment) * alignment,
+ // over-allocated one full alignment unit whenever size was already an exact multiple.
+ return ((minimumRequiredChunkSizeBytes + alignment - 1u) / alignment) * alignment;
+ else
+ return alignment;
+ }
+
+ class StoreTest : public TestCase
+ {
+ public:
+ enum TestFlags
+ {
+ FLAG_SINGLE_LAYER_BIND = 0x1, //!< Run the shader multiple times, each time binding a different layer.
+ };
+
+ StoreTest (tcu::TestContext& testCtx,
+ const std::string& name,
+ const std::string& description,
+ const Texture& texture,
+ const VkFormat format,
+ const TestFlags flags = static_cast<TestFlags>(0));
+
+ void initPrograms (SourceCollections& programCollection) const;
+
+ TestInstance* createInstance (Context& context) const;
+
+ private:
+ const Texture m_texture;
+ const VkFormat m_format;
+ const bool m_singleLayerBind;
+ };
+
+ StoreTest::StoreTest (tcu::TestContext& testCtx,
+ const std::string& name,
+ const std::string& description,
+ const Texture& texture,
+ const VkFormat format,
+ const TestFlags flags)
+ : TestCase (testCtx, name, description)
+ , m_texture (texture)
+ , m_format (format)
+ , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
+ {
+ if (m_singleLayerBind)
+ DE_ASSERT(m_texture.numLayers() > 1);
+ }
+
+ void StoreTest::initPrograms (SourceCollections& programCollection) const
+ {
+ const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
+ const float storeColorBias = computeStoreColorBias(m_format);
+ DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));
+ DE_UNREF(colorScaleAndBiasAreValid);
+
+ const std::string xMax = de::toString(m_texture.size().x() - 1);
+ const std::string yMax = de::toString(m_texture.size().y() - 1);
+ const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
+ const std::string colorBaseExpr = signednessPrefix + "vec4("
+ + "gx^gy^gz, "
+ + "(" + xMax + "-gx)^gy^gz, "
+ + "gx^(" + yMax + "-gy)^gz, "
+ + "(" + xMax + "-gx)^(" + yMax + "-gy)^gz)";
+
+ const std::string colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
+ + (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");
+
+ const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
+ const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");
+
+ const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
+ const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
+ const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);
+
+ std::ostringstream src;
+ src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
+ << "\n"
+ << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+ << "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform highp " << imageTypeStr << " u_image;\n";
+
+ if (m_singleLayerBind)
+ src << "layout (binding = 1) readonly uniform Constants {\n"
+ << " int u_layerNdx;\n"
+ << "};\n";
+
+ src << "\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " int gx = int(gl_GlobalInvocationID.x);\n"
+ << " int gy = int(gl_GlobalInvocationID.y);\n"
+ << " int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
+ << " imageStore(u_image, " << texelCoordStr << ", " << colorExpr << ");\n"
+ << "}\n";
+
+ programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
+ }
+
+ //! Generic test iteration algorithm for image tests
+ class BaseTestInstance : public TestInstance
+ {
+ public:
+ BaseTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format,
+ const bool singleLayerBind);
+
+ tcu::TestStatus iterate (void);
+
+ virtual ~BaseTestInstance (void) {}
+
+ protected:
+ virtual VkDescriptorSetLayout prepareDescriptors (void) = 0;
+ virtual tcu::TestStatus verifyResult (void) = 0;
+
+ virtual void commandBeforeCompute (const VkCommandBuffer cmdBuffer) = 0;
+ virtual void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer) = 0;
+ virtual void commandAfterCompute (const VkCommandBuffer cmdBuffer) = 0;
+
+ virtual void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
+ const VkPipelineLayout pipelineLayout,
+ const int layerNdx) = 0;
+
+ const Texture m_texture;
+ const VkFormat m_format;
+ const bool m_singleLayerBind;
+ };
+
+ BaseTestInstance::BaseTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
+ : TestInstance (context)
+ , m_texture (texture)
+ , m_format (format)
+ , m_singleLayerBind (singleLayerBind)
+ {
+ }
+
+ //! Common test flow: build a compute pipeline from the "comp" program, record one
+ //! dispatch per bound layer (or a single dispatch over the whole image), submit,
+ //! wait for completion, and delegate result checking to verifyResult().
+ tcu::TestStatus BaseTestInstance::iterate (void)
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ const VkQueue queue = m_context.getUniversalQueue();
+ const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
+
+ const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));
+
+ const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
+ const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
+ const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
+
+ const Unique<VkCommandPool> cmdPool(makeCommandPool(vk, device, queueFamilyIndex));
+ const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));
+
+ beginCommandBuffer(vk, *cmdBuffer);
+
+ vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
+ commandBeforeCompute(*cmdBuffer);
+
+ // With single-layer binding the shader is dispatched once per layer over a layer-sized
+ // work size; otherwise a single dispatch covers the whole image.
+ const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
+ const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
+ for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
+ {
+ commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);
+
+ if (layerNdx > 0)
+ commandBetweenShaderInvocations(*cmdBuffer);
+
+ vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
+ }
+
+ commandAfterCompute(*cmdBuffer);
+
+ endCommandBuffer(vk, *cmdBuffer);
+
+ submitCommandsAndWait(vk, device, queue, *cmdBuffer);
+
+ return verifyResult();
+ }
+
+ //! Base store test implementation
+ class StoreTestInstance : public BaseTestInstance
+ {
+ public:
+ StoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format,
+ const bool singleLayerBind);
+
+ protected:
+ tcu::TestStatus verifyResult (void);
+
+ // Add empty implementations for functions that might be not needed
+ void commandBeforeCompute (const VkCommandBuffer) {}
+ void commandBetweenShaderInvocations (const VkCommandBuffer) {}
+ void commandAfterCompute (const VkCommandBuffer) {}
+
+ de::MovePtr<Buffer> m_imageBuffer;
+ const VkDeviceSize m_imageSizeBytes;
+ };
+
+ StoreTestInstance::StoreTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
+ : BaseTestInstance (context, texture, format, singleLayerBind)
+ , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ Allocator& allocator = m_context.getDefaultAllocator();
+
+ // A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.
+
+ m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
+ vk, device, allocator,
+ makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
+ MemoryRequirement::HostVisible));
+ }
+
+ //! Compare the host-visible result buffer against the generated reference image.
+ tcu::TestStatus StoreTestInstance::verifyResult (void)
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+
+ const tcu::IVec3 imageSize = m_texture.size();
+ const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format);
+
+ // Make device writes visible to the host before reading the mapped memory.
+ const Allocation& alloc = m_imageBuffer->getAllocation();
+ invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
+ const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize, alloc.getHostPtr());
+
+ if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
+ return tcu::TestStatus::pass("Passed");
+ else
+ return tcu::TestStatus::fail("Image comparison failed");
+ }
+
+ //! Store test for images
+ class ImageStoreTestInstance : public StoreTestInstance
+ {
+ public:
+ ImageStoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format,
+ const bool singleLayerBind);
+
+ protected:
+ VkDescriptorSetLayout prepareDescriptors (void);
+ void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
+ void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
+ void commandAfterCompute (const VkCommandBuffer cmdBuffer);
+
+ void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
+ const VkPipelineLayout pipelineLayout,
+ const int layerNdx);
+
+ de::MovePtr<Image> m_image;
+ de::MovePtr<Buffer> m_constantsBuffer;
+ const VkDeviceSize m_constantsBufferChunkSizeBytes;
+ Move<VkDescriptorSetLayout> m_descriptorSetLayout;
+ Move<VkDescriptorPool> m_descriptorPool;
+ DynArray<Move<VkDescriptorSet> > m_allDescriptorSets;
+ DynArray<Move<VkImageView> > m_allImageViews;
+ };
+
+ ImageStoreTestInstance::ImageStoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format,
+ const bool singleLayerBind)
+ : StoreTestInstance (context, texture, format, singleLayerBind)
+ , m_constantsBufferChunkSizeBytes (getOptimalUniformBufferChunkSize(context, sizeof(deUint32)))
+ , m_allDescriptorSets (texture.numLayers())
+ , m_allImageViews (texture.numLayers())
+ {
++ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ Allocator& allocator = m_context.getDefaultAllocator();
+
+ m_image = de::MovePtr<Image>(new Image(
+ vk, device, allocator,
+ makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT),
+ MemoryRequirement::Any));
+
+ // This buffer will be used to pass constants to the shader
+
+ const int numLayers = m_texture.numLayers();
+ const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
+ m_constantsBuffer = de::MovePtr<Buffer>(new Buffer(
+ vk, device, allocator,
+ makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
+ MemoryRequirement::HostVisible));
+
+ {
+ const Allocation& alloc = m_constantsBuffer->getAllocation();
+ deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());
+
+ memset(alloc.getHostPtr(), 0, constantsBufferSizeBytes);
+
+ for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
+ {
+ deUint32* valuePtr = reinterpret_cast<deUint32*>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
+ *valuePtr = static_cast<deUint32>(layerNdx);
+ }
+
+ flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), constantsBufferSizeBytes);
+ }
+ }
+
+ //! Create the descriptor layout/pool plus descriptor sets and image views:
+ //! binding 0 = storage image, binding 1 = uniform buffer holding the layer index.
+ //! Single-layer binding gets one set/view per layer; otherwise one set/view covers all layers.
+ VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors (void)
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+
+ const int numLayers = m_texture.numLayers();
+ m_descriptorSetLayout = DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+ .build(vk, device);
+
+ m_descriptorPool = DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
+ .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
+ .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
+
+ if (m_singleLayerBind)
+ {
+ for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
+ {
+ m_allDescriptorSets[layerNdx] = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
+ m_allImageViews[layerNdx] = makeImageView(vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
+ makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u));
+ }
+ }
+ else // bind all layers at once
+ {
+ m_allDescriptorSets[0] = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
+ m_allImageViews[0] = makeImageView(vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
+ makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers));
+ }
+
+ return *m_descriptorSetLayout; // not passing the ownership
+ }
+
+ void ImageStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+
+ const VkDescriptorSet descriptorSet = *m_allDescriptorSets[layerNdx];
+ const VkImageView imageView = *m_allImageViews[layerNdx];
+
+ const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
+
+ // Set the next chunk of the constants buffer. Each chunk begins with layer index that we've set before.
+ const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
+ m_constantsBuffer->get(), layerNdx*m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);
+
+ DescriptorSetUpdateBuilder()
+ .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
+ .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
+ .update(vk, device);
+ vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
+ }
+
+ void ImageStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+
+ const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
+ const VkImageMemoryBarrier setImageLayoutBarrier = makeImageMemoryBarrier(
+ 0u, 0u,
+ VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
+ m_image->get(), fullImageSubresourceRange);
+
+ const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
+ const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
+ VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
+ m_constantsBuffer->get(), 0ull, constantsBufferSize);
+
+ const void* const barriersBefore[] = { &writeConstantsBarrier, &setImageLayoutBarrier };
+
+ vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(barriersBefore), barriersBefore);
+ }
+
+ void ImageStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
+ {
+ commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
+ }
+
+ void ImageStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
+ {
+ commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
+ }
+
+ //! Store test for buffers
+ class BufferStoreTestInstance : public StoreTestInstance
+ {
+ public:
+ BufferStoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format);
+
+ protected:
+ VkDescriptorSetLayout prepareDescriptors (void);
+ void commandAfterCompute (const VkCommandBuffer cmdBuffer);
+
+ void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
+ const VkPipelineLayout pipelineLayout,
+ const int layerNdx);
+
+ Move<VkDescriptorSetLayout> m_descriptorSetLayout;
+ Move<VkDescriptorPool> m_descriptorPool;
+ Move<VkDescriptorSet> m_descriptorSet;
+ Move<VkBufferView> m_bufferView;
+ };
+
+ BufferStoreTestInstance::BufferStoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format)
+ : StoreTestInstance(context, texture, format, false)
+ {
+ }
+
+ VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors (void)
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+
+ m_descriptorSetLayout = DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+ .build(vk, device);
+
+ m_descriptorPool = DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
+ .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+
+ m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
+ m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
+
+ return *m_descriptorSetLayout; // not passing the ownership
+ }
+
+ //! Write the texel-buffer view into the descriptor set and bind it.
+ //! Buffers have no layers, so only layerNdx == 0 is valid.
+ void BufferStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
+ {
+ DE_ASSERT(layerNdx == 0);
+ DE_UNREF(layerNdx);
+
+ const VkDevice device = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+
+ DescriptorSetUpdateBuilder()
+ .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
+ .update(vk, device);
+ vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
+ }
+
+ //! Insert a barrier so the shader's buffer writes are visible to host reads.
+ void BufferStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
+ {
+ commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(), m_imageSizeBytes);
+ }
+
+ //! Test case that reads texels with imageLoad() from one image and writes a
+ //! horizontally mirrored copy with imageStore() to another. The shader format
+ //! (m_format) may differ from the storage format (m_imageFormat) to exercise
+ //! format reinterpretation.
+ class LoadStoreTest : public TestCase
+ {
+ public:
+ enum TestFlags
+ {
+ FLAG_SINGLE_LAYER_BIND = 1 << 0, //!< Run the shader multiple times, each time binding a different layer.
+ FLAG_RESTRICT_IMAGES = 1 << 1, //!< If given, images in the shader will be qualified with "restrict".
+ };
+
+ LoadStoreTest (tcu::TestContext& testCtx,
+ const std::string& name,
+ const std::string& description,
+ const Texture& texture,
+ const VkFormat format,
+ const VkFormat imageFormat,
+ const TestFlags flags = static_cast<TestFlags>(0));
+
+ void initPrograms (SourceCollections& programCollection) const;
+ TestInstance* createInstance (Context& context) const;
+
+ private:
+ const Texture m_texture;
+ const VkFormat m_format; //!< Format as accessed in the shader
+ const VkFormat m_imageFormat; //!< Storage format
+ const bool m_singleLayerBind;
+ const bool m_restrictImages;
+ };
+
+ //! Initialize the test case; unpacks flags into booleans and sanity-checks
+ //! that single-layer binding is only requested for layered textures and that
+ //! shader and storage formats are reinterpretation-compatible.
+ LoadStoreTest::LoadStoreTest (tcu::TestContext& testCtx,
+ const std::string& name,
+ const std::string& description,
+ const Texture& texture,
+ const VkFormat format,
+ const VkFormat imageFormat,
+ const TestFlags flags)
+ : TestCase (testCtx, name, description)
+ , m_texture (texture)
+ , m_format (format)
+ , m_imageFormat (imageFormat)
+ , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
+ , m_restrictImages ((flags & FLAG_RESTRICT_IMAGES) != 0)
+ {
+ if (m_singleLayerBind)
+ DE_ASSERT(m_texture.numLayers() > 1);
+
+ DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
+ }
+
+ //! Generate the compute shader: one invocation per texel, reading from
+ //! u_image0 at the x-mirrored coordinate and storing to u_image1, i.e. the
+ //! output is the input flipped horizontally (verifyResult applies the same
+ //! flip to the reference image).
+ void LoadStoreTest::initPrograms (SourceCollections& programCollection) const
+ {
+ const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
+ const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
+ const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
+ const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);
+ const std::string maybeRestrictStr = (m_restrictImages ? "restrict " : "");
+ const std::string xMax = de::toString(m_texture.size().x() - 1);
+
+ std::ostringstream src;
+ src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
+ << "\n"
+ << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
+ << "layout (binding = 0, " << formatQualifierStr << ") " << maybeRestrictStr << "readonly uniform highp " << imageTypeStr << " u_image0;\n"
+ << "layout (binding = 1, " << formatQualifierStr << ") " << maybeRestrictStr << "writeonly uniform highp " << imageTypeStr << " u_image1;\n"
+ << "\n"
+ << "void main (void)\n"
+ << "{\n"
+ << (dimension == 1 ?
+ " int pos = int(gl_GlobalInvocationID.x);\n"
+ " imageStore(u_image1, pos, imageLoad(u_image0, " + xMax + "-pos));\n"
+ : dimension == 2 ?
+ " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
+ " imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" + xMax + "-pos.x, pos.y)));\n"
+ : dimension == 3 ?
+ " ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
+ " imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" + xMax + "-pos.x, pos.y, pos.z)));\n"
+ : "")
+ << "}\n";
+
+ programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
+ }
+
+ //! Load/store test base implementation. Owns the host-visible helper buffer
+ //! holding the reference data and implements result verification; subclasses
+ //! provide descriptors and the buffer the result ends up in.
+ class LoadStoreTestInstance : public BaseTestInstance
+ {
+ public:
+ LoadStoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format,
+ const VkFormat imageFormat,
+ const bool singleLayerBind);
+
+ protected:
+ virtual Buffer* getResultBuffer (void) const = 0; //!< Get the buffer that contains the result image
+
+ tcu::TestStatus verifyResult (void);
+
+ // Add empty implementations for functions that might be not needed
+ void commandBeforeCompute (const VkCommandBuffer) {}
+ void commandBetweenShaderInvocations (const VkCommandBuffer) {}
+ void commandAfterCompute (const VkCommandBuffer) {}
+
+ de::MovePtr<Buffer> m_imageBuffer; //!< Source data and helper buffer
+ const VkDeviceSize m_imageSizeBytes;
+ const VkFormat m_imageFormat; //!< Image format (for storage, may be different than texture format)
+ tcu::TextureLevel m_referenceImage; //!< Used as input data and later to verify result image
+ };
+
+ //! Create the helper buffer sized for the whole image and seed it with the
+ //! generated reference data; the buffer is later uploaded to the source
+ //! image (or used directly as a texel buffer).
+ LoadStoreTestInstance::LoadStoreTestInstance (Context& context,
+     const Texture& texture,
+     const VkFormat format,
+     const VkFormat imageFormat,
+     const bool singleLayerBind)
+     : BaseTestInstance (context, texture, format, singleLayerBind)
+     , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
+     , m_imageFormat (imageFormat)
+     , m_referenceImage (generateReferenceImage(texture.size(), imageFormat, format))
+ {
+     const DeviceInterface& vk = m_context.getDeviceInterface();
+     const VkDevice device = m_context.getDevice();
+     Allocator& allocator = m_context.getDefaultAllocator();
+
+     // A helper buffer with enough space to hold the whole image.
+     m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
+         vk, device, allocator,
+         makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
+         MemoryRequirement::HostVisible));
+
+     // Copy reference data to buffer for subsequent upload to image.
+     // VkDeviceSize is 64-bit while memcpy takes size_t; cast explicitly to
+     // avoid an implicit narrowing conversion on 32-bit targets.
+     const Allocation& alloc = m_imageBuffer->getAllocation();
+     memcpy(alloc.getHostPtr(), m_referenceImage.getAccess().getDataPtr(), static_cast<size_t>(m_imageSizeBytes));
+     flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
+ }
+
+ //! Compare the shader output against the reference. The shader mirrors the
+ //! image in x, so the same flip is applied to the reference before comparing.
+ tcu::TestStatus LoadStoreTestInstance::verifyResult (void)
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+
+ // Apply the same transformation as done in the shader
+ const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
+ flipHorizontally(reference);
+
+ // Make device writes visible to the host before reading the mapped memory.
+ const Allocation& alloc = getResultBuffer()->getAllocation();
+ invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
+ const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(), alloc.getHostPtr());
+
+ if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
+ return tcu::TestStatus::pass("Passed");
+ else
+ return tcu::TestStatus::fail("Image comparison failed");
+ }
+
+ //! Load/store test for images. Owns a source and a destination image; in
+ //! single-layer-bind mode one descriptor set and view pair is prepared per
+ //! layer, otherwise a single entry at index 0 covers all layers.
+ class ImageLoadStoreTestInstance : public LoadStoreTestInstance
+ {
+ public:
+ struct PerLayerData
+ {
+ PerLayerData (Move<VkDescriptorSet> descriptorSet,
+ Move<VkImageView> imageViewSrc,
+ Move<VkImageView> imageViewDst);
+
+ const Unique<VkDescriptorSet> descriptorSet;
+ const Unique<VkImageView> imageViewSrc;
+ const Unique<VkImageView> imageViewDst;
+ };
+
+ ImageLoadStoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format,
+ const VkFormat imageFormat,
+ const bool singleLayerBind);
+
+ protected:
+ VkDescriptorSetLayout prepareDescriptors (void);
+ void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
+ void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
+ void commandAfterCompute (const VkCommandBuffer cmdBuffer);
+
+ void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
+ const VkPipelineLayout pipelineLayout,
+ const int layerNdx);
+
+ Buffer* getResultBuffer (void) const { return m_imageBuffer.get(); }
+
+ de::MovePtr<Image> m_imageSrc;
+ de::MovePtr<Image> m_imageDst;
+ Move<VkDescriptorSetLayout> m_descriptorSetLayout;
+ Move<VkDescriptorPool> m_descriptorPool;
+ DynArray<de::MovePtr<PerLayerData> > m_perLayerData;
+ };
+
+ //! Take ownership of the per-layer descriptor set and image views.
+ ImageLoadStoreTestInstance::PerLayerData::PerLayerData (Move<VkDescriptorSet> descriptorSet_,
+ Move<VkImageView> imageViewSrc_,
+ Move<VkImageView> imageViewDst_)
+ : descriptorSet (descriptorSet_)
+ , imageViewSrc (imageViewSrc_)
+ , imageViewDst (imageViewDst_)
+ {
+ }
+
+ //! Create the source image (upload target, read in shader) and destination
+ //! image (written in shader, copied back to the helper buffer).
+ ImageLoadStoreTestInstance::ImageLoadStoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format,
+ const VkFormat imageFormat,
+ const bool singleLayerBind)
+ : LoadStoreTestInstance (context, texture, format, imageFormat, singleLayerBind)
+ , m_perLayerData (texture.numLayers())
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ Allocator& allocator = m_context.getDefaultAllocator();
+
+ m_imageSrc = de::MovePtr<Image>(new Image(
+ vk, device, allocator,
+ makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
+ MemoryRequirement::Any));
+
+ m_imageDst = de::MovePtr<Image>(new Image(
+ vk, device, allocator,
+ makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT),
+ MemoryRequirement::Any));
+ }
+
+ //! Build the two-binding (src/dst storage image) layout and pool, then create
+ //! descriptor sets and image views: one per layer when single-layer binding
+ //! is used, otherwise a single set at index 0 covering all layers. Returns a
+ //! non-owning layout handle.
+ VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors (void)
+ {
+ const VkDevice device = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+
+ const int numLayers = m_texture.numLayers();
+ m_descriptorSetLayout = DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
+ .build(vk, device);
+
+ m_descriptorPool = DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
+ .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
+
+ if (m_singleLayerBind)
+ {
+ for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
+ {
+ // Each set sees exactly one layer of each image.
+ const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
+ const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);
+
+ de::MovePtr<PerLayerData> data(new PerLayerData(
+ makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout),
+ makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange),
+ makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange)));
+
+ m_perLayerData[layerNdx] = data;
+ }
+ }
+ else // bind all layers at once
+ {
+ const VkImageViewType viewType = mapImageViewType(m_texture.type());
+ const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);
+
+ de::MovePtr<PerLayerData> data(new PerLayerData(
+ makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout),
+ makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange),
+ makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange)));
+
+ m_perLayerData[0] = data;
+ }
+
+ return *m_descriptorSetLayout; // not passing the ownership
+ }
+
+ //! Update and bind the descriptor set for the given layer: src image in
+ //! SHADER_READ_ONLY_OPTIMAL at binding 0, dst image in GENERAL at binding 1.
+ void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
+ {
+ const VkDevice device = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+
+ const PerLayerData* data = m_perLayerData[layerNdx].get();
+
+ const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, *data->imageViewSrc, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
+ const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, *data->imageViewDst, VK_IMAGE_LAYOUT_GENERAL);
+
+ DescriptorSetUpdateBuilder()
+ .writeSingle(*data->descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
+ .writeSingle(*data->descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
+ .update(vk, device);
+ vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &data->descriptorSet.get(), 0u, DE_NULL);
+ }
+
+ //! Record pre-dispatch commands: transition both images into their required
+ //! layouts, make the host-written reference data visible, upload it into the
+ //! source image, then transition the source image for shader reads.
+ void ImageLoadStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
+ {
+     const DeviceInterface& vk = m_context.getDeviceInterface();
+
+     const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
+     {
+         // Source image first receives the buffer-to-image copy.
+         const VkImageMemoryBarrier barrierSetSrcImageLayout = makeImageMemoryBarrier(
+             0u, 0u,
+             VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+             m_imageSrc->get(), fullImageSubresourceRange);
+
+         // Destination image is written by the shader in GENERAL layout.
+         const VkImageMemoryBarrier barrierSetDstImageLayout = makeImageMemoryBarrier(
+             0u, 0u,
+             VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
+             m_imageDst->get(), fullImageSubresourceRange);
+
+         // Flush host writes in the helper buffer before the transfer reads it.
+         const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
+             VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
+             m_imageBuffer->get(), 0ull, m_imageSizeBytes);
+
+         const void* const barriers[] = { &barrierSetSrcImageLayout, &barrierSetDstImageLayout, &barrierFlushHostWriteBeforeCopy };
+
+         vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
+             DE_FALSE, DE_LENGTH_OF_ARRAY(barriers), barriers);
+     }
+     {
+         // After the copy, make the source image readable by the shader.
+         const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
+             VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
+             VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+             m_imageSrc->get(), fullImageSubresourceRange);
+
+         const void* const barriers[] = { &barrierAfterCopy };
+
+         const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
+
+         // BUGFIX: "&copyRegion" had been corrupted into the HTML entity
+         // "©Region" ("&copy" -> (c)); restore the address-of expression.
+         vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, &copyRegion);
+         vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(barriers), barriers);
+     }
+ }
+
+ //! Barrier between per-layer dispatches so writes to the destination image
+ //! from one invocation are ordered against the next.
+ void ImageLoadStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
+ {
+ commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
+ }
+
+ //! Copy the destination image back into the helper buffer for host readback.
+ void ImageLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
+ {
+ commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
+ }
+
+ //! Load/store test for buffers. Reads from the inherited helper buffer
+ //! (binding 0) and writes to a separate destination texel buffer (binding 1),
+ //! which is also the result buffer for verification.
+ class BufferLoadStoreTestInstance : public LoadStoreTestInstance
+ {
+ public:
+ BufferLoadStoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format,
+ const VkFormat imageFormat);
+
+ protected:
+ VkDescriptorSetLayout prepareDescriptors (void);
+ void commandAfterCompute (const VkCommandBuffer cmdBuffer);
+
+ void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
+ const VkPipelineLayout pipelineLayout,
+ const int layerNdx);
+
+ Buffer* getResultBuffer (void) const { return m_imageBufferDst.get(); }
+
+ de::MovePtr<Buffer> m_imageBufferDst;
+ Move<VkDescriptorSetLayout> m_descriptorSetLayout;
+ Move<VkDescriptorPool> m_descriptorPool;
+ Move<VkDescriptorSet> m_descriptorSet;
+ Move<VkBufferView> m_bufferViewSrc;
+ Move<VkBufferView> m_bufferViewDst;
+ };
+
+ //! Construct the buffer variant (no layers, so singleLayerBind is false) and
+ //! allocate a host-visible destination buffer of the same size as the source.
+ BufferLoadStoreTestInstance::BufferLoadStoreTestInstance (Context& context,
+ const Texture& texture,
+ const VkFormat format,
+ const VkFormat imageFormat)
+ : LoadStoreTestInstance(context, texture, format, imageFormat, false)
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+ Allocator& allocator = m_context.getDefaultAllocator();
+
+ // Create a destination buffer.
+
+ m_imageBufferDst = de::MovePtr<Buffer>(new Buffer(
+ vk, device, allocator,
+ makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
+ MemoryRequirement::HostVisible));
+ }
+
+ //! Create layout/pool/set for two storage texel buffers plus the src and dst
+ //! buffer views. Returns a non-owning handle to the layout.
+ VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors (void)
+ {
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+ const VkDevice device = m_context.getDevice();
+
+ m_descriptorSetLayout = DescriptorSetLayoutBuilder()
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+ .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
+ .build(vk, device);
+
+ m_descriptorPool = DescriptorPoolBuilder()
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
+ .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
+ .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
+
+ m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
+ m_bufferViewSrc = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
+ m_bufferViewDst = makeBufferView(vk, device, m_imageBufferDst->get(), m_format, 0ull, m_imageSizeBytes);
+
+ return *m_descriptorSetLayout; // not passing the ownership
+ }
+
+ //! Write both texel-buffer views into the descriptor set and bind it.
+ //! Buffers have no layers, so only layerNdx == 0 is valid.
+ void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
+ {
+ DE_ASSERT(layerNdx == 0);
+ DE_UNREF(layerNdx);
+
+ const VkDevice device = m_context.getDevice();
+ const DeviceInterface& vk = m_context.getDeviceInterface();
+
+ DescriptorSetUpdateBuilder()
+ .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewSrc.get())
+ .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
+ .update(vk, device);
+ vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
+ }
+
+ //! Barrier so shader writes to the destination buffer are visible to the host.
+ void BufferLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
+ {
+ commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(), m_imageSizeBytes);
+ }
+
+ //! Pick the instance variant: texel-buffer path for buffer textures,
+ //! storage-image path for everything else.
+ TestInstance* StoreTest::createInstance (Context& context) const
+ {
+     const bool useBufferPath = (m_texture.type() == IMAGE_TYPE_BUFFER);
+
+     return useBufferPath
+         ? static_cast<TestInstance*>(new BufferStoreTestInstance(context, m_texture, m_format))
+         : static_cast<TestInstance*>(new ImageStoreTestInstance(context, m_texture, m_format, m_singleLayerBind));
+ }
+
+ //! Pick the instance variant: texel-buffer path for buffer textures,
+ //! storage-image path for everything else.
+ TestInstance* LoadStoreTest::createInstance (Context& context) const
+ {
+     const bool useBufferPath = (m_texture.type() == IMAGE_TYPE_BUFFER);
+
+     return useBufferPath
+         ? static_cast<TestInstance*>(new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat))
+         : static_cast<TestInstance*>(new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_singleLayerBind));
+ }
+
+ // TODO Which image/format combinations should be supported? Spec says it should be queried with vkGetPhysicalDeviceImageFormatProperties.
+ // What about buffer/format? (texel storage buffer) (use vkGetPhysicalDeviceFormatProperties ?)
+
+ //! One texture configuration per image type; looked up via getTestTexture().
+ static const Texture s_textures[] =
+ {
+ Texture(IMAGE_TYPE_1D, tcu::IVec3(64, 1, 1), 1),
+ Texture(IMAGE_TYPE_1D_ARRAY, tcu::IVec3(64, 1, 1), 8),
+ Texture(IMAGE_TYPE_2D, tcu::IVec3(64, 64, 1), 1),
+ Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(64, 64, 1), 8),
+ Texture(IMAGE_TYPE_3D, tcu::IVec3(64, 64, 8), 1),
+ Texture(IMAGE_TYPE_CUBE, tcu::IVec3(64, 64, 1), 6),
+ Texture(IMAGE_TYPE_CUBE_ARRAY, tcu::IVec3(64, 64, 1), 2*6),
+ Texture(IMAGE_TYPE_BUFFER, tcu::IVec3(64, 1, 1), 1),
+ };
+
+ //! Return the canonical test texture for the given image type.
+ //! DE_FATALs (and falls back to the first entry) if no entry matches.
+ const Texture& getTestTexture (const ImageType imageType)
+ {
+     const int numTextures = DE_LENGTH_OF_ARRAY(s_textures);
+
+     for (int ndx = 0; ndx < numTextures; ++ndx)
+     {
+         const Texture& candidate = s_textures[ndx];
+         if (candidate.type() == imageType)
+             return candidate;
+     }
+
+     DE_FATAL("Internal error");
+     return s_textures[0];
+ }
+
+ //! Formats exercised by the store/load-store cases, grouped by channel class
+ //! (float, uint, sint, unorm, snorm).
+ static const VkFormat s_formats[] =
+ {
+ VK_FORMAT_R32G32B32A32_SFLOAT,
+ VK_FORMAT_R16G16B16A16_SFLOAT,
+ VK_FORMAT_R32_SFLOAT,
+
+ VK_FORMAT_R32G32B32A32_UINT,
+ VK_FORMAT_R16G16B16A16_UINT,
+ VK_FORMAT_R8G8B8A8_UINT,
+ VK_FORMAT_R32_UINT,
+
+ VK_FORMAT_R32G32B32A32_SINT,
+ VK_FORMAT_R16G16B16A16_SINT,
+ VK_FORMAT_R8G8B8A8_SINT,
+ VK_FORMAT_R32_SINT,
+
+ VK_FORMAT_R8G8B8A8_UNORM,
+
+ VK_FORMAT_R8G8B8A8_SNORM,
+ };
+
+ } // anonymous ns
+
+ //! Build the "store" group: one sub-group per image type, one case per
+ //! format, plus a "_single_layer" variant for layered textures.
+ tcu::TestCaseGroup* createImageStoreTests (tcu::TestContext& testCtx)
+ {
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store", "Plain imageStore() cases"));
+
+ for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
+ {
+ const Texture& texture = s_textures[textureNdx];
+ de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
+ const bool isLayered = (texture.numLayers() > 1);
+
+ for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
+ {
+ groupByImageViewType->addChild(new StoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]), "", texture, s_formats[formatNdx]));
+
+ if (isLayered)
+ groupByImageViewType->addChild(new StoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]) + "_single_layer", "",
+ texture, s_formats[formatNdx], StoreTest::FLAG_SINGLE_LAYER_BIND));
+ }
+ testGroup->addChild(groupByImageViewType.release());
+ }
+
+ return testGroup.release();
+ }
+
+ //! Build the "load_store" group: one sub-group per image type, one case per
+ //! format (shader format == storage format), plus a "_single_layer" variant
+ //! for layered textures.
+ tcu::TestCaseGroup* createImageLoadStoreTests (tcu::TestContext& testCtx)
+ {
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store", "Cases with imageLoad() followed by imageStore()"));
+
+ for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
+ {
+ const Texture& texture = s_textures[textureNdx];
+ de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
+ const bool isLayered = (texture.numLayers() > 1);
+
+ for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
+ {
+ groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]), "",
+ texture, s_formats[formatNdx], s_formats[formatNdx]));
+
+ if (isLayered)
+ groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]) + "_single_layer", "",
+ texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_SINGLE_LAYER_BIND));
+ }
+ testGroup->addChild(groupByImageViewType.release());
+ }
+
+ return testGroup.release();
+ }
+
+ //! Build the "format_reinterpret" group: for every ordered pair of distinct,
+ //! reinterpretation-compatible formats, add a load/store case whose shader
+ //! format differs from the storage format.
+ tcu::TestCaseGroup* createImageFormatReinterpretTests (tcu::TestContext& testCtx)
+ {
+ de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret", "Cases with differing texture and image formats"));
+
+ for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
+ {
+ const Texture& texture = s_textures[textureNdx];
+ de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
+
+ for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
+ for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
+ {
+ //TODO Are all conversions valid or do we have to limit (or expand) somehow? Is it stated anywhere in the spec?
+
+ const std::string caseName = getFormatCaseName(s_formats[imageFormatNdx]) + "_" + getFormatCaseName(s_formats[formatNdx]);
+ if (imageFormatNdx != formatNdx && formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
+ groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, "", texture, s_formats[formatNdx], s_formats[imageFormatNdx]));
+ }
+ testGroup->addChild(groupByImageViewType.release());
+ }
+
+ return testGroup.release();
+ }
+
+ //! Create a single load/store case with the "restrict" qualifier on both
+ //! shader images, used by the memory-qualifier test group.
+ de::MovePtr<TestCase> createImageQualifierRestrictCase (tcu::TestContext& testCtx, const ImageType imageType, const std::string& name)
+ {
+ const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
+ const Texture& texture = getTestTexture(imageType);
+ return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, "", texture, format, format, LoadStoreTest::FLAG_RESTRICT_IMAGES));
+ }
+
+ } // image
+ } // vkt
--- /dev/null
-
+ /*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Mobica Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Memory qualifiers tests
+ *//*--------------------------------------------------------------------*/
+
+ #include "vktImageQualifiersTests.hpp"
+ #include "vktImageLoadStoreTests.hpp"
+ #include "vktImageTestsUtil.hpp"
+
+ #include "vkDefs.hpp"
+ #include "vkImageUtil.hpp"
+ #include "vkRef.hpp"
+ #include "vkRefUtil.hpp"
+ #include "vktTestCase.hpp"
+ #include "vktTestCaseUtil.hpp"
+ #include "vkPlatform.hpp"
+ #include "vkPrograms.hpp"
+ #include "vkMemUtil.hpp"
+ #include "vkBuilderUtil.hpp"
+ #include "vkQueryUtil.hpp"
+ #include "vkTypeUtil.hpp"
+
+ #include "deDefs.hpp"
+ #include "deStringUtil.hpp"
+ #include "deUniquePtr.hpp"
+
+ #include "tcuImageCompare.hpp"
+ #include "tcuTexture.hpp"
+ #include "tcuTextureUtil.hpp"
+ #include "tcuVectorType.hpp"
+
+ using namespace vk;
+
+ namespace vkt
+ {
+ namespace image
+ {
+ namespace
+ {
+
+ //! Upper bound for the local work group size; clamped per-axis to the grid.
+ static const tcu::UVec3 g_localWorkGroupSizeBase = tcu::UVec3(8, 8, 2);
+ // Read offsets used by the generated shaders. The *Str tables are the GLSL
+ // array-literal form and must stay in sync with the matching int arrays.
+ static const deInt32 g_ShaderReadOffsetsX[4] = { 1, 4, 7, 10 };
+ static const deInt32 g_ShaderReadOffsetsY[4] = { 2, 5, 8, 11 };
+ static const deInt32 g_ShaderReadOffsetsZ[4] = { 3, 6, 9, 12 };
+ static const char* const g_ShaderReadOffsetsXStr = "int[]( 1, 4, 7, 10 )";
+ static const char* const g_ShaderReadOffsetsYStr = "int[]( 2, 5, 8, 11 )";
+ static const char* const g_ShaderReadOffsetsZStr = "int[]( 3, 6, 9, 12 )";
+
+ //! Map an image type and size (xyz extent, w = layers) to the total compute
+ //! grid dimensions.
+ const tcu::UVec3 getComputeGridSize (const ImageType imageType, const tcu::UVec4& imageSize)
+ {
+     // 1D arrays put the layer count in y; every other type folds layers
+     // (z * w) into the z dimension.
+     if (imageType == IMAGE_TYPE_1D_ARRAY)
+         return tcu::UVec3(imageSize.x(), imageSize.w(), 1);
+
+     if (imageType == IMAGE_TYPE_1D || imageType == IMAGE_TYPE_2D || imageType == IMAGE_TYPE_2D_ARRAY ||
+         imageType == IMAGE_TYPE_3D || imageType == IMAGE_TYPE_CUBE || imageType == IMAGE_TYPE_CUBE_ARRAY ||
+         imageType == IMAGE_TYPE_BUFFER)
+     {
+         return tcu::UVec3(imageSize.x(), imageSize.y(), imageSize.z() * imageSize.w());
+     }
+
+     DE_FATAL("Unknown image type");
+     return tcu::UVec3(1, 1, 1);
+ }
+
+ //! Clamp the base local work group size to the compute grid extent, per axis.
+ const tcu::UVec3 getLocalWorkGroupSize (const ImageType imageType, const tcu::UVec4& imageSize)
+ {
+     const tcu::UVec3 gridSize = getComputeGridSize(imageType, imageSize);
+
+     return tcu::UVec3(de::min(g_localWorkGroupSizeBase.x(), gridSize.x()),
+                       de::min(g_localWorkGroupSizeBase.y(), gridSize.y()),
+                       de::min(g_localWorkGroupSizeBase.z(), gridSize.z()));
+ }
+
+ //! Number of work groups needed to cover the compute grid (component-wise
+ //! division; the base sizes are chosen so the division is exact).
+ const tcu::UVec3 getNumWorkGroups (const ImageType imageType, const tcu::UVec4& imageSize)
+ {
+     return getComputeGridSize(imageType, imageSize) / getLocalWorkGroupSize(imageType, imageSize);
+ }
+
+ //! Return a view of one layer (arrays/cubes), one slice (3D) or the whole
+ //! access (non-layered 1D/2D/buffer, where layer must be 0).
+ tcu::ConstPixelBufferAccess getLayerOrSlice (const ImageType imageType,
+ const tcu::ConstPixelBufferAccess& access,
+ const deUint32 layer)
+ {
+ switch (imageType)
+ {
+ case IMAGE_TYPE_1D:
+ case IMAGE_TYPE_2D:
+ case IMAGE_TYPE_BUFFER:
+ // Not layered; the only valid "layer" is the access itself.
+ DE_ASSERT(layer == 0);
+ return access;
+
+ case IMAGE_TYPE_1D_ARRAY:
+ return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);
+
+ case IMAGE_TYPE_2D_ARRAY:
+ case IMAGE_TYPE_3D:
+ case IMAGE_TYPE_CUBE:
+ case IMAGE_TYPE_CUBE_ARRAY:
+ return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);
+
+ default:
+ DE_FATAL("Unknown image type");
+ return tcu::ConstPixelBufferAccess();
+ }
+ }
+
+ //! Compare reference and result layer by layer (slice by slice for 3D).
+ //! Integer formats use exact comparison, float formats a small threshold.
+ //! Returns true only if every layer passes.
+ //! (Fix: two corrupted patch lines — a stray "-" removal and a "++" line —
+ //! replaced the blank line here and would have injected a bare "+" token
+ //! into the file; normalized back to a blank line.)
+ bool comparePixelBuffers (tcu::TestContext& testCtx,
+     const ImageType imageType,
+     const tcu::UVec4& imageSize,
+     const tcu::TextureFormat& format,
+     const tcu::ConstPixelBufferAccess& reference,
+     const tcu::ConstPixelBufferAccess& result)
+ {
+     DE_ASSERT(reference.getFormat() == result.getFormat());
+     DE_ASSERT(reference.getSize() == result.getSize());
+
+     const bool intFormat = isIntFormat(mapTextureFormat(format)) || isUintFormat(mapTextureFormat(format));
+     deUint32 passedLayers = 0;
+
+     for (deUint32 layerNdx = 0; layerNdx < imageSize.z() * imageSize.w(); ++layerNdx)
+     {
+         const std::string comparisonName = "Comparison" + de::toString(layerNdx);
+
+         // Human-readable description for the test log.
+         std::string comparisonDesc = "Image Comparison, ";
+         switch (imageType)
+         {
+             case IMAGE_TYPE_3D:
+                 comparisonDesc = comparisonDesc + "slice " + de::toString(layerNdx);
+                 break;
+
+             case IMAGE_TYPE_CUBE:
+             case IMAGE_TYPE_CUBE_ARRAY:
+                 comparisonDesc = comparisonDesc + "face " + de::toString(layerNdx % 6) + ", cube " + de::toString(layerNdx / 6);
+                 break;
+
+             default:
+                 comparisonDesc = comparisonDesc + "layer " + de::toString(layerNdx);
+                 break;
+         }
+
+         const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(imageType, reference, layerNdx);
+         const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(imageType, result, layerNdx);
+
+         bool ok = false;
+         if (intFormat)
+             ok = tcu::intThresholdCompare(testCtx.getLog(), comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
+         else
+             ok = tcu::floatThresholdCompare(testCtx.getLog(), comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
+
+         if (ok)
+             ++passedLayers;
+     }
+
+     return passedLayers == (imageSize.z() * imageSize.w());
+ }
+
+ //! Build a GLSL texel-coordinate expression of the arity matching the image
+ //! type: scalar, ivec2 or ivec3.
+ const std::string getCoordStr (const ImageType imageType,
+     const std::string& x,
+     const std::string& y,
+     const std::string& z)
+ {
+     if (imageType == IMAGE_TYPE_1D || imageType == IMAGE_TYPE_BUFFER)
+         return x;
+
+     if (imageType == IMAGE_TYPE_1D_ARRAY || imageType == IMAGE_TYPE_2D)
+         return "ivec2(" + x + "," + y + ")";
+
+     if (imageType == IMAGE_TYPE_2D_ARRAY || imageType == IMAGE_TYPE_3D ||
+         imageType == IMAGE_TYPE_CUBE || imageType == IMAGE_TYPE_CUBE_ARRAY)
+     {
+         return "ivec3(" + x + "," + y + "," + z + ")";
+     }
+
+     DE_ASSERT(false);
+     return "";
+ }
+
+ //! Test case for GLSL memory qualifiers (coherent/volatile/restrict) on
+ //! storage images, parameterized by image type, size, format and GLSL version.
+ class MemoryQualifierTestCase : public vkt::TestCase
+ {
+ public:
+
+ enum Qualifier
+ {
+ QUALIFIER_COHERENT = 0,
+ QUALIFIER_VOLATILE,
+ QUALIFIER_RESTRICT,
+ QUALIFIER_LAST
+ };
+
+ MemoryQualifierTestCase (tcu::TestContext& testCtx,
+ const std::string& name,
+ const std::string& description,
+ const Qualifier qualifier,
+ const ImageType imageType,
+ const tcu::UVec4& imageSize,
+ const tcu::TextureFormat& format,
+ const glu::GLSLVersion glslVersion);
+
+ virtual ~MemoryQualifierTestCase (void) {}
+
+ virtual void initPrograms (SourceCollections& programCollection) const;
+ virtual TestInstance* createInstance (Context& context) const;
+
+ protected:
+
+ const Qualifier m_qualifier;
+ const ImageType m_imageType;
+ const tcu::UVec4 m_imageSize;
+ const tcu::TextureFormat m_format;
+ const glu::GLSLVersion m_glslVersion;
+ };
+
//! Stores the test parameters; no Vulkan objects are created at this point.
MemoryQualifierTestCase::MemoryQualifierTestCase (tcu::TestContext&			testCtx,
												  const std::string&		name,
												  const std::string&		description,
												  const Qualifier			qualifier,
												  const ImageType			imageType,
												  const tcu::UVec4&			imageSize,
												  const tcu::TextureFormat&	format,
												  const glu::GLSLVersion	glslVersion)
	: vkt::TestCase(testCtx, name, description)
	, m_qualifier(qualifier)
	, m_imageType(imageType)
	, m_imageSize(imageSize)
	, m_format(format)
	, m_glslVersion(glslVersion)
{
}
+
+ void MemoryQualifierTestCase::initPrograms (SourceCollections& programCollection) const
+ {
+ const char* const versionDecl = glu::getGLSLVersionDeclaration(m_glslVersion);
+
+ const char* const qualifierName = m_qualifier == QUALIFIER_COHERENT ? "coherent"
+ : m_qualifier == QUALIFIER_VOLATILE ? "volatile"
+ : DE_NULL;
+
+ const bool uintFormat = isUintFormat(mapTextureFormat(m_format));
+ const bool intFormat = isIntFormat(mapTextureFormat(m_format));
+ const std::string colorVecTypeName = std::string(uintFormat ? "u" : intFormat ? "i" : "") + "vec4";
+ const std::string colorScalarTypeName = std::string(uintFormat ? "uint" : intFormat ? "int" : "float");
+ const std::string invocationCoord = getCoordStr(m_imageType, "gx", "gy", "gz");
+ const std::string shaderImageFormat = getShaderImageFormatQualifier(m_format);
+ const std::string shaderImageType = getShaderImageType(m_format, m_imageType);
+
+ const tcu::UVec3 localWorkGroupSize = getLocalWorkGroupSize(m_imageType, m_imageSize);
+ const std::string localSizeX = de::toString(localWorkGroupSize.x());
+ const std::string localSizeY = de::toString(localWorkGroupSize.y());
+ const std::string localSizeZ = de::toString(localWorkGroupSize.z());
+
+ std::ostringstream programBuffer;
+
+ programBuffer
+ << versionDecl << "\n"
+ << "\n"
+ << "precision highp " << shaderImageType << ";\n"
+ << "\n"
+ << "layout (local_size_x = " << localSizeX << ", local_size_y = " << localSizeY << ", local_size_z = " + localSizeZ << ") in;\n"
+ << "layout (" << shaderImageFormat << ", binding=0) " << qualifierName << " uniform " << shaderImageType << " u_image;\n"
+ << "void main (void)\n"
+ << "{\n"
+ << " int gx = int(gl_GlobalInvocationID.x);\n"
+ << " int gy = int(gl_GlobalInvocationID.y);\n"
+ << " int gz = int(gl_GlobalInvocationID.z);\n"
+ << " imageStore(u_image, " << invocationCoord << ", " << colorVecTypeName << "(gx^gy^gz));\n"
+ << "\n"
+ << " memoryBarrier();\n"
+ << " barrier();\n"
+ << "\n"
+ << " " << colorScalarTypeName << " sum = " << colorScalarTypeName << "(0);\n"
+ << " int groupBaseX = gx/" << localSizeX << "*" << localSizeX << ";\n"
+ << " int groupBaseY = gy/" << localSizeY << "*" << localSizeY << ";\n"
+ << " int groupBaseZ = gz/" << localSizeZ << "*" << localSizeZ << ";\n"
+ << " int xOffsets[] = " << g_ShaderReadOffsetsXStr << ";\n"
+ << " int yOffsets[] = " << g_ShaderReadOffsetsYStr << ";\n"
+ << " int zOffsets[] = " << g_ShaderReadOffsetsZStr << ";\n"
+ << " for (int i = 0; i < " << de::toString(DE_LENGTH_OF_ARRAY(g_ShaderReadOffsetsX)) << "; i++)\n"
+ << " {\n"
+ << " int readX = groupBaseX + (gx + xOffsets[i]) % " + localSizeX + ";\n"
+ << " int readY = groupBaseY + (gy + yOffsets[i]) % " + localSizeY + ";\n"
+ << " int readZ = groupBaseZ + (gz + zOffsets[i]) % " + localSizeZ + ";\n"
+ << " sum += imageLoad(u_image, " << getCoordStr(m_imageType, "readX", "readY", "readZ") << ").x;\n"
+ << " }\n"
+ << "\n"
+ << " memoryBarrier();\n"
+ << " barrier();\n"
+ << "\n"
+ << " imageStore(u_image, " + invocationCoord + ", " + colorVecTypeName + "(sum));\n"
+ << "}\n";
+
+ programCollection.glslSources.add(m_name) << glu::ComputeSource(programBuffer.str());
+ }
+
//! Base class for memory-qualifier test instances: iterate() runs a compute
//! pipeline over a storage resource and compares the result, read back via
//! m_buffer, against a CPU-generated reference image.
class MemoryQualifierInstanceBase : public vkt::TestInstance
{
public:
							MemoryQualifierInstanceBase	(Context&					context,
														 const std::string&			name,
														 const ImageType			imageType,
														 const tcu::UVec4&			imageSize,
														 const tcu::TextureFormat&	format);

	virtual					~MemoryQualifierInstanceBase (void) {};

	//! Runs the whole test: setup, dispatch, readback, comparison.
	virtual tcu::TestStatus	iterate						(void);

	//! Creates the image/buffer resources accessed by the shader.
	virtual void			prepareResources			(const VkDeviceSize bufferSizeInBytes) = 0;

	//! Creates the descriptor pool/layout/set binding the storage resource.
	virtual void			prepareDescriptors			(void) = 0;

	//! Commands recorded before vkCmdDispatch (e.g. layout transitions).
	virtual void			commandsBeforeCompute		(const VkCommandBuffer	cmdBuffer,
														 const VkDeviceSize		bufferSizeInBytes) const = 0;

	//! Commands recorded after vkCmdDispatch that land results in m_buffer.
	virtual void			commandsAfterCompute		(const VkCommandBuffer	cmdBuffer,
														 const VkDeviceSize		bufferSizeInBytes) const = 0;
protected:

	tcu::TextureLevel		generateReferenceImage		(void) const;

	const std::string			m_name;			// key of the compiled shader in the binary collection
	const ImageType				m_imageType;
	const tcu::UVec4			m_imageSize;
	const tcu::TextureFormat	m_format;

	de::MovePtr<Buffer>			m_buffer;		// host-visible buffer holding the shader output
	Move<VkDescriptorPool>		m_descriptorPool;
	Move<VkDescriptorSetLayout>	m_descriptorSetLayout;
	Move<VkDescriptorSet>		m_descriptorSet;
};
+
//! Stores the test parameters; Vulkan objects are created later in iterate().
MemoryQualifierInstanceBase::MemoryQualifierInstanceBase (Context&					context,
														  const std::string&		name,
														  const ImageType			imageType,
														  const tcu::UVec4&			imageSize,
														  const tcu::TextureFormat&	format)
	: vkt::TestInstance(context)
	, m_name(name)
	, m_imageType(imageType)
	, m_imageSize(imageSize)
	, m_format(format)
{
}
+
//! Executes the test: builds the compute pipeline, records the command buffer
//! (with subclass hooks around the dispatch), submits it, reads the result
//! back through m_buffer and compares against the CPU reference.
tcu::TestStatus MemoryQualifierInstanceBase::iterate (void)
{
	const VkDevice			device				= m_context.getDevice();
	const DeviceInterface&	deviceInterface		= m_context.getDeviceInterface();
	const VkQueue			queue				= m_context.getUniversalQueue();
	const deUint32			queueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();

	// Tightly-packed byte size of the whole image, all layers included.
	const VkDeviceSize bufferSizeInBytes = m_imageSize.x() * m_imageSize.y() * m_imageSize.z() * m_imageSize.w() * tcu::getPixelSize(m_format);

	// Prepare resources for the test
	prepareResources(bufferSizeInBytes);

	// Prepare descriptor sets
	prepareDescriptors();

	// Create compute shader
	const vk::Unique<VkShaderModule> shaderModule(createShaderModule(deviceInterface, device, m_context.getBinaryCollection().get(m_name), 0u));

	// Create compute pipeline
	const vk::Unique<VkPipelineLayout>	pipelineLayout(makePipelineLayout(deviceInterface, device, *m_descriptorSetLayout));
	const vk::Unique<VkPipeline>		pipeline(makeComputePipeline(deviceInterface, device, *pipelineLayout, *shaderModule));

	// Create command buffer
	const Unique<VkCommandPool>		cmdPool(makeCommandPool(deviceInterface, device, queueFamilyIndex));
	const Unique<VkCommandBuffer>	cmdBuffer(makeCommandBuffer(deviceInterface, device, *cmdPool));

	// Start recording commands
	beginCommandBuffer(deviceInterface, *cmdBuffer);

	deviceInterface.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
	deviceInterface.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);

	// Subclass hook, e.g. transition the image to VK_IMAGE_LAYOUT_GENERAL.
	commandsBeforeCompute(*cmdBuffer, bufferSizeInBytes);

	const tcu::UVec3 numGroups = getNumWorkGroups(m_imageType, m_imageSize);
	deviceInterface.cmdDispatch(*cmdBuffer, numGroups.x(), numGroups.y(), numGroups.z());

	// Subclass hook: barriers/copies that make results available in m_buffer.
	commandsAfterCompute(*cmdBuffer, bufferSizeInBytes);

	endCommandBuffer(deviceInterface, *cmdBuffer);

	// Submit and wait for completion
	submitCommandsAndWait(deviceInterface, device, queue, *cmdBuffer);

	// Retrieve data from buffer to host memory
	const Allocation& allocation = m_buffer->getAllocation();
	invalidateMappedMemoryRange(deviceInterface, device, allocation.getMemory(), allocation.getOffset(), bufferSizeInBytes);

	const tcu::UVec3 computeGridSize = getComputeGridSize(m_imageType, m_imageSize);
	tcu::ConstPixelBufferAccess resultPixelBuffer(m_format, computeGridSize.x(), computeGridSize.y(), computeGridSize.z(), allocation.getHostPtr());

	// Create a reference image
	tcu::TextureLevel referenceImage = generateReferenceImage();
	tcu::ConstPixelBufferAccess referencePixelBuffer = referenceImage.getAccess();

	// Validate the result
	if (comparePixelBuffers(m_context.getTestContext(), m_imageType, m_imageSize, m_format, referencePixelBuffer, resultPixelBuffer))
		return tcu::TestStatus::pass("Passed");
	else
		return tcu::TestStatus::fail("Image comparison failed");
}
+
//! Builds the expected result on the CPU, mirroring the compute shader:
//! every texel starts as x^y^z, then becomes the sum of texels read from
//! fixed offsets within the same local work group.
tcu::TextureLevel MemoryQualifierInstanceBase::generateReferenceImage (void) const
{
	// Generate a reference image data using the storage format
	const tcu::UVec3 computeGridSize = getComputeGridSize(m_imageType, m_imageSize);

	tcu::TextureLevel base(m_format, computeGridSize.x(), computeGridSize.y(), computeGridSize.z());
	tcu::PixelBufferAccess baseAccess = base.getAccess();

	tcu::TextureLevel reference(m_format, computeGridSize.x(), computeGridSize.y(), computeGridSize.z());
	tcu::PixelBufferAccess referenceAccess = reference.getAccess();

	// First pass: what every shader invocation stores initially (gx^gy^gz).
	for (deInt32 z = 0; z < baseAccess.getDepth(); ++z)
	for (deInt32 y = 0; y < baseAccess.getHeight(); ++y)
	for (deInt32 x = 0; x < baseAccess.getWidth(); ++x)
	{
		baseAccess.setPixel(tcu::IVec4(x^y^z), x, y, z);
	}

	const tcu::UVec3 localWorkGroupSize = getLocalWorkGroupSize(m_imageType, m_imageSize);

	// Second pass: each texel becomes the sum over the read-offset table,
	// wrapped within its own work group (same arithmetic as the shader).
	// NOTE(review): the % mixes deInt32 with the unsigned work-group size, so
	// this assumes (coord + offset) is never negative — confirm that the
	// g_ShaderReadOffsets* tables contain only non-negative values.
	for (deInt32 z = 0; z < referenceAccess.getDepth(); ++z)
	for (deInt32 y = 0; y < referenceAccess.getHeight(); ++y)
	for (deInt32 x = 0; x < referenceAccess.getWidth(); ++x)
	{
		const deInt32 groupBaseX = x / localWorkGroupSize.x() * localWorkGroupSize.x();
		const deInt32 groupBaseY = y / localWorkGroupSize.y() * localWorkGroupSize.y();
		const deInt32 groupBaseZ = z / localWorkGroupSize.z() * localWorkGroupSize.z();
		deInt32 sum = 0;

		for (deInt32 i = 0; i < DE_LENGTH_OF_ARRAY(g_ShaderReadOffsetsX); i++)
		{
			sum += baseAccess.getPixelInt(
				groupBaseX + (x + g_ShaderReadOffsetsX[i]) % localWorkGroupSize.x(),
				groupBaseY + (y + g_ShaderReadOffsetsY[i]) % localWorkGroupSize.y(),
				groupBaseZ + (z + g_ShaderReadOffsetsZ[i]) % localWorkGroupSize.z()).x();
		}

		referenceAccess.setPixel(tcu::IVec4(sum), x, y, z);
	}

	return reference;
}
+
+ class MemoryQualifierInstanceImage : public MemoryQualifierInstanceBase
+ {
+ public:
+ MemoryQualifierInstanceImage (Context& context,
+ const std::string& name,
+ const ImageType imageType,
+ const tcu::UVec4& imageSize,
+ const tcu::TextureFormat& format)
+ : MemoryQualifierInstanceBase(context, name, imageType, imageSize, format) {}
+
+ virtual ~MemoryQualifierInstanceImage (void) {};
+
+ virtual void prepareResources (const VkDeviceSize bufferSizeInBytes);
+
+ virtual void prepareDescriptors (void);
- virtual void commandsAfterCompute (const VkCommandBuffer cmdBuffer,
++
+ virtual void commandsBeforeCompute (const VkCommandBuffer cmdBuffer,
+ const VkDeviceSize bufferSizeInBytes) const;
+
- m_imageType == IMAGE_TYPE_CUBE ||
++ virtual void commandsAfterCompute (const VkCommandBuffer cmdBuffer,
+ const VkDeviceSize bufferSizeInBytes) const;
+ protected:
+
+ de::MovePtr<Image> m_image;
+ Move<VkImageView> m_imageView;
+ };
+
+ void MemoryQualifierInstanceImage::prepareResources (const VkDeviceSize bufferSizeInBytes)
+ {
+ const VkDevice device = m_context.getDevice();
+ const DeviceInterface& deviceInterface = m_context.getDeviceInterface();
+ Allocator& allocator = m_context.getDefaultAllocator();
+
+ // Create image
+ const VkImageCreateInfo imageCreateInfo =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
- m_descriptorSetLayout =
++ m_imageType == IMAGE_TYPE_CUBE ||
+ m_imageType == IMAGE_TYPE_CUBE_ARRAY
+ ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u, // VkImageCreateFlags flags;
+ mapImageType(m_imageType), // VkImageType imageType;
+ mapTextureFormat(m_format), // VkFormat format;
+ vk::makeExtent3D(m_imageSize.x(), m_imageSize.y(), m_imageSize.z()), // VkExtent3D extent;
+ 1u, // deUint32 mipLevels;
+ m_imageSize.w(), // deUint32 arrayLayers;
+ VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
+ VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
+ VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_STORAGE_BIT, // VkImageUsageFlags usage;
+ VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
+ 0u, // deUint32 queueFamilyIndexCount;
+ DE_NULL, // const deUint32* pQueueFamilyIndices;
+ VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
+ };
+
+ m_image = de::MovePtr<Image>(new Image(deviceInterface, device, allocator, imageCreateInfo, MemoryRequirement::Any));
+
+ // Create imageView
+ const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_imageSize.w());
+ m_imageView = makeImageView(deviceInterface, device, m_image->get(), mapImageViewType(m_imageType), mapTextureFormat(m_format), subresourceRange);
+
+ // Create a buffer to store shader output (copied from image data)
+ const VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(bufferSizeInBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+ m_buffer = de::MovePtr<Buffer>(new Buffer(deviceInterface, device, allocator, bufferCreateInfo, MemoryRequirement::HostVisible));
+ }
//! Creates the descriptor pool/layout/set and binds the image view as a
//! storage image at binding 0 (layout GENERAL, as used during the dispatch).
void MemoryQualifierInstanceImage::prepareDescriptors (void)
{
	const VkDevice			device			= m_context.getDevice();
	const DeviceInterface&	deviceInterface	= m_context.getDeviceInterface();

	// Create descriptor pool
	m_descriptorPool =
		DescriptorPoolBuilder()
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
		.build(deviceInterface, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);

	// Create descriptor set layout
	m_descriptorSetLayout =
		DescriptorSetLayoutBuilder()
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
		.build(deviceInterface, device);

	// Allocate descriptor set
	m_descriptorSet = makeDescriptorSet(deviceInterface, device, *m_descriptorPool, *m_descriptorSetLayout);

	// Set the bindings (no sampler needed for a storage image, hence DE_NULL)
	const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, *m_imageView, VK_IMAGE_LAYOUT_GENERAL);

	DescriptorSetUpdateBuilder()
		.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
		.update(deviceInterface, device);
}
+
//! Records the UNDEFINED -> GENERAL layout transition for the storage image
//! so the compute shader can perform image load/store on it.
void MemoryQualifierInstanceImage::commandsBeforeCompute (const VkCommandBuffer cmdBuffer, const VkDeviceSize bufferSizeInBytes) const
{
	DE_UNREF(bufferSizeInBytes);

	const DeviceInterface&			deviceInterface		= m_context.getDeviceInterface();
	const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_imageSize.w());

	// NOTE(review): dstAccessMask lists SHADER_READ only, although the shader
	// stores to the image before reading it — confirm whether SHADER_WRITE
	// should be included here as well.
	const VkImageMemoryBarrier imageLayoutBarrier
		= makeImageMemoryBarrier(0u,
								 VK_ACCESS_SHADER_READ_BIT,
								 VK_IMAGE_LAYOUT_UNDEFINED,
								 VK_IMAGE_LAYOUT_GENERAL,
								 m_image->get(),
								 subresourceRange);

	// Old-style cmdPipelineBarrier signature: an array of untyped barrier pointers.
	const void* preComputeBarriers[] = { &imageLayoutBarrier };
	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(preComputeBarriers), preComputeBarriers);
}
+
+ void MemoryQualifierInstanceImage::commandsAfterCompute (const VkCommandBuffer cmdBuffer, const VkDeviceSize bufferSizeInBytes) const
+ {
+ const DeviceInterface& deviceInterface = m_context.getDeviceInterface();
+ const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_imageSize.w());
+
+ const VkImageMemoryBarrier imagePreCopyBarrier
+ = makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
+ VK_ACCESS_TRANSFER_READ_BIT,
+ VK_IMAGE_LAYOUT_GENERAL,
+ VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+ m_image->get(),
+ subresourceRange);
+
+ const void* preCopyBarriers[] = { &imagePreCopyBarrier };
+ deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(preCopyBarriers), preCopyBarriers);
+
+ const VkBufferImageCopy copyParams = makeBufferImageCopy(vk::makeExtent3D(m_imageSize.x(), m_imageSize.y(), m_imageSize.z()), m_imageSize.w());
+ deviceInterface.cmdCopyImageToBuffer(cmdBuffer, m_image->get(), VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, m_buffer->get(), 1u, ©Params);
+
+ const VkBufferMemoryBarrier bufferPostCopyBarrier
+ = makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_HOST_READ_BIT,
+ m_buffer->get(),
+ 0ull,
+ bufferSizeInBytes);
+
+ const void* postCopyBarriers[] = { &bufferPostCopyBarrier };
+ deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(postCopyBarriers), postCopyBarriers);
+ }
+
//! Instance backing the storage access with a storage texel buffer: the
//! shader writes straight into the host-visible m_buffer, so no before/after
//! image transitions or copies are needed (commandsBeforeCompute is a no-op).
class MemoryQualifierInstanceBuffer : public MemoryQualifierInstanceBase
{
public:
					MemoryQualifierInstanceBuffer (Context&						context,
												   const std::string&			name,
												   const ImageType				imageType,
												   const tcu::UVec4&			imageSize,
												   const tcu::TextureFormat&	format)
						: MemoryQualifierInstanceBase(context, name, imageType, imageSize, format) {}

	virtual			~MemoryQualifierInstanceBuffer (void) {};

	virtual void	prepareResources		(const VkDeviceSize bufferSizeInBytes);

	virtual void	prepareDescriptors		(void);

	virtual void	commandsBeforeCompute	(const VkCommandBuffer,
											 const VkDeviceSize) const {}

	virtual void	commandsAfterCompute	(const VkCommandBuffer	cmdBuffer,
											 const VkDeviceSize		bufferSizeInBytes) const;
protected:

	Move<VkBufferView>	m_bufferView;	// texel-buffer view over the inherited m_buffer
};
+
+ void MemoryQualifierInstanceBuffer::prepareResources (const VkDeviceSize bufferSizeInBytes)
+ {
+ const VkDevice device = m_context.getDevice();
+ const DeviceInterface& deviceInterface = m_context.getDeviceInterface();
+ Allocator& allocator = m_context.getDefaultAllocator();
+
+ // Create a buffer to store shader output
+ const VkBufferCreateInfo bufferCreateInfo = makeBufferCreateInfo(bufferSizeInBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT);
+ m_buffer = de::MovePtr<Buffer>(new Buffer(deviceInterface, device, allocator, bufferCreateInfo, MemoryRequirement::HostVisible));
+
+ m_bufferView = makeBufferView(deviceInterface, device, m_buffer->get(), mapTextureFormat(m_format), 0ull, bufferSizeInBytes);
+ }
+
//! Creates the descriptor pool/layout/set and binds the buffer view as a
//! storage texel buffer at binding 0.
void MemoryQualifierInstanceBuffer::prepareDescriptors (void)
{
	const VkDevice			device			= m_context.getDevice();
	const DeviceInterface&	deviceInterface	= m_context.getDeviceInterface();

	// Create descriptor pool
	m_descriptorPool =
		DescriptorPoolBuilder()
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
		.build(deviceInterface, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);

	// Create descriptor set layout
	m_descriptorSetLayout =
		DescriptorSetLayoutBuilder()
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
		.build(deviceInterface, device);

	// Allocate descriptor set
	m_descriptorSet = makeDescriptorSet(deviceInterface, device, *m_descriptorPool, *m_descriptorSetLayout);

	// Set the bindings
	DescriptorSetUpdateBuilder()
		.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
		.update(deviceInterface, device);
}
+
//! Makes shader writes to the texel buffer visible to host reads; no copy is
//! needed since the shader wrote straight into the host-visible m_buffer.
void MemoryQualifierInstanceBuffer::commandsAfterCompute (const VkCommandBuffer cmdBuffer, const VkDeviceSize bufferSizeInBytes) const
{
	const DeviceInterface& deviceInterface = m_context.getDeviceInterface();

	const VkBufferMemoryBarrier shaderWriteBarrier
		= makeBufferMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
								  VK_ACCESS_HOST_READ_BIT,
								  m_buffer->get(),
								  0ull,
								  bufferSizeInBytes);

	// Old-style cmdPipelineBarrier signature: an array of untyped barrier pointers.
	const void* barriers[] = { &shaderWriteBarrier };
	deviceInterface.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, DE_FALSE, DE_LENGTH_OF_ARRAY(barriers), barriers);
}
+
+ TestInstance* MemoryQualifierTestCase::createInstance (Context& context) const
+ {
+ if ( m_imageType == IMAGE_TYPE_BUFFER )
+ return new MemoryQualifierInstanceBuffer(context, m_name, m_imageType, m_imageSize, m_format);
+ else
+ return new MemoryQualifierInstanceImage(context, m_name, m_imageType, m_imageSize, m_format);
+ }
+
+ } // anonymous ns
+
+ tcu::TestCaseGroup* createImageQualifiersTests (tcu::TestContext& testCtx)
+ {
+ de::MovePtr<tcu::TestCaseGroup> imageQualifiersTests(new tcu::TestCaseGroup(testCtx, "qualifiers", "Coherent, volatile and restrict"));
+
+ struct ImageParameters
+ {
+ ImageType imageType;
+ tcu::UVec4 imageSize;
+ };
+
+ static const ImageParameters imageParametersArray[] =
+ {
+ { IMAGE_TYPE_1D, tcu::UVec4(64, 1, 1, 1) },
+ { IMAGE_TYPE_1D_ARRAY, tcu::UVec4(64, 1, 1, 8) },
+ { IMAGE_TYPE_2D, tcu::UVec4(64, 64, 1, 1) },
+ { IMAGE_TYPE_2D_ARRAY, tcu::UVec4(64, 64, 1, 8) },
+ { IMAGE_TYPE_3D, tcu::UVec4(64, 64, 8, 1) },
+ { IMAGE_TYPE_CUBE, tcu::UVec4(64, 64, 1, 6) },
+ { IMAGE_TYPE_CUBE_ARRAY, tcu::UVec4(64, 64, 1, 6*8) },
+ { IMAGE_TYPE_BUFFER, tcu::UVec4(64, 1, 1, 1) }
+ };
+
+ static const tcu::TextureFormat formats[] =
+ {
+ tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::FLOAT),
+ tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::UNSIGNED_INT32),
+ tcu::TextureFormat(tcu::TextureFormat::R, tcu::TextureFormat::SIGNED_INT32),
+ };
+
+ for (deUint32 qualifierI = 0; qualifierI < MemoryQualifierTestCase::QUALIFIER_LAST; ++qualifierI)
+ {
+ const MemoryQualifierTestCase::Qualifier memoryQualifier = (MemoryQualifierTestCase::Qualifier)qualifierI;
+ const char* const memoryQualifierName =
+ memoryQualifier == MemoryQualifierTestCase::QUALIFIER_COHERENT ? "coherent" :
+ memoryQualifier == MemoryQualifierTestCase::QUALIFIER_VOLATILE ? "volatile" :
+ memoryQualifier == MemoryQualifierTestCase::QUALIFIER_RESTRICT ? "restrict" :
+ DE_NULL;
+
+ de::MovePtr<tcu::TestCaseGroup> qualifierGroup(new tcu::TestCaseGroup(testCtx, memoryQualifierName, ""));
+
+ for (deInt32 imageTypeNdx = 0; imageTypeNdx < DE_LENGTH_OF_ARRAY(imageParametersArray); imageTypeNdx++)
+ {
+ const ImageType imageType = imageParametersArray[imageTypeNdx].imageType;
+ const tcu::UVec4 imageSize = imageParametersArray[imageTypeNdx].imageSize;
+
+ if (memoryQualifier == MemoryQualifierTestCase::QUALIFIER_RESTRICT)
+ {
+ de::MovePtr<TestCase> restrictCase = createImageQualifierRestrictCase(testCtx, imageType, getImageTypeName(imageType));
+ qualifierGroup->addChild(restrictCase.release());
+ }
+ else
+ {
+ for (deInt32 formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); formatNdx++)
+ {
+ const tcu::TextureFormat& format = formats[formatNdx];
+ const std::string formatName = getShaderImageFormatQualifier(formats[formatNdx]);
+
+ qualifierGroup->addChild(
+ new MemoryQualifierTestCase(testCtx, getImageTypeName(imageType) + std::string("_") + formatName,
+ "", memoryQualifier, imageType, imageSize, format, glu::GLSL_VERSION_440));
+ }
+ }
+ }
+
+ imageQualifiersTests->addChild(qualifierGroup.release());
+ }
+
+ return imageQualifiersTests.release();
+ }
+
+ } // image
+ } // vkt
--- /dev/null
-VkBufferImageCopy makeBufferImageCopy (const VkExtent3D extent,
+ /*------------------------------------------------------------------------
+ * Vulkan Conformance Tests
+ * ------------------------
+ *
+ * Copyright (c) 2015 Mobica Ltd.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and/or associated documentation files (the
+ * "Materials"), to deal in the Materials without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Materials, and to
+ * permit persons to whom the Materials are furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice(s) and this permission notice shall be included
+ * in all copies or substantial portions of the Materials.
+ *
+ * The Materials are Confidential Information as defined by the
+ * Khronos Membership Agreement until designated non-confidential by Khronos,
+ * at which point this condition clause shall be removed.
+ *
+ * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
+ *
+ *//*!
+ * \file
+ * \brief Image Tests Utility Classes
+ *//*--------------------------------------------------------------------*/
+
+ #include "vktImageTestsUtil.hpp"
+ #include "vkQueryUtil.hpp"
+ #include "vkTypeUtil.hpp"
+ #include "tcuTextureUtil.hpp"
+
+ using namespace vk;
+
+ namespace vkt
+ {
+ namespace image
+ {
+
//! Creates a VkBuffer, allocates memory satisfying memoryRequirement via the
//! given allocator, and binds the two together (RAII: freed with the object).
Buffer::Buffer (const DeviceInterface&		vk,
				const VkDevice				device,
				Allocator&					allocator,
				const VkBufferCreateInfo&	bufferCreateInfo,
				const MemoryRequirement		memoryRequirement)
{
	m_buffer = createBuffer(vk, device, &bufferCreateInfo);
	m_allocation = allocator.allocate(getBufferMemoryRequirements(vk, device, *m_buffer), memoryRequirement);
	VK_CHECK(vk.bindBufferMemory(device, *m_buffer, m_allocation->getMemory(), m_allocation->getOffset()));
}
+
//! Creates a VkImage, allocates memory satisfying memoryRequirement via the
//! given allocator, and binds the two together (RAII: freed with the object).
Image::Image (const DeviceInterface&	vk,
			  const VkDevice			device,
			  Allocator&				allocator,
			  const VkImageCreateInfo&	imageCreateInfo,
			  const MemoryRequirement	memoryRequirement)
{
	m_image = createImage(vk, device, &imageCreateInfo);
	m_allocation = allocator.allocate(getImageMemoryRequirements(vk, device, *m_image), memoryRequirement);
	VK_CHECK(vk.bindImageMemory(device, *m_image, m_allocation->getMemory(), m_allocation->getOffset()));
}
+
//! Returns a VkBufferCreateInfo for an exclusive-sharing buffer of the given
//! size and usage (no flags, no queue family list).
VkBufferCreateInfo makeBufferCreateInfo (const VkDeviceSize			bufferSize,
										 const VkBufferUsageFlags	usage)
{
	const VkBufferCreateInfo bufferCreateInfo =
	{
		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// VkStructureType		sType;
		DE_NULL,								// const void*			pNext;
		0u,										// VkBufferCreateFlags	flags;
		bufferSize,								// VkDeviceSize			size;
		usage,									// VkBufferUsageFlags	usage;
		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode		sharingMode;
		0u,										// deUint32				queueFamilyIndexCount;
		DE_NULL,								// const deUint32*		pQueueFamilyIndices;
	};
	return bufferCreateInfo;
}
+
- queueFamilyIndex, // deUint32 queueFamilyIndex;
++VkBufferImageCopy makeBufferImageCopy (const VkExtent3D extent,
+ const deUint32 arraySize)
+ {
+ const VkBufferImageCopy copyParams =
+ {
+ 0ull, // VkDeviceSize bufferOffset;
+ 0u, // deUint32 bufferRowLength;
+ 0u, // deUint32 bufferImageHeight;
+ makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, arraySize), // VkImageSubresourceLayers imageSubresource;
+ makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
+ extent, // VkExtent3D imageExtent;
+ };
+ return copyParams;
+ }
+
//! Creates a command pool for the given queue family; individual command
//! buffers from it may be reset (RESET_COMMAND_BUFFER_BIT).
Move<VkCommandPool> makeCommandPool (const DeviceInterface& vk, const VkDevice device, const deUint32 queueFamilyIndex)
{
	const VkCommandPoolCreateInfo commandPoolParams =
	{
		VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,			// VkStructureType			sType;
		DE_NULL,											// const void*				pNext;
		VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,	// VkCommandPoolCreateFlags	flags;
		queueFamilyIndex,									// deUint32					queueFamilyIndex;
	};
	return createCommandPool(vk, device, &commandPoolParams);
}
+
//! Allocates a single primary-level command buffer from the given pool.
Move<VkCommandBuffer> makeCommandBuffer (const DeviceInterface& vk, const VkDevice device, const VkCommandPool commandPool)
{
	const VkCommandBufferAllocateInfo bufferAllocateParams =
	{
		VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,	// VkStructureType		sType;
		DE_NULL,										// const void*			pNext;
		commandPool,									// VkCommandPool		commandPool;
		VK_COMMAND_BUFFER_LEVEL_PRIMARY,				// VkCommandBufferLevel	level;
		1u,												// deUint32				bufferCount;
	};
	return allocateCommandBuffer(vk, device, &bufferAllocateParams);
}
+
//! Creates a pipeline layout with exactly one descriptor set layout and no
//! push-constant ranges.
Move<VkPipelineLayout> makePipelineLayout (const DeviceInterface&		vk,
										   const VkDevice				device,
										   const VkDescriptorSetLayout	descriptorSetLayout)
{
	const VkPipelineLayoutCreateInfo pipelineLayoutParams =
	{
		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	// VkStructureType				sType;
		DE_NULL,										// const void*					pNext;
		0u,												// VkPipelineLayoutCreateFlags	flags;
		1u,												// deUint32						setLayoutCount;
		&descriptorSetLayout,							// const VkDescriptorSetLayout*	pSetLayouts;
		0u,												// deUint32						pushConstantRangeCount;
		DE_NULL,										// const VkPushConstantRange*	pPushConstantRanges;
	};
	return createPipelineLayout(vk, device, &pipelineLayoutParams);
}
+
//! Creates a compute pipeline from a single shader module whose entry point is
//! "main"; no specialization constants and no pipeline cache are used.
Move<VkPipeline> makeComputePipeline (const DeviceInterface&	vk,
									  const VkDevice			device,
									  const VkPipelineLayout	pipelineLayout,
									  const VkShaderModule		shaderModule)
{
	const VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
	{
		VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType					sType;
		DE_NULL,												// const void*						pNext;
		0u,														// VkPipelineShaderStageCreateFlags	flags;
		VK_SHADER_STAGE_COMPUTE_BIT,							// VkShaderStageFlagBits			stage;
		shaderModule,											// VkShaderModule					module;
		"main",													// const char*						pName;
		DE_NULL,												// const VkSpecializationInfo*		pSpecializationInfo;
	};
	const VkComputePipelineCreateInfo pipelineCreateInfo =
	{
		VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,	// VkStructureType					sType;
		DE_NULL,										// const void*						pNext;
		0u,												// VkPipelineCreateFlags			flags;
		pipelineShaderStageParams,						// VkPipelineShaderStageCreateInfo	stage;
		pipelineLayout,									// VkPipelineLayout					layout;
		DE_NULL,										// VkPipeline						basePipelineHandle;
		0,												// deInt32							basePipelineIndex;
	};
	// DE_NULL pipeline cache: each pipeline is compiled from scratch.
	return createComputePipeline(vk, device, DE_NULL , &pipelineCreateInfo);
}
+
//! Creates a buffer view over [offset, offset+size) of the buffer with the
//! given texel format.
Move<VkBufferView> makeBufferView (const DeviceInterface&	vk,
								   const VkDevice			vkDevice,
								   const VkBuffer			buffer,
								   const VkFormat			format,
								   const VkDeviceSize		offset,
								   const VkDeviceSize		size)
{
	const VkBufferViewCreateInfo bufferViewParams =
	{
		VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,	// VkStructureType			sType;
		DE_NULL,									// const void*				pNext;
		0u,											// VkBufferViewCreateFlags	flags;
		buffer,										// VkBuffer					buffer;
		format,										// VkFormat					format;
		offset,										// VkDeviceSize				offset;
		size,										// VkDeviceSize				range;
	};
	return createBufferView(vk, vkDevice, &bufferViewParams);
}
+
//! Creates an image view of the given type/format over the given subresource
//! range, with identity (RGBA) component mapping.
Move<VkImageView> makeImageView (const DeviceInterface&			vk,
								 const VkDevice					vkDevice,
								 const VkImage					image,
								 const VkImageViewType			imageViewType,
								 const VkFormat					format,
								 const VkImageSubresourceRange	subresourceRange)
{
	const VkImageViewCreateInfo imageViewParams =
	{
		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,	// VkStructureType			sType;
		DE_NULL,									// const void*				pNext;
		0u,											// VkImageViewCreateFlags	flags;
		image,										// VkImage					image;
		imageViewType,								// VkImageViewType			viewType;
		format,										// VkFormat					format;
		makeComponentMappingRGBA(),					// VkComponentMapping		components;
		subresourceRange,							// VkImageSubresourceRange	subresourceRange;
	};
	return createImageView(vk, vkDevice, &imageViewParams);
}
+
//! Allocates one descriptor set with the given layout from the given pool.
Move<VkDescriptorSet> makeDescriptorSet (const DeviceInterface&			vk,
										 const VkDevice					device,
										 const VkDescriptorPool			descriptorPool,
										 const VkDescriptorSetLayout	setLayout)
{
	const VkDescriptorSetAllocateInfo allocateParams =
	{
		VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,	// VkStructureType				sType;
		DE_NULL,										// const void*					pNext;
		descriptorPool,									// VkDescriptorPool				descriptorPool;
		1u,												// deUint32						setLayoutCount;
		&setLayout,										// const VkDescriptorSetLayout*	pSetLayouts;
	};
	return allocateDescriptorSet(vk, device, &allocateParams);
}
+
//! Builds a buffer memory barrier for [offset, offset+bufferSizeBytes) with no
//! queue family ownership transfer (both families VK_QUEUE_FAMILY_IGNORED).
VkBufferMemoryBarrier makeBufferMemoryBarrier (const VkAccessFlags	srcAccessMask,
											   const VkAccessFlags	dstAccessMask,
											   const VkBuffer		buffer,
											   const VkDeviceSize	offset,
											   const VkDeviceSize	bufferSizeBytes)
{
	const VkBufferMemoryBarrier barrier =
	{
		VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,	// VkStructureType	sType;
		DE_NULL,									// const void*		pNext;
		srcAccessMask,								// VkAccessFlags	srcAccessMask;
		dstAccessMask,								// VkAccessFlags	dstAccessMask;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32			srcQueueFamilyIndex;
		VK_QUEUE_FAMILY_IGNORED,					// deUint32			destQueueFamilyIndex;
		buffer,										// VkBuffer			buffer;
		offset,										// VkDeviceSize		offset;
		bufferSizeBytes,							// VkDeviceSize		size;
	};
	return barrier;
}
+
+ VkImageMemoryBarrier makeImageMemoryBarrier (const VkAccessFlags srcAccessMask,
+ const VkAccessFlags dstAccessMask,
+ const VkImageLayout oldLayout,
+ const VkImageLayout newLayout,
+ const VkImage image,
+ const VkImageSubresourceRange subresourceRange)
+ {
+ const VkImageMemoryBarrier barrier =
+ {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ srcAccessMask, // VkAccessFlags outputMask;
+ dstAccessMask, // VkAccessFlags inputMask;
+ oldLayout, // VkImageLayout oldLayout;
+ newLayout, // VkImageLayout newLayout;
+ VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
+ VK_QUEUE_FAMILY_IGNORED, // deUint32 destQueueFamilyIndex;
+ image, // VkImage image;
+ subresourceRange, // VkImageSubresourceRange subresourceRange;
+ };
+ return barrier;
+ }
+
//! Begin recording into a command buffer with default (zero) usage flags and
//! no inherited render pass / query state.
//! NOTE(review): this begin-info layout (renderPass, subpass, framebuffer and
//! occlusion-query fields inline in VkCommandBufferBeginInfo) appears to match
//! a pre-1.0 Vulkan header revision rather than the final API, where those
//! fields moved into VkCommandBufferInheritanceInfo — confirm against vkDefs.hpp.
void beginCommandBuffer (const DeviceInterface& vk, const VkCommandBuffer commandBuffer)
{
	const VkCommandBufferBeginInfo commandBufBeginParams =
	{
		VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
		DE_NULL, // const void* pNext;
		0u, // VkCommandBufferUsageFlags flags;
		DE_NULL, // VkRenderPass renderPass;
		0u, // deUint32 subpass;
		DE_NULL, // VkFramebuffer framebuffer;
		DE_FALSE, // VkBool32 occlusionQueryEnable;
		0u, // VkQueryControlFlags queryFlags;
		0u, // VkQueryPipelineStatisticFlags pipelineStatistics;
	};
	VK_CHECK(vk.beginCommandBuffer(commandBuffer, &commandBufBeginParams));
}
//! End command buffer recording; fails the test (via VK_CHECK) on error.
void endCommandBuffer (const DeviceInterface& vk, const VkCommandBuffer commandBuffer)
{
	VK_CHECK(vk.endCommandBuffer(commandBuffer));
}
+
+ void submitCommandsAndWait (const DeviceInterface& vk,
+ const VkDevice device,
+ const VkQueue queue,
+ const VkCommandBuffer commandBuffer)
+ {
+ const VkFenceCreateInfo fenceParams =
+ {
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // VkFenceCreateFlags flags;
+ };
+ const Unique<VkFence> fence(createFence(vk, device, &fenceParams));
+
+ const VkSubmitInfo submitInfo =
+ {
+ VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
+ DE_NULL, // const void* pNext;
+ 0u, // deUint32 waitSemaphoreCount;
+ DE_NULL, // const VkSemaphore* pWaitSemaphores;
+ 1u, // deUint32 commandBufferCount;
+ &commandBuffer, // const VkCommandBuffer* pCommandBuffers;
+ 0u, // deUint32 signalSemaphoreCount;
+ DE_NULL, // const VkSemaphore* pSignalSemaphores;
+ };
+
+ VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
+ VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), DE_TRUE, ~0ull));
+ }
+
+ VkImageType mapImageType (const ImageType imageType)
+ {
+ switch (imageType)
+ {
+ case IMAGE_TYPE_1D:
+ case IMAGE_TYPE_1D_ARRAY:
+ case IMAGE_TYPE_BUFFER:
+ return VK_IMAGE_TYPE_1D;
+
+ case IMAGE_TYPE_2D:
+ case IMAGE_TYPE_2D_ARRAY:
+ case IMAGE_TYPE_CUBE:
+ case IMAGE_TYPE_CUBE_ARRAY:
+ return VK_IMAGE_TYPE_2D;
+
+ case IMAGE_TYPE_3D:
+ return VK_IMAGE_TYPE_3D;
+
+ default:
+ DE_ASSERT(false);
+ return VK_IMAGE_TYPE_LAST;
+ }
+ }
+
+ VkImageViewType mapImageViewType (const ImageType imageType)
+ {
+ switch (imageType)
+ {
+ case IMAGE_TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
+ case IMAGE_TYPE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
+ case IMAGE_TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
+ case IMAGE_TYPE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
+ case IMAGE_TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
+ case IMAGE_TYPE_CUBE: return VK_IMAGE_VIEW_TYPE_CUBE;
+ case IMAGE_TYPE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+
+ default:
+ DE_ASSERT(false);
+ return VK_IMAGE_VIEW_TYPE_LAST;
+ }
+ }
+
+ std::string getImageTypeName (const ImageType imageType)
+ {
+ switch (imageType)
+ {
+ case IMAGE_TYPE_1D: return "1d";
+ case IMAGE_TYPE_1D_ARRAY: return "1d_array";
+ case IMAGE_TYPE_2D: return "2d";
+ case IMAGE_TYPE_2D_ARRAY: return "2d_array";
+ case IMAGE_TYPE_3D: return "3d";
+ case IMAGE_TYPE_CUBE: return "cube";
+ case IMAGE_TYPE_CUBE_ARRAY: return "cube_array";
+ case IMAGE_TYPE_BUFFER: return "buffer";
+
+ default:
+ DE_ASSERT(false);
+ return "";
+ }
+ }
+
+ std::string getShaderImageType (const tcu::TextureFormat& format, const ImageType imageType)
+ {
+ std::string formatPart = tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER ? "u" :
+ tcu::getTextureChannelClass(format.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER ? "i" : "";
+
+ std::string imageTypePart;
+ switch (imageType)
+ {
+ case IMAGE_TYPE_1D: imageTypePart = "1D"; break;
+ case IMAGE_TYPE_1D_ARRAY: imageTypePart = "1DArray"; break;
+ case IMAGE_TYPE_2D: imageTypePart = "2D"; break;
+ case IMAGE_TYPE_2D_ARRAY: imageTypePart = "2DArray"; break;
+ case IMAGE_TYPE_3D: imageTypePart = "3D"; break;
+ case IMAGE_TYPE_CUBE: imageTypePart = "Cube"; break;
+ case IMAGE_TYPE_CUBE_ARRAY: imageTypePart = "CubeArray"; break;
+ case IMAGE_TYPE_BUFFER: imageTypePart = "Buffer"; break;
+
+ default:
+ DE_ASSERT(false);
+ }
+
+ return formatPart + "image" + imageTypePart;
+ }
+
+ std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
+ {
+ const char* orderPart;
+ const char* typePart;
+
+ switch (format.order)
+ {
+ case tcu::TextureFormat::R: orderPart = "r"; break;
+ case tcu::TextureFormat::RG: orderPart = "rg"; break;
+ case tcu::TextureFormat::RGB: orderPart = "rgb"; break;
+ case tcu::TextureFormat::RGBA: orderPart = "rgba"; break;
+
+ default:
+ DE_ASSERT(false);
+ orderPart = DE_NULL;
+ }
+
+ switch (format.type)
+ {
+ case tcu::TextureFormat::FLOAT: typePart = "32f"; break;
+ case tcu::TextureFormat::HALF_FLOAT: typePart = "16f"; break;
+
+ case tcu::TextureFormat::UNSIGNED_INT32: typePart = "32ui"; break;
+ case tcu::TextureFormat::UNSIGNED_INT16: typePart = "16ui"; break;
+ case tcu::TextureFormat::UNSIGNED_INT8: typePart = "8ui"; break;
+
+ case tcu::TextureFormat::SIGNED_INT32: typePart = "32i"; break;
+ case tcu::TextureFormat::SIGNED_INT16: typePart = "16i"; break;
+ case tcu::TextureFormat::SIGNED_INT8: typePart = "8i"; break;
+
+ case tcu::TextureFormat::UNORM_INT16: typePart = "16"; break;
+ case tcu::TextureFormat::UNORM_INT8: typePart = "8"; break;
+
+ case tcu::TextureFormat::SNORM_INT16: typePart = "16_snorm"; break;
+ case tcu::TextureFormat::SNORM_INT8: typePart = "8_snorm"; break;
+
+ default:
+ DE_ASSERT(false);
+ typePart = DE_NULL;
+ }
+
+ return std::string() + orderPart + typePart;
+ }
+
+ } // image
+ } // vkt