1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 The Android Open Source Project
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Image load/store Tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktImageLoadStoreTests.hpp"
26 #include "vktTestCaseUtil.hpp"
27 #include "vktImageTestsUtil.hpp"
28 #include "vktImageLoadStoreUtil.hpp"
29 #include "vktImageTexture.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkPrograms.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkBarrierUtil.hpp"
38 #include "vkBuilderUtil.hpp"
39 #include "vkQueryUtil.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkCmdUtil.hpp"
42 #include "vkObjUtil.hpp"
43 #include "vkBufferWithMemory.hpp"
46 #include "deUniquePtr.hpp"
47 #include "deSharedPtr.hpp"
48 #include "deStringUtil.hpp"
50 #include "tcuImageCompare.hpp"
51 #include "tcuTexture.hpp"
52 #include "tcuTextureUtil.hpp"
53 #include "tcuFloat.hpp"
54 #include "tcuFloatFormat.hpp"
55 #include "tcuStringTemplate.hpp"
56 #include "tcuVectorUtil.hpp"
71 // Check for three-component (non-packed) format, i.e. pixel size is a multiple of 3.
72 bool formatHasThreeComponents(VkFormat format)
74 const tcu::TextureFormat texFormat = mapVkFormat(format);
75 return (getPixelSize(texFormat) % 3) == 0;
78 VkFormat getSingleComponentFormat(VkFormat format)
80 tcu::TextureFormat texFormat = mapVkFormat(format);
81 texFormat = tcu::TextureFormat(tcu::TextureFormat::R, texFormat.type);
82 return mapTextureFormat(texFormat);
85 inline VkBufferImageCopy makeBufferImageCopy (const Texture& texture)
87 return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
// Return a view of one array layer (array images) or one depth slice (3D
// images) of 'access'. For non-layered image types the whole access is the
// single layer and 'layer' must be 0.
// NOTE(review): the IMAGE_TYPE_1D/2D case labels, the 'return access;'
// statement, the 'default:' label and the brace lines are not visible in
// this dump.
90 tcu::ConstPixelBufferAccess getLayerOrSlice (const Texture& texture, const tcu::ConstPixelBufferAccess access, const int layer)
92 switch (texture.type())
// Non-layered: the access already describes the one and only layer.
96 case IMAGE_TYPE_BUFFER:
98 DE_ASSERT(layer == 0);
// 1D arrays stack layers along the Y axis of the access.
101 case IMAGE_TYPE_1D_ARRAY:
102 return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);
// 2D arrays, cubes and 3D images stack layers/slices along the Z axis.
104 case IMAGE_TYPE_2D_ARRAY:
105 case IMAGE_TYPE_CUBE:
106 case IMAGE_TYPE_CUBE_ARRAY:
107 case IMAGE_TYPE_3D: // 3d texture is treated as if depth was the layers
108 return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);
// Unreachable for valid image types.
111 DE_FATAL("Internal test error");
112 return tcu::ConstPixelBufferAccess();
116 //! \return the size in bytes of a given level of a mipmap image, including array layers.
117 vk::VkDeviceSize getMipmapLevelImageSizeBytes (const Texture& texture, const vk::VkFormat format, const deUint32 mipmapLevel)
119 tcu::IVec3 size = texture.size(mipmapLevel);
120 return tcu::getPixelSize(vk::mapVkFormat(format)) * size.x() * size.y() * size.z();
123 //! \return the size in bytes of the whole mipmap image, including all mipmap levels and array layers
124 vk::VkDeviceSize getMipmapImageTotalSizeBytes (const Texture& texture, const vk::VkFormat format)
126 vk::VkDeviceSize size = 0u;
// NOTE(review): unsigned literal 0u assigned to a signed deInt32 counter.
127 deInt32 levelCount = 0u;
// Accumulate the byte size of every mip level. The 'do {' line, the
// levelCount increment and the final 'return size;' are not visible in this
// dump -- presumably a do/while loop over levelCount.
131 size += getMipmapLevelImageSizeBytes(texture, format, levelCount);
133 } while (levelCount < texture.numMipmapLevels());
// Compare 'result' against 'reference' layer by layer (slice by slice for 3D
// images), picking a comparison threshold per channel class. Returns true
// only when every layer passes.
// NOTE(review): brace lines, the per-layer 'ok' declaration, 'break'
// statements and the passedLayers increment are not visible in this dump.
137 //! \return true if all layers match in both pixel buffers
138 bool comparePixelBuffers (tcu::TestLog& log,
139 const Texture& texture,
140 const VkFormat format,
141 const tcu::ConstPixelBufferAccess reference,
142 const tcu::ConstPixelBufferAccess result,
143 const deUint32 mipmapLevel = 0u)
145 DE_ASSERT(reference.getFormat() == result.getFormat());
146 DE_ASSERT(reference.getSize() == result.getSize());
// 3D images are compared per depth slice, everything else per array layer.
148 const bool is3d = (texture.type() == IMAGE_TYPE_3D);
149 const int numLayersOrSlices = (is3d ? texture.size(mipmapLevel).z() : texture.numLayers());
150 const int numCubeFaces = 6;
152 int passedLayers = 0;
153 for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
// Human-readable name/description for the log entry of this layer.
155 const std::string comparisonName = "Comparison" + de::toString(layerNdx);
156 const std::string comparisonDesc = "Image Comparison, " +
157 (isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " + de::toString(layerNdx / numCubeFaces) :
158 is3d ? "slice " + de::toString(layerNdx) : "layer " + de::toString(layerNdx) + " , level " + de::toString(mipmapLevel));
160 const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
161 const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);
// Threshold selection depends on how the channel values are encoded.
165 switch (tcu::getTextureChannelClass(mapVkFormat(format).type))
// Integer channels must match exactly (zero threshold).
167 case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
168 case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
170 ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
// Unorm channels: tolerance of one representable step plus fp32 epsilon.
174 case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
176 // Allow error of minimum representable difference
177 tcu::Vec4 threshold(1.0f / ((tcu::UVec4(1u) << tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<deUint32>()) - 1u).cast<float>());
179 // Add 1 ULP of fp32 imprecision to account for image comparison fp32 math with unorm->float conversions.
180 threshold += tcu::Vec4(std::numeric_limits<float>::epsilon());
182 ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, threshold, tcu::COMPARE_LOG_RESULT);
// Snorm channels: one sign bit less of mantissa than unorm.
186 case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
188 const tcu::UVec4 bitDepth = tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<deUint32>() - 1u;
189 // To avoid bit-shifting with negative value, which is undefined behaviour.
190 const tcu::UVec4 fixedBitDepth = tcu::select(bitDepth, tcu::UVec4(0u, 0u, 0u, 0u), tcu::greaterThanEqual(bitDepth.cast<deInt32>(), tcu::IVec4(0, 0, 0, 0)));
192 // Allow error of minimum representable difference
193 const tcu::Vec4 threshold (1.0f / ((tcu::UVec4(1u) << fixedBitDepth) - 1u).cast<float>());
195 ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, threshold, tcu::COMPARE_LOG_RESULT);
// Float channels: compare in ULPs scaled from the target format's mantissa.
199 case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
201 // Convert target format ulps to float ulps and allow 1 ulp difference
202 const tcu::UVec4 threshold (tcu::UVec4(1u) << (tcu::UVec4(23) - tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<deUint32>()));
204 ok = tcu::floatUlpThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, threshold, tcu::COMPARE_LOG_RESULT);
209 DE_FATAL("Unknown channel class");
// All layers/slices must have passed for the comparison to succeed.
216 return passedLayers == numLayersOrSlices;
219 //!< Zero out invalid pixels in the image (denormalized, infinite, NaN values)
220 void replaceBadFloatReinterpretValues (const tcu::PixelBufferAccess access)
222 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT)&semi-check;
// (whole-image scan, one pixel at a time)
224 for (int z = 0; z < access.getDepth(); ++z)
225 for (int y = 0; y < access.getHeight(); ++y)
226 for (int x = 0; x < access.getWidth(); ++x)
228 const tcu::Vec4 color(access.getPixel(x, y, z));
229 tcu::Vec4 newColor = color;
// Inspect each channel at the precision of the underlying format.
231 for (int i = 0; i < 4; ++i)
// Half-float formats: classify through tcu::Float16.
233 if (access.getFormat().type == tcu::TextureFormat::HALF_FLOAT)
235 const tcu::Float16 f(color[i]);
236 if (f.isDenorm() || f.isInf() || f.isNaN())
// NOTE(review): the statements replacing the offending channel (and the
// 'else' branch structure) are not visible in this dump -- per the header
// comment, invalid channels are presumably zeroed.
241 const tcu::Float32 f(color[i]);
242 if (f.isDenorm() || f.isInf() || f.isNaN())
// Write back only when something actually changed.
247 if (newColor != color)
248 access.setPixel(newColor, x, y, z);
252 //!< replace invalid pixels in the image (-128)
253 void replaceSnormReinterpretValues (const tcu::PixelBufferAccess access)
255 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);
257 for (int z = 0; z < access.getDepth(); ++z)
258 for (int y = 0; y < access.getHeight(); ++y)
259 for (int x = 0; x < access.getWidth(); ++x)
261 const tcu::IVec4 color(access.getPixelInt(x, y, z));
262 tcu::IVec4 newColor = color;
264 for (int i = 0; i < 4; ++i)
266 const deInt32 oldColor(color[i]);
267 if (oldColor == -128) newColor[i] = -127;
270 if (newColor != color)
271 access.setPixel(newColor, x, y, z);
// Midpoint of the format's representable value range; used as the constant
// store/reference value.
275 tcu::Vec4 getMiddleValue(VkFormat imageFormat)
277 tcu::TextureFormat format = mapVkFormat(imageFormat);
278 tcu::TextureFormatInfo fmtInfo = tcu::getTextureFormatInfo(format);
279 tcu::Vec4 val = (fmtInfo.valueMax - fmtInfo.valueMin) * tcu::Vec4(0.5f);
// Integer formats need a whole-number value. NOTE(review): the adjustment
// statement and the final 'return val;' are not visible in this dump.
281 if (isIntegerFormat(imageFormat))
// Build the reference image the store shader is expected to produce, using
// the storage format; optionally reinterpretable as 'readFormat'.
// NOTE(review): the 'if (constantValue)' / 'else' / 'else if' branch lines,
// brace lines and the final 'return reference;' are not visible in this dump.
287 tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, const VkFormat readFormat, bool constantValue = false)
289 // Generate a reference image data using the storage format
291 tcu::TextureLevel reference(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
292 const tcu::PixelBufferAccess access = reference.getAccess();
// Scale/bias mirror what the store shader applies to its per-texel pattern.
294 const float storeColorScale = computeStoreColorScale(imageFormat, imageSize);
295 const float storeColorBias = computeStoreColorBias(imageFormat);
297 const bool srgbFormat = isSrgbFormat(imageFormat);
298 const bool intFormat = isIntegerFormat(imageFormat);
299 const bool storeNegativeValues = isSignedFormat(imageFormat) && (storeColorBias == 0);
300 const int xMax = imageSize.x() - 1;
301 const int yMax = imageSize.y() - 1;
303 for (int z = 0; z < imageSize.z(); ++z)
304 for (int y = 0; y < imageSize.y(); ++y)
305 for (int x = 0; x < imageSize.x(); ++x)
// Constant-value mode: every texel is the format's mid-range value.
309 access.setPixel(getMiddleValue(imageFormat), x, y, z);
// Pattern mode: XOR pattern of coordinates and mirrored coordinates.
313 tcu::IVec4 color = tcu::IVec4(x ^ y ^ z, (xMax - x) ^ y ^ z, x ^ (yMax - y) ^ z, (xMax - x) ^ (yMax - y) ^ z);
// Signed formats without bias exercise negative values too.
315 if (storeNegativeValues)
316 color -= tcu::IVec4(deRoundFloatToInt32((float)de::max(xMax, yMax) / 2.0f));
319 access.setPixel(color, x, y, z);
// Non-integer paths apply scale/bias (and sRGB encoding where applicable).
323 access.setPixel(tcu::linearToSRGB(color.asFloat() * storeColorScale + storeColorBias), x, y, z);
325 access.setPixel(color.asFloat() * storeColorScale + storeColorBias, x, y, z);
330 // If the image is to be accessed as a float texture, get rid of invalid values
332 if (isFloatFormat(readFormat) && imageFormat != readFormat)
333 replaceBadFloatReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
334 if (isSnormFormat(readFormat) && imageFormat != readFormat)
335 replaceSnormReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
// Overload for the common case where the image is read back in the same
// format it was stored with (no reinterpretation).
340 inline tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, bool constantValue = false)
342 return generateReferenceImage(imageSize, imageFormat, imageFormat, constantValue);
// Mirror the image in place along the X axis by swapping pixel columns.
// Integer formats are swapped via getPixelUint to avoid lossy float
// round-trips; other formats go through the float path.
// NOTE(review): the 'else' between the two branches and the brace lines are
// not visible in this dump -- presumably the second loop nest is the
// non-integer alternative.
345 void flipHorizontally (const tcu::PixelBufferAccess access)
347 const int xMax = access.getWidth() - 1;
348 const int halfWidth = access.getWidth() / 2;
350 if (isIntegerFormat(mapTextureFormat(access.getFormat())))
351 for (int z = 0; z < access.getDepth(); z++)
352 for (int y = 0; y < access.getHeight(); y++)
353 for (int x = 0; x < halfWidth; x++)
// Classic two-ends swap of column x and its mirror column xMax - x.
355 const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
356 access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
357 access.setPixel(temp, x, y, z);
360 for (int z = 0; z < access.getDepth(); z++)
361 for (int y = 0; y < access.getHeight(); y++)
362 for (int x = 0; x < halfWidth; x++)
364 const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
365 access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
366 access.setPixel(temp, x, y, z);
370 inline bool formatsAreCompatible (const VkFormat format0, const VkFormat format1)
372 return format0 == format1 || mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize();
375 void commandImageWriteBarrierBetweenShaderInvocations (Context& context, const VkCommandBuffer cmdBuffer, const VkImage image, const Texture& texture)
377 const DeviceInterface& vk = context.getDeviceInterface();
379 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, texture.numMipmapLevels(), 0u, texture.numLayers());
380 const VkImageMemoryBarrier shaderWriteBarrier = makeImageMemoryBarrier(
381 VK_ACCESS_SHADER_WRITE_BIT, 0u,
382 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
383 image, fullImageSubresourceRange);
385 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier);
388 void commandBufferWriteBarrierBeforeHostRead (Context& context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer, const VkDeviceSize bufferSizeBytes)
390 const DeviceInterface& vk = context.getDeviceInterface();
392 const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
393 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
394 buffer, 0ull, bufferSizeBytes);
396 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
// Transition the image to TRANSFER_SRC, copy level 0 of all layers into
// 'buffer', then make the transfer write visible to host reads.
// NOTE(review): the parameter line declaring the source VkImage 'image'
// (original line 402) is not visible in this dump. The "©Region" /
// "©Barrier" tokens below look like HTML-entity-mangled "&copyRegion" /
// "&copyBarrier" ("&copy" decoded to the copyright sign) -- confirm against
// upstream before building.
399 //! Copy all layers of an image to a buffer.
400 void commandCopyImageToBuffer (Context& context,
401 const VkCommandBuffer cmdBuffer,
403 const VkBuffer buffer,
404 const VkDeviceSize bufferSizeBytes,
405 const Texture& texture)
407 const DeviceInterface& vk = context.getDeviceInterface();
// Only level 0 is copied here (levelCount = 1).
409 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
410 const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
411 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
412 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
413 image, fullImageSubresourceRange);
415 const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);
// Transfer write -> host read, over the whole destination buffer.
417 const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
418 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
419 buffer, 0ull, bufferSizeBytes);
421 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
422 vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, ©Region);
423 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, ©Barrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
// As commandCopyImageToBuffer, but copies every mip level: one tightly
// packed copy region per level, laid out consecutively in the buffer.
// NOTE(review): the parameter line declaring the source VkImage 'image'
// (original line 429), brace lines, and the region-struct braces are not
// visible in this dump. "©Barrier" below looks like an HTML-entity-mangled
// "&copyBarrier" -- confirm against upstream before building.
426 //! Copy all layers of a mipmap image to a buffer.
427 void commandCopyMipmapImageToBuffer (Context& context,
428 const VkCommandBuffer cmdBuffer,
430 const VkFormat imageFormat,
431 const VkBuffer buffer,
432 const VkDeviceSize bufferSizeBytes,
433 const Texture& texture)
435 const DeviceInterface& vk = context.getDeviceInterface();
// All levels and all layers are transitioned for the transfer.
437 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, texture.numMipmapLevels(), 0u, texture.numLayers());
438 const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
439 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
440 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
441 image, fullImageSubresourceRange);
// Build one copy region per mip level; bufferOffset advances by each
// level's byte size so levels are packed back to back.
443 std::vector<VkBufferImageCopy> copyRegions;
444 VkDeviceSize bufferOffset = 0u;
445 for (deInt32 levelNdx = 0; levelNdx < texture.numMipmapLevels(); levelNdx++)
447 const VkBufferImageCopy copyParams =
449 bufferOffset, // VkDeviceSize bufferOffset;
450 0u, // deUint32 bufferRowLength;
451 0u, // deUint32 bufferImageHeight;
452 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, 0u, texture.numLayers()), // VkImageSubresourceLayers imageSubresource;
453 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
454 makeExtent3D(texture.layerSize(levelNdx)), // VkExtent3D imageExtent;
456 copyRegions.push_back(copyParams);
457 bufferOffset += getMipmapLevelImageSizeBytes(texture, imageFormat, levelNdx);
// Transfer write -> host read over the whole destination buffer.
460 const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
461 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
462 buffer, 0ull, bufferSizeBytes);
464 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
465 vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, (deUint32) copyRegions.size(), copyRegions.data());
466 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, ©Barrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
// Test case: a compute shader imageStore()s a pattern into an image; the
// result is read back and compared against a host-generated reference.
// NOTE(review): access specifiers, the enum header for the flag constants,
// and brace lines are not visible in this dump.
469 class StoreTest : public TestCase
// Behaviour flags combined into the 'flags' constructor argument.
474 FLAG_SINGLE_LAYER_BIND = 0x1, //!< Run the shader multiple times, each time binding a different layer.
475 FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER = 0x2, //!< Declare the format of the images in the shader code
476 FLAG_MINALIGN = 0x4, //!< Use bufferview offset that matches the advertised minimum alignment
477 FLAG_STORE_CONSTANT_VALUE = 0x8, //!< Store constant value
480 StoreTest (tcu::TestContext& testCtx,
481 const std::string& name,
482 const std::string& description,
483 const Texture& texture,
484 const VkFormat format,
485 const deUint32 flags = FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER);
487 virtual void checkSupport (Context& context) const;
488 void initPrograms (SourceCollections& programCollection) const;
489 TestInstance* createInstance (Context& context) const;
// Immutable test parameters, decoded from the constructor flags.
492 const Texture m_texture;
493 const VkFormat m_format;
494 const bool m_declareImageFormatInShader;
495 const bool m_singleLayerBind;
496 const bool m_minalign;
497 const bool m_storeConstantValue;
// Decode the flag bits into the boolean members.
// NOTE(review): the initializer line for m_format (original line 508) and
// brace lines are not visible in this dump.
500 StoreTest::StoreTest (tcu::TestContext& testCtx,
501 const std::string& name,
502 const std::string& description,
503 const Texture& texture,
504 const VkFormat format,
505 const deUint32 flags)
506 : TestCase (testCtx, name, description)
507 , m_texture (texture)
509 , m_declareImageFormatInShader ((flags & FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER) != 0)
510 , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
511 , m_minalign ((flags & FLAG_MINALIGN) != 0)
512 , m_storeConstantValue ((flags & FLAG_STORE_CONSTANT_VALUE) != 0)
// Binding layers one at a time only makes sense for layered textures.
514 if (m_singleLayerBind)
515 DE_ASSERT(m_texture.numLayers() > 1);
// Throw NotSupportedError unless the device/format supports everything this
// test configuration needs. Two compile paths: the Vulkan build queries
// VkFormatProperties3 (format_feature_flags2); the Vulkan SC build uses the
// core VkFormatProperties plus a device feature check.
// NOTE(review): the '#else' line (original 537) between the two paths and
// brace lines are not visible in this dump.
518 void StoreTest::checkSupport (Context& context) const
520 #ifndef CTS_USES_VULKANSC
521 const VkFormatProperties3 formatProperties (context.getFormatProperties(m_format));
// Write-without-format needs explicit per-format support (flags2).
523 if (!m_declareImageFormatInShader && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
524 TCU_THROW(NotSupportedError, "Format not supported for unformatted stores via storage buffer");
526 if (!m_declareImageFormatInShader && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR))
527 TCU_THROW(NotSupportedError, "Format not supported for unformatted stores via storage images");
529 if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
530 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
532 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
533 TCU_THROW(NotSupportedError, "Format not supported for storage images");
535 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
536 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
// Vulkan SC path: core format properties + core device feature.
538 const VkFormatProperties formatProperties(getPhysicalDeviceFormatProperties(context.getInstanceInterface(), context.getPhysicalDevice(), m_format));
540 if (!m_declareImageFormatInShader)
541 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT);
543 if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
544 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
546 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
547 TCU_THROW(NotSupportedError, "Format not supported for storage images");
549 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
550 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
551 #endif // CTS_USES_VULKANSC
// Generate the compute shader that imageStore()s the test pattern. The GLSL
// colour expression is built as a string so that it matches, texel for
// texel, the host-side generateReferenceImage() pattern.
// NOTE(review): numerous brace lines, 'else' lines and parts of the string
// concatenations (e.g. the first "gx^gy^gz" component, original line 593)
// are not visible in this dump.
554 void StoreTest::initPrograms (SourceCollections& programCollection) const
// Scale/bias must mirror the reference-image generation.
556 const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
557 const float storeColorBias = computeStoreColorBias(m_format);
558 DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));
560 const deUint32 xMax = m_texture.size().x() - 1;
561 const deUint32 yMax = m_texture.size().y() - 1;
// "u"/"i" prefix selects uvec4/ivec4/vec4 in the generated GLSL.
562 const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
563 const bool storeNegativeValues = isSignedFormat(m_format) && (storeColorBias == 0);
564 bool useClamp = false;
565 std::string colorBaseExpr = signednessPrefix + "vec4(";
567 std::string colorExpr;
// Constant-value mode: the colour expression is a literal vec4.
569 if (m_storeConstantValue)
571 tcu::Vec4 val = getMiddleValue(m_format);
573 if (isIntegerFormat(m_format))
575 colorExpr = colorBaseExpr
576 + de::toString(static_cast<deInt64>(val.x())) + ", "
577 + de::toString(static_cast<deInt64>(val.y())) + ", "
578 + de::toString(static_cast<deInt64>(val.z())) + ", "
579 + de::toString(static_cast<deInt64>(val.w())) + ")";
583 colorExpr = colorBaseExpr
584 + de::toString(val.x()) + ", "
585 + de::toString(val.y()) + ", "
586 + de::toString(val.z()) + ", "
587 + de::toString(val.w()) + ")";
// Pattern mode: XOR of invocation coordinates, mirroring the reference.
592 colorBaseExpr = colorBaseExpr
594 + "(" + de::toString(xMax) + "-gx)^gy^gz, "
595 + "gx^(" + de::toString(yMax) + "-gy)^gz, "
596 + "(" + de::toString(xMax) + "-gx)^(" + de::toString(yMax) + "-gy)^gz)";
598 // Large integer values may not be represented with formats with low bit depths
599 if (isIntegerFormat(m_format))
601 const deInt64 minStoreValue = storeNegativeValues ? 0 - deRoundFloatToInt64((float)de::max(xMax, yMax) / 2.0f) : 0;
602 const deInt64 maxStoreValue = storeNegativeValues ? deRoundFloatToInt64((float)de::max(xMax, yMax) / 2.0f) : de::max(xMax, yMax);
604 useClamp = !isRepresentableIntegerValue(tcu::Vector<deInt64, 4>(minStoreValue), mapVkFormat(m_format)) ||
605 !isRepresentableIntegerValue(tcu::Vector<deInt64, 4>(maxStoreValue), mapVkFormat(m_format));
608 // Clamp if integer value cannot be represented with the current format
611 const tcu::IVec4 bitDepths = tcu::getTextureFormatBitDepth(mapVkFormat(m_format));
612 tcu::IVec4 minRepresentableValue;
613 tcu::IVec4 maxRepresentableValue;
615 switch (tcu::getTextureChannelClass(mapVkFormat(m_format).type))
617 case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
619 minRepresentableValue = tcu::IVec4(0);
620 maxRepresentableValue = (tcu::IVec4(1) << bitDepths) - tcu::IVec4(1);
624 case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
626 minRepresentableValue = -(tcu::IVec4(1) << bitDepths - tcu::IVec4(1));
627 maxRepresentableValue = (tcu::IVec4(1) << (bitDepths - tcu::IVec4(1))) - tcu::IVec4(1);
632 DE_ASSERT(isIntegerFormat(m_format));
// Wrap the pattern in a GLSL clamp() to the representable range.
635 colorBaseExpr = "clamp(" + colorBaseExpr + ", "
636 + signednessPrefix + "vec4" + de::toString(minRepresentableValue) + ", "
637 + signednessPrefix + "vec4" + de::toString(maxRepresentableValue) + ")";
640 colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
641 + (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");
643 if (storeNegativeValues)
644 colorExpr += "-" + de::toString(deRoundFloatToInt32((float)deMax32(xMax, yMax) / 2.0f));
// When binding one layer at a time, the bound view is one dimension lower.
647 const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
648 const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");
650 const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
651 const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);
// Assemble the GLSL compute shader.
653 std::ostringstream src;
654 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
656 << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
657 if (m_declareImageFormatInShader)
659 const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
660 src << "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr << " u_image;\n";
663 src << "layout (binding = 0) writeonly uniform " << imageTypeStr << " u_image;\n";
// Single-layer mode gets the target layer index via a UBO.
665 if (m_singleLayerBind)
666 src << "layout (binding = 1) readonly uniform Constants {\n"
667 << " int u_layerNdx;\n"
671 << "void main (void)\n"
673 << " int gx = int(gl_GlobalInvocationID.x);\n"
674 << " int gy = int(gl_GlobalInvocationID.y);\n"
675 << " int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
676 << " imageStore(u_image, " << texelCoordStr << ", " << colorExpr << ");\n"
679 programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
// Template-method base: iterate() drives pipeline setup, dispatch and
// verification; derived classes fill in the descriptor/barrier/verify hooks.
// NOTE(review): access specifiers, brace lines, and some parameter lines
// (e.g. the 'minalign' ctor parameter, original line 691, and the 'uniform'
// parameter of getViewOffset, original line 711) are not visible in this
// dump.
682 //! Generic test iteration algorithm for image tests
683 class BaseTestInstance : public TestInstance
686 BaseTestInstance (Context& context,
687 const Texture& texture,
688 const VkFormat format,
689 const bool declareImageFormatInShader,
690 const bool singleLayerBind,
692 const bool bufferLoadUniform);
// Runs the whole test: record, submit, wait, verify.
694 tcu::TestStatus iterate (void);
696 virtual ~BaseTestInstance (void) {}
// Hooks implemented by derived test instances.
699 virtual VkDescriptorSetLayout prepareDescriptors (void) = 0;
700 virtual tcu::TestStatus verifyResult (void) = 0;
702 virtual void commandBeforeCompute (const VkCommandBuffer cmdBuffer) = 0;
703 virtual void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer) = 0;
704 virtual void commandAfterCompute (const VkCommandBuffer cmdBuffer) = 0;
706 virtual void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
707 const VkPipelineLayout pipelineLayout,
708 const int layerNdx) = 0;
// Texel-buffer view offset honouring the device's alignment rules.
709 virtual deUint32 getViewOffset (Context& context,
710 const VkFormat format,
// Immutable configuration captured at construction.
713 const Texture m_texture;
714 const VkFormat m_format;
715 const bool m_declareImageFormatInShader;
716 const bool m_singleLayerBind;
717 const bool m_minalign;
718 const bool m_bufferLoadUniform;
719 const deUint32 m_srcViewOffset;
720 const deUint32 m_dstViewOffset;
// NOTE(review): the initializer line for m_format (original line 726) and
// the brace lines of the (apparently empty) body are not visible in this
// dump.
723 BaseTestInstance::BaseTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign, const bool bufferLoadUniform)
724 : TestInstance (context)
725 , m_texture (texture)
727 , m_declareImageFormatInShader (declareImageFormatInShader)
728 , m_singleLayerBind (singleLayerBind)
729 , m_minalign (minalign)
730 , m_bufferLoadUniform (bufferLoadUniform)
// Source view honours the load path (uniform vs storage texel buffer);
// destination view uses the single-component format for 3-component formats.
731 , m_srcViewOffset (getViewOffset(context, format, m_bufferLoadUniform))
732 , m_dstViewOffset (getViewOffset(context, formatHasThreeComponents(format) ? getSingleComponentFormat(format) : format, false))
// Template method: build the compute pipeline, record one dispatch per
// bound layer (or a single dispatch over the whole image), submit, wait and
// delegate verification to the derived class.
// NOTE(review): brace lines and the guard around
// commandBetweenShaderInvocations (presumably 'if (layerNdx > 0)', original
// line 763) are not visible in this dump.
736 tcu::TestStatus BaseTestInstance::iterate (void)
738 const DeviceInterface& vk = m_context.getDeviceInterface();
739 const VkDevice device = m_context.getDevice();
740 const VkQueue queue = m_context.getUniversalQueue();
741 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
743 const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));
// Descriptor layout comes from the derived class hook.
745 const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
746 const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
747 const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
749 const Unique<VkCommandPool> cmdPool(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex));
750 const Unique<VkCommandBuffer> cmdBuffer(allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
752 beginCommandBuffer(vk, *cmdBuffer);
754 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
755 commandBeforeCompute(*cmdBuffer);
// Single-layer mode dispatches once per layer over a layer-sized grid.
757 const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
758 const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
759 for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
761 commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);
764 commandBetweenShaderInvocations(*cmdBuffer);
766 vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
769 commandAfterCompute(*cmdBuffer);
771 endCommandBuffer(vk, *cmdBuffer);
773 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
775 return verifyResult();
// Shared plumbing for store tests: owns the host-visible readback buffer and
// implements verification against a generated reference image.
// NOTE(review): access specifiers, brace lines and the 'minalign' ctor
// parameter line (original 787) are not visible in this dump.
778 //! Base store test implementation
779 class StoreTestInstance : public BaseTestInstance
782 StoreTestInstance (Context& context,
783 const Texture& texture,
784 const VkFormat format,
785 const bool declareImageFormatInShader,
786 const bool singleLayerBind,
788 const bool storeConstantValue);
791 virtual tcu::TestStatus verifyResult (void);
793 // Add empty implementations for functions that might be not needed
794 void commandBeforeCompute (const VkCommandBuffer) {}
795 void commandBetweenShaderInvocations (const VkCommandBuffer) {}
796 void commandAfterCompute (const VkCommandBuffer) {}
// Host-visible buffer receiving the stored image for verification.
798 de::MovePtr<BufferWithMemory> m_imageBuffer;
799 const VkDeviceSize m_imageSizeBytes;
800 bool m_storeConstantValue;
// Compute a texel-buffer view offset that honours the device's advertised
// minimum alignment (VK_EXT_texel_buffer_alignment when available, the core
// minTexelBufferOffsetAlignment limit otherwise).
// NOTE(review): the 'bool uniform' parameter line (original 811?), the
// 'if (m_minalign)' guard and the trailing 'return 0;' for the non-minalign
// case, plus brace lines, are not visible in this dump.
803 deUint32 BaseTestInstance::getViewOffset(Context& context,
804 const VkFormat format,
// Without the extension feature, fall back to the core limit.
809 if (!context.getTexelBufferAlignmentFeaturesEXT().texelBufferAlignment)
810 return (deUint32)context.getDeviceProperties().limits.minTexelBufferOffsetAlignment;
// Query the extension's alignment properties via vkGetPhysicalDeviceProperties2.
812 VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT alignmentProperties;
813 deMemset(&alignmentProperties, 0, sizeof(alignmentProperties));
814 alignmentProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT;
816 VkPhysicalDeviceProperties2 properties2;
817 deMemset(&properties2, 0, sizeof(properties2));
818 properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
819 properties2.pNext = &alignmentProperties;
821 context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties2);
// Pick the uniform- or storage-texel-buffer variant of each property.
823 VkBool32 singleTexelAlignment = uniform ? alignmentProperties.uniformTexelBufferOffsetSingleTexelAlignment :
824 alignmentProperties.storageTexelBufferOffsetSingleTexelAlignment;
825 VkDeviceSize align = uniform ? alignmentProperties.uniformTexelBufferOffsetAlignmentBytes :
826 alignmentProperties.storageTexelBufferOffsetAlignmentBytes;
// Three-component formats are viewed single-component, so align to the
// channel size rather than the full texel size in that case.
828 VkDeviceSize texelSize = formatHasThreeComponents(format) ? tcu::getChannelSize(vk::mapVkFormat(format).type) : tcu::getPixelSize(vk::mapVkFormat(format));
830 if (singleTexelAlignment)
831 align = de::min(align, texelSize);
833 return (deUint32)align;
// Allocate the host-visible readback buffer sized for the whole image plus
// the destination view offset.
// NOTE(review): brace lines (and possibly a blank/comment line around
// original 849) are not visible in this dump.
839 StoreTestInstance::StoreTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign, const bool storeConstantValue)
840 : BaseTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, false)
841 , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
842 , m_storeConstantValue (storeConstantValue)
844 const DeviceInterface& vk = m_context.getDeviceInterface();
845 const VkDevice device = m_context.getDevice();
846 Allocator& allocator = m_context.getDefaultAllocator();
848 // A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.
850 m_imageBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
851 vk, device, allocator,
852 makeBufferCreateInfo(m_imageSizeBytes + m_dstViewOffset, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
853 MemoryRequirement::HostVisible));
// Compare the buffer written by the compute shader against a CPU-generated
// reference image; returns pass/fail accordingly.
tcu::TestStatus StoreTestInstance::verifyResult (void)
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const tcu::IVec3 imageSize = m_texture.size();
    const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format, m_storeConstantValue);
    // Make the device writes visible to the host before reading the result.
    const Allocation& alloc = m_imageBuffer->getAllocation();
    invalidateAlloc(vk, device, alloc);
    // Result data starts at m_dstViewOffset within the helper buffer.
    const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize, (const char *)alloc.getHostPtr() + m_dstViewOffset);
    if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
        return tcu::TestStatus::pass("Passed");
    return tcu::TestStatus::fail("Image comparison failed");
//! Store test for images
class ImageStoreTestInstance : public StoreTestInstance
    ImageStoreTestInstance (Context& context,
                            const Texture& texture,
                            const VkFormat format,
                            const bool declareImageFormatInShader,
                            const bool singleLayerBind,
                            const bool storeConstantValue);
    // Hooks invoked by the base class around the compute dispatch.
    VkDescriptorSetLayout prepareDescriptors (void);
    void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
    void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
    void commandAfterCompute (const VkCommandBuffer cmdBuffer);
    void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
                                         const VkPipelineLayout pipelineLayout,
    de::MovePtr<Image> m_image;
    de::MovePtr<BufferWithMemory> m_constantsBuffer; // uniform buffer: one chunk (holding the layer index) per layer
    const VkDeviceSize m_constantsBufferChunkSizeBytes;
    Move<VkDescriptorSetLayout> m_descriptorSetLayout;
    Move<VkDescriptorPool> m_descriptorPool;
    std::vector<SharedVkDescriptorSet> m_allDescriptorSets; // one entry per layer in single-layer-bind mode, else only [0] is used
    std::vector<SharedVkImageView> m_allImageViews;
// Creates the storage image plus a uniform "constants" buffer with one chunk per
// layer; each chunk is initialized with its layer index for the shader to read.
// NOTE(review): the 'minalign' parameter forwarded below sits on a signature line
// outside this excerpt.
ImageStoreTestInstance::ImageStoreTestInstance (Context& context,
                                                const Texture& texture,
                                                const VkFormat format,
                                                const bool declareImageFormatInShader,
                                                const bool singleLayerBind,
                                                const bool storeConstantValue)
    : StoreTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, storeConstantValue)
    , m_constantsBufferChunkSizeBytes (getOptimalUniformBufferChunkSize(context.getInstanceInterface(), context.getPhysicalDevice(), sizeof(deUint32)))
    , m_allDescriptorSets (texture.numLayers())
    , m_allImageViews (texture.numLayers())
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    Allocator& allocator = m_context.getDefaultAllocator();
    // Storage image written by the shader and later copied out for verification.
    m_image = de::MovePtr<Image>(new Image(
        vk, device, allocator,
        makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
        MemoryRequirement::Any));
    // This buffer will be used to pass constants to the shader
    const int numLayers = m_texture.numLayers();
    const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
    m_constantsBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
        vk, device, allocator,
        makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
        MemoryRequirement::HostVisible));
    // Zero the whole buffer, then write each chunk's layer index and flush to the device.
    const Allocation& alloc = m_constantsBuffer->getAllocation();
    deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());
    deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));
    for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
        deUint32* valuePtr = reinterpret_cast<deUint32*>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
        *valuePtr = static_cast<deUint32>(layerNdx);
    flushAlloc(vk, device, alloc);
// Build the descriptor set layout/pool and allocate descriptor sets and image
// views: one set + single-layer view per layer in single-layer-bind mode,
// otherwise a single set + view covering all layers.
VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors (void)
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const int numLayers = m_texture.numLayers();
    // Binding 0 = storage image, binding 1 = per-layer constants (uniform buffer).
    m_descriptorSetLayout = DescriptorSetLayoutBuilder()
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
        .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
    m_descriptorPool = DescriptorPoolBuilder()
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
        .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
        .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
    if (m_singleLayerBind)
        for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
            m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
            m_allImageViews[layerNdx] = makeVkSharedPtr(makeImageView(
                vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
                makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
    else // bind all layers at once
        m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
        m_allImageViews[0] = makeVkSharedPtr(makeImageView(
            vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
            makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers)));
    return *m_descriptorSetLayout; // not passing the ownership
// Update this layer's descriptor set (binding 0 = image view, binding 1 = the
// layer's chunk of the constants buffer) and bind it for the compute dispatch.
void ImageStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
    const VkImageView imageView = **m_allImageViews[layerNdx];
    // The image is accessed in the GENERAL layout (see commandBeforeCompute).
    const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
    // Set the next chunk of the constants buffer. Each chunk begins with layer index that we've set before.
    const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
        m_constantsBuffer->get(), layerNdx*m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);
    DescriptorSetUpdateBuilder()
        .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
        .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
        .update(vk, device);
    vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
// Record pre-dispatch barriers: transition the whole image from UNDEFINED to
// GENERAL for shader writes, and make the host-written constants buffer visible
// to shader reads — both in a single pipeline barrier.
void ImageStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
    const VkImageMemoryBarrier setImageLayoutBarrier = makeImageMemoryBarrier(
        0u, VK_ACCESS_SHADER_WRITE_BIT,
        VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
        m_image->get(), fullImageSubresourceRange);
    const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
    const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
        VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
        m_constantsBuffer->get(), 0ull, constantsBufferSize);
    vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &writeConstantsBarrier, 1, &setImageLayoutBarrier);
// Barrier between per-layer dispatches so successive shader writes to the image
// do not race (delegates to the shared utility).
void ImageStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
    commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
// After the dispatch, copy the shader-written image into the host-visible helper
// buffer so verifyResult() can read it back.
void ImageStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
    commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
//! Store test for buffers
class BufferStoreTestInstance : public StoreTestInstance
    BufferStoreTestInstance (Context& context,
                             const Texture& texture,
                             const VkFormat format,
                             const bool declareImageFormatInShader,
                             const bool minalign,
                             const bool storeConstantValue);
    // Only these base-class hooks are overridden; the shader writes directly
    // into the shared helper buffer through a texel-buffer view.
    VkDescriptorSetLayout prepareDescriptors (void);
    void commandAfterCompute (const VkCommandBuffer cmdBuffer);
    void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
                                         const VkPipelineLayout pipelineLayout,
                                         const int layerNdx);
    Move<VkDescriptorSetLayout> m_descriptorSetLayout;
    Move<VkDescriptorPool> m_descriptorPool;
    Move<VkDescriptorSet> m_descriptorSet;
    Move<VkBufferView> m_bufferView; // view over m_imageBuffer starting at m_dstViewOffset
// Buffer variant: the base class's helper buffer doubles as the shader's
// destination texel buffer, so no extra resources are created here.
// singleLayerBind is forced to false — a buffer has no layers.
BufferStoreTestInstance::BufferStoreTestInstance (Context& context,
                                                  const Texture& texture,
                                                  const VkFormat format,
                                                  const bool declareImageFormatInShader,
                                                  const bool minalign,
                                                  const bool storeConstantValue)
    : StoreTestInstance(context, texture, format, declareImageFormatInShader, false, minalign, storeConstantValue)
// Single descriptor: a storage texel buffer view over the helper buffer,
// offset by m_dstViewOffset to exercise the alignment requirements.
VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors (void)
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    m_descriptorSetLayout = DescriptorSetLayoutBuilder()
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
    m_descriptorPool = DescriptorPoolBuilder()
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
        .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
    m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
    m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, m_dstViewOffset, m_imageSizeBytes);
    return *m_descriptorSetLayout; // not passing the ownership
// Buffers have exactly one "layer" (layerNdx must be 0): write the texel-buffer
// descriptor and bind the set for the dispatch.
void BufferStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
    DE_ASSERT(layerNdx == 0);
    const VkDevice device = m_context.getDevice();
    const DeviceInterface& vk = m_context.getDeviceInterface();
    DescriptorSetUpdateBuilder()
        .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
        .update(vk, device);
    vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
// Make the shader's writes to the helper buffer (including the view-offset
// region) visible to the host before verification reads it.
void BufferStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
    commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(), m_imageSizeBytes + m_dstViewOffset);
// Test case that loads from one image/buffer and stores a horizontally-flipped
// copy into another; variants are selected through the Flags bitmask.
class LoadStoreTest : public TestCase
    FLAG_SINGLE_LAYER_BIND = 1 << 0, //!< Run the shader multiple times, each time binding a different layer.
    FLAG_RESTRICT_IMAGES = 1 << 1, //!< If given, images in the shader will be qualified with "restrict".
    FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER = 1 << 2, //!< Declare the format of the images in the shader code
    FLAG_MINALIGN = 1 << 3, //!< Use bufferview offset that matches the advertised minimum alignment
    FLAG_UNIFORM_TEXEL_BUFFER = 1 << 4, //!< Load from a uniform texel buffer rather than a storage texel buffer
    LoadStoreTest (tcu::TestContext& testCtx,
                   const std::string& name,
                   const std::string& description,
                   const Texture& texture,
                   const VkFormat format,
                   const VkFormat imageFormat,
                   const deUint32 flags = FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER,
                   const deBool imageLoadStoreLodAMD = DE_FALSE);
    virtual void checkSupport (Context& context) const;
    void initPrograms (SourceCollections& programCollection) const;
    TestInstance* createInstance (Context& context) const;
    const Texture m_texture;
    const VkFormat m_format; //!< Format as accessed in the shader
    const VkFormat m_imageFormat; //!< Storage format
    const bool m_declareImageFormatInShader; //!< Whether the shader will specify the format layout qualifier of the images
    const bool m_singleLayerBind;
    const bool m_restrictImages;
    const bool m_minalign;
    bool m_bufferLoadUniform; //!< Load through a uniform (vs storage) texel buffer
    const deBool m_imageLoadStoreLodAMD; //!< Use the VK_AMD_shader_image_load_store_lod built-ins
// Decode the flags bitmask into individual booleans and sanity-check the
// texture/format combination.
LoadStoreTest::LoadStoreTest (tcu::TestContext& testCtx,
                              const std::string& name,
                              const std::string& description,
                              const Texture& texture,
                              const VkFormat format,
                              const VkFormat imageFormat,
                              const deUint32 flags,
                              const deBool imageLoadStoreLodAMD)
    : TestCase (testCtx, name, description)
    , m_texture (texture)
    // NOTE(review): the ', m_format (format)' initializer is on a line outside this excerpt.
    , m_imageFormat (imageFormat)
    , m_declareImageFormatInShader ((flags & FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER) != 0)
    , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
    , m_restrictImages ((flags & FLAG_RESTRICT_IMAGES) != 0)
    , m_minalign ((flags & FLAG_MINALIGN) != 0)
    , m_bufferLoadUniform ((flags & FLAG_UNIFORM_TEXEL_BUFFER) != 0)
    , m_imageLoadStoreLodAMD (imageLoadStoreLodAMD)
    // Single-layer binding only makes sense for layered textures.
    if (m_singleLayerBind)
        DE_ASSERT(m_texture.numLayers() > 1);
    // View format must be size-compatible with the underlying storage format.
    DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
// Throw NotSupportedError unless the device supports this test variant.
// Two compile-time paths perform equivalent checks: Vulkan (VkFormatProperties3,
// first half) and Vulkan SC (classic VkFormatProperties, second half).
// NOTE(review): the '#else' separating the two paths is on a line outside this excerpt.
void LoadStoreTest::checkSupport (Context& context) const
#ifndef CTS_USES_VULKANSC
    const VkFormatProperties3 formatProperties (context.getFormatProperties(m_format));
    const VkFormatProperties3 imageFormatProperties (context.getFormatProperties(m_imageFormat));
    if (m_imageLoadStoreLodAMD)
        context.requireDeviceFunctionality("VK_AMD_shader_image_load_store_lod");
    // Unformatted loads (no layout qualifier in the shader) need the read-without-format feature bit.
    if (!m_bufferLoadUniform && !m_declareImageFormatInShader && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR))
        TCU_THROW(NotSupportedError, "Format not supported for unformatted loads via storage images");
    if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
    // Both the shader-view format and the underlying storage format must support storage use.
    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");
    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageFormatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");
    if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageFormatProperties.optimalTilingFeatures))
        TCU_THROW(NotSupportedError, "Underlying format not supported at all for images");
    if ((m_texture.type() == IMAGE_TYPE_BUFFER) && !(imageFormatProperties.bufferFeatures))
        TCU_THROW(NotSupportedError, "Underlying format not supported at all for buffers");
    if (formatHasThreeComponents(m_format))
        // When the source buffer is three-component, the destination buffer is single-component.
        VkFormat dstFormat = getSingleComponentFormat(m_format);
        const VkFormatProperties3 dstFormatProperties (context.getFormatProperties(dstFormat));
        if (m_texture.type() == IMAGE_TYPE_BUFFER && !(dstFormatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
            TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
    if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
    if (m_bufferLoadUniform && m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for uniform texel buffers");
    // ---- Vulkan SC path: same checks via the legacy format-properties query. ----
    const vk::VkFormatProperties formatProperties (vk::getPhysicalDeviceFormatProperties(context.getInstanceInterface(),
                                                                                         context.getPhysicalDevice(),
    const vk::VkFormatProperties imageFormatProperties (vk::getPhysicalDeviceFormatProperties(context.getInstanceInterface(),
                                                                                              context.getPhysicalDevice(),
    if (m_imageLoadStoreLodAMD)
        context.requireDeviceFunctionality("VK_AMD_shader_image_load_store_lod");
    // Unformatted loads need the core read-without-format device feature here.
    if (!m_bufferLoadUniform && !m_declareImageFormatInShader)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT);
    if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
        context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");
    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageFormatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage images");
    if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
    if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageFormatProperties.optimalTilingFeatures))
        TCU_THROW(NotSupportedError, "Underlying format not supported at all for images");
    if ((m_texture.type() == IMAGE_TYPE_BUFFER) && !(imageFormatProperties.bufferFeatures))
        TCU_THROW(NotSupportedError, "Underlying format not supported at all for buffers");
    if (formatHasThreeComponents(m_format))
        // When the source buffer is three-component, the destination buffer is single-component.
        VkFormat dstFormat = getSingleComponentFormat(m_format);
        const vk::VkFormatProperties dstFormatProperties (vk::getPhysicalDeviceFormatProperties(context.getInstanceInterface(),
                                                                                                context.getPhysicalDevice(),
        if (m_texture.type() == IMAGE_TYPE_BUFFER && !(dstFormatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
            TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
    if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
    if (m_bufferLoadUniform && m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT))
        TCU_THROW(NotSupportedError, "Format not supported for uniform texel buffers");
#endif // CTS_USES_VULKANSC
// Generate the compute shader: each invocation loads a texel from u_image0 and
// stores it horizontally flipped into u_image1.
// NOTE(review): the enclosing switch over 'dimension' (cases 1/2/3), its braces
// and a few stream fragments are on lines outside this excerpt.
void LoadStoreTest::initPrograms (SourceCollections& programCollection) const
    const tcu::TextureFormat texFormat = mapVkFormat(m_format);
    // Single-layer binds operate on one layer, so use the layer's dimensionality.
    const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
    const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
    const std::string formatQualifierStr = getShaderImageFormatQualifier(texFormat);
    const std::string uniformTypeStr = getFormatPrefix(texFormat) + "textureBuffer";
    const std::string imageTypeStr = getShaderImageType(texFormat, usedImageType);
    const std::string maybeRestrictStr = (m_restrictImages ? "restrict " : "");
    const std::string xMax = de::toString(m_texture.size().x() - 1); // last texel index along X, used for the flip
    std::ostringstream src;
    src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
    // Unformatted loads require GL_EXT_shader_image_load_formatted.
    if (!m_declareImageFormatInShader)
        src << "#extension GL_EXT_shader_image_load_formatted : require\n";
    if (m_imageLoadStoreLodAMD)
        src << "#extension GL_AMD_shader_image_load_store_lod : require\n";
    src << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
    // Source binding 0: uniform texel buffer, or a readonly image with/without format qualifier.
    if (m_bufferLoadUniform)
        src << "layout (binding = 0) uniform " << uniformTypeStr << " u_image0;\n";
    else if (m_declareImageFormatInShader)
        src << "layout (binding = 0, " << formatQualifierStr << ") " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n";
        src << "layout (binding = 0) " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n";
    // Destination binding 1: three-component sources store into an unformatted destination.
    if (formatHasThreeComponents(m_format))
        src << "layout (binding = 1) " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n";
        src << "layout (binding = 1, " << formatQualifierStr << ") " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n";
    << "void main (void)\n"
    default: DE_ASSERT(0); // fallthrough
    // 1D path.
    if (m_bufferLoadUniform)
        // for three-component formats, the dst buffer is single-component and the shader
        // expands the store into 3 component-wise stores.
        std::string type = getFormatPrefix(texFormat) + "vec4";
        src << " int pos = int(gl_GlobalInvocationID.x);\n"
            " " << type << " t = texelFetch(u_image0, " + xMax + "-pos);\n";
        if (formatHasThreeComponents(m_format))
            src << " imageStore(u_image1, 3*pos+0, " << type << "(t.x));\n";
            src << " imageStore(u_image1, 3*pos+1, " << type << "(t.y));\n";
            src << " imageStore(u_image1, 3*pos+2, " << type << "(t.z));\n";
            src << " imageStore(u_image1, pos, t);\n";
    else if (m_imageLoadStoreLodAMD)
        " int pos = int(gl_GlobalInvocationID.x);\n";
        // Copy every mip level explicitly using the AMD LOD built-ins.
        for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
            std::string xMaxSize = de::toString(deMax32(((m_texture.layerSize().x() >> levelNdx) - 1), 1u));
            src << " imageStoreLodAMD(u_image1, pos, " + de::toString(levelNdx) + ", imageLoadLodAMD(u_image0, " + xMaxSize + "-pos, " + de::toString(levelNdx) + "));\n";
        " int pos = int(gl_GlobalInvocationID.x);\n"
        " imageStore(u_image1, pos, imageLoad(u_image0, " + xMax + "-pos));\n";
    // 2D path.
    if (m_imageLoadStoreLodAMD)
        src << " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n";
        for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
            std::string xMaxSize = de::toString(deMax32(((m_texture.layerSize().x() >> levelNdx) - 1), 1u));
            src << " imageStoreLodAMD(u_image1, pos, " + de::toString(levelNdx) + ", imageLoadLodAMD(u_image0, ivec2(" + xMaxSize + "-pos.x, pos.y), " + de::toString(levelNdx) + "));\n";
        " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
        " imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" + xMax + "-pos.x, pos.y)));\n";
    // 3D path.
    if (m_imageLoadStoreLodAMD)
        src << " ivec3 pos = ivec3(gl_GlobalInvocationID);\n";
        for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
            std::string xMaxSize = de::toString(deMax32(((m_texture.layerSize().x() >> levelNdx) - 1), 1u));
            src << " imageStoreLodAMD(u_image1, pos, " + de::toString(levelNdx) + ", imageLoadLodAMD(u_image0, ivec3(" + xMaxSize + "-pos.x, pos.y, pos.z), " + de::toString(levelNdx) + "));\n";
        " ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
        " imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" + xMax + "-pos.x, pos.y, pos.z)));\n";
    programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
//! Load/store test base implementation
class LoadStoreTestInstance : public BaseTestInstance
    LoadStoreTestInstance (Context& context,
                           const Texture& texture,
                           const VkFormat format,
                           const VkFormat imageFormat,
                           const bool declareImageFormatInShader,
                           const bool singleLayerBind,
                           const bool minalign,
                           const bool bufferLoadUniform);
    virtual BufferWithMemory* getResultBuffer (void) const = 0; //!< Get the buffer that contains the result image
    tcu::TestStatus verifyResult (void);
    // Add empty implementations for functions that might be not needed
    void commandBeforeCompute (const VkCommandBuffer) {}
    void commandBetweenShaderInvocations (const VkCommandBuffer) {}
    void commandAfterCompute (const VkCommandBuffer) {}
    de::MovePtr<BufferWithMemory> m_imageBuffer; //!< Source data and helper buffer
    const VkDeviceSize m_imageSizeBytes;
    const VkFormat m_imageFormat; //!< Image format (for storage, may be different than texture format)
    tcu::TextureLevel m_referenceImage; //!< Used as input data and later to verify result image
    bool m_bufferLoadUniform; //!< Load via a uniform (vs storage) texel buffer
    VkDescriptorType m_bufferLoadDescriptorType; //!< Derived from m_bufferLoadUniform in the constructor
    VkBufferUsageFlagBits m_bufferLoadUsageBit;
// Creates the source/helper buffer, fills it with the generated reference image
// and flushes it for the subsequent upload to (or direct use as) the source.
LoadStoreTestInstance::LoadStoreTestInstance (Context& context,
                                              const Texture& texture,
                                              const VkFormat format,
                                              const VkFormat imageFormat,
                                              const bool declareImageFormatInShader,
                                              const bool singleLayerBind,
                                              const bool minalign,
                                              const bool bufferLoadUniform)
    : BaseTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, bufferLoadUniform)
    , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
    , m_imageFormat (imageFormat)
    , m_referenceImage (generateReferenceImage(texture.size(), imageFormat, format))
    , m_bufferLoadUniform (bufferLoadUniform)
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    Allocator& allocator = m_context.getDefaultAllocator();
    // Descriptor type and buffer usage follow the uniform-vs-storage texel buffer choice.
    m_bufferLoadDescriptorType = m_bufferLoadUniform ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    m_bufferLoadUsageBit = m_bufferLoadUniform ? VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
    // A helper buffer with enough space to hold the whole image.
    m_imageBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
        vk, device, allocator,
        makeBufferCreateInfo(m_imageSizeBytes + m_srcViewOffset, m_bufferLoadUsageBit | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
        MemoryRequirement::HostVisible));
    // Copy reference data to buffer for subsequent upload to image.
    const Allocation& alloc = m_imageBuffer->getAllocation();
    deMemcpy((char *)alloc.getHostPtr() + m_srcViewOffset, m_referenceImage.getAccess().getDataPtr(), static_cast<size_t>(m_imageSizeBytes));
    flushAlloc(vk, device, alloc);
// Compare the result buffer against the reference image flipped the same way
// the shader flips it (horizontally).
tcu::TestStatus LoadStoreTestInstance::verifyResult (void)
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    // Apply the same transformation as done in the shader
    const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
    flipHorizontally(reference);
    // Make device writes visible to the host before reading the result.
    const Allocation& alloc = getResultBuffer()->getAllocation();
    invalidateAlloc(vk, device, alloc);
    const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(), (const char *)alloc.getHostPtr() + m_dstViewOffset);
    if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
        return tcu::TestStatus::pass("Passed");
    return tcu::TestStatus::fail("Image comparison failed");
//! Load/store test for images
class ImageLoadStoreTestInstance : public LoadStoreTestInstance
    ImageLoadStoreTestInstance (Context& context,
                                const Texture& texture,
                                const VkFormat format,
                                const VkFormat imageFormat,
                                const bool declareImageFormatInShader,
                                const bool singleLayerBind,
                                const bool minalign,
                                const bool bufferLoadUniform);
    // Hooks invoked by the base class around the compute dispatch.
    VkDescriptorSetLayout prepareDescriptors (void);
    void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
    void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
    void commandAfterCompute (const VkCommandBuffer cmdBuffer);
    void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
                                         const VkPipelineLayout pipelineLayout,
                                         const int layerNdx);
    // The destination image is copied into the shared helper buffer after compute.
    BufferWithMemory* getResultBuffer (void) const { return m_imageBuffer.get(); }
    de::MovePtr<Image> m_imageSrc;
    de::MovePtr<Image> m_imageDst;
    Move<VkDescriptorSetLayout> m_descriptorSetLayout;
    Move<VkDescriptorPool> m_descriptorPool;
    std::vector<SharedVkDescriptorSet> m_allDescriptorSets; // one entry per layer in single-layer-bind mode, else only [0] is used
    std::vector<SharedVkImageView> m_allSrcImageViews;
    std::vector<SharedVkImageView> m_allDstImageViews;
// Creates source and destination storage images. When the shader view format
// differs from the storage format, both images get MUTABLE_FORMAT_BIT so views
// of the other format are legal.
ImageLoadStoreTestInstance::ImageLoadStoreTestInstance (Context& context,
                                                        const Texture& texture,
                                                        const VkFormat format,
                                                        const VkFormat imageFormat,
                                                        const bool declareImageFormatInShader,
                                                        const bool singleLayerBind,
                                                        const bool minalign,
                                                        const bool bufferLoadUniform)
    : LoadStoreTestInstance (context, texture, format, imageFormat, declareImageFormatInShader, singleLayerBind, minalign, bufferLoadUniform)
    , m_allDescriptorSets (texture.numLayers())
    , m_allSrcImageViews (texture.numLayers())
    , m_allDstImageViews (texture.numLayers())
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const VkDevice device = m_context.getDevice();
    Allocator& allocator = m_context.getDefaultAllocator();
    const VkImageCreateFlags imageFlags = (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
    // Source image: uploaded from the helper buffer, read by the shader.
    m_imageSrc = de::MovePtr<Image>(new Image(
        vk, device, allocator,
        makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, imageFlags),
        MemoryRequirement::Any));
    // Destination image: written by the shader, copied back for verification.
    m_imageDst = de::MovePtr<Image>(new Image(
        vk, device, allocator,
        makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, imageFlags),
        MemoryRequirement::Any));
// Build layout/pool and, per layer (or once for the whole image), allocate a
// descriptor set plus src/dst image views.
VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors (void)
    const VkDevice device = m_context.getDevice();
    const DeviceInterface& vk = m_context.getDeviceInterface();
    const int numLayers = m_texture.numLayers();
    // Binding 0 = source image, binding 1 = destination image.
    m_descriptorSetLayout = DescriptorSetLayoutBuilder()
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
        .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
    m_descriptorPool = DescriptorPoolBuilder()
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
        .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
        .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
    if (m_singleLayerBind)
        // One set and one single-layer src/dst view pair per layer.
        for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
            const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
            const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);
            m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
            m_allSrcImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
            m_allDstImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
    else // bind all layers at once
        const VkImageViewType viewType = mapImageViewType(m_texture.type());
        const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);
        m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
        m_allSrcImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
        m_allDstImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
    return *m_descriptorSetLayout; // not passing the ownership
1580 void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
// Writes this layer's src/dst image views into its descriptor set (both in GENERAL layout)
// and binds the set to the compute bind point.
1582 const VkDevice device = m_context.getDevice();
1583 const DeviceInterface& vk = m_context.getDeviceInterface();
1585 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1586 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
1587 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
1589 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1590 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1592 DescriptorSetUpdateBuilder()
1593 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1594 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1595 .update(vk, device);
1596 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1599 void ImageLoadStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1601 const DeviceInterface& vk = m_context.getDeviceInterface();
1603 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
1605 const VkImageMemoryBarrier preCopyImageBarriers[] =
1607 makeImageMemoryBarrier(
1608 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
1609 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1610 m_imageSrc->get(), fullImageSubresourceRange),
1611 makeImageMemoryBarrier(
1612 0u, VK_ACCESS_SHADER_WRITE_BIT,
1613 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1614 m_imageDst->get(), fullImageSubresourceRange)
1617 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1618 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1619 m_imageBuffer->get(), 0ull, m_imageSizeBytes + m_srcViewOffset);
1621 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1622 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1625 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1626 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1627 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1628 m_imageSrc->get(), fullImageSubresourceRange);
1630 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
1632 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, ©Region);
1633 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
// Inserts a write-to-read barrier on the destination image between the two shader passes.
1637 void ImageLoadStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
1639 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
// Copies the destination image back into the host-visible buffer for result verification.
1642 void ImageLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1644 commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
1647 //! Load/store Lod AMD test for images
// Variant of the image load/store test that exercises all mipmap levels (VK_AMD_shader_image_load_store_lod).
1648 class ImageLoadStoreLodAMDTestInstance : public BaseTestInstance
1651 ImageLoadStoreLodAMDTestInstance (Context& context,
1652 const Texture& texture,
1653 const VkFormat format,
1654 const VkFormat imageFormat,
1655 const bool declareImageFormatInShader,
1656 const bool singleLayerBind,
1657 const bool minalign,
1658 const bool bufferLoadUniform);
1661 VkDescriptorSetLayout prepareDescriptors (void);
1662 void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
1663 void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
1664 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1666 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1667 const VkPipelineLayout pipelineLayout,
1668 const int layerNdx);
1670 BufferWithMemory* getResultBuffer (void) const { return m_imageBuffer.get(); }
1671 tcu::TestStatus verifyResult (void);
1673 de::MovePtr<BufferWithMemory> m_imageBuffer; //!< Source data and helper buffer
1674 const VkDeviceSize m_imageSizeBytes;
1675 const VkFormat m_imageFormat; //!< Image format (for storage, may be different than texture format)
1676 std::vector<tcu::TextureLevel> m_referenceImages; //!< Used as input data and later to verify result image
// Selects uniform vs. storage texel buffer descriptor/usage for the buffer-load path.
1678 bool m_bufferLoadUniform;
1679 VkDescriptorType m_bufferLoadDescriptorType;
1680 VkBufferUsageFlagBits m_bufferLoadUsageBit;
1682 de::MovePtr<Image> m_imageSrc;
1683 de::MovePtr<Image> m_imageDst;
1684 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1685 Move<VkDescriptorPool> m_descriptorPool;
1686 std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
1687 std::vector<SharedVkImageView> m_allSrcImageViews;
1688 std::vector<SharedVkImageView> m_allDstImageViews;
// Constructor: generates per-mip reference images, uploads them into a single staging buffer,
// and creates the mutable-format src/dst images with all mip levels.
1692 ImageLoadStoreLodAMDTestInstance::ImageLoadStoreLodAMDTestInstance (Context& context,
1693 const Texture& texture,
1694 const VkFormat format,
1695 const VkFormat imageFormat,
1696 const bool declareImageFormatInShader,
1697 const bool singleLayerBind,
1698 const bool minalign,
1699 const bool bufferLoadUniform)
1700 : BaseTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, bufferLoadUniform)
1701 , m_imageSizeBytes (getMipmapImageTotalSizeBytes(texture, format))
1702 , m_imageFormat (imageFormat)
1703 , m_bufferLoadUniform (bufferLoadUniform)
1704 , m_allDescriptorSets (texture.numLayers())
1705 , m_allSrcImageViews (texture.numLayers())
1706 , m_allDstImageViews (texture.numLayers())
1708 const DeviceInterface& vk = m_context.getDeviceInterface();
1709 const VkDevice device = m_context.getDevice();
1710 Allocator& allocator = m_context.getDefaultAllocator();
// MUTABLE_FORMAT is needed only when the view format differs from the image format.
1711 const VkImageCreateFlags imageFlags = (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
1713 const VkSampleCountFlagBits samples = static_cast<VkSampleCountFlagBits>(m_texture.numSamples()); // integer and bit mask are aligned, so we can cast like this
// One reference image per mip level, used both for upload and for result comparison.
1715 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1717 tcu::TextureLevel referenceImage = generateReferenceImage(texture.size(levelNdx), imageFormat, format);
1718 m_referenceImages.push_back(referenceImage);
1721 m_bufferLoadDescriptorType = m_bufferLoadUniform ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
1722 m_bufferLoadUsageBit = m_bufferLoadUniform ? VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1724 // A helper buffer with enough space to hold the whole image.
1725 m_imageBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1726 vk, device, allocator,
1727 makeBufferCreateInfo(m_imageSizeBytes + m_srcViewOffset, m_bufferLoadUsageBit | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
1728 MemoryRequirement::HostVisible));
1730 // Copy reference data to buffer for subsequent upload to image.
1732 const Allocation& alloc = m_imageBuffer->getAllocation();
1733 VkDeviceSize bufferOffset = 0u;
// Mip levels are packed back-to-back in the staging buffer, after m_srcViewOffset bytes.
1734 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1736 deMemcpy((char *)alloc.getHostPtr() + m_srcViewOffset + bufferOffset, m_referenceImages[levelNdx].getAccess().getDataPtr(), static_cast<size_t>(getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx)));
1737 bufferOffset += getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx);
1739 flushAlloc(vk, device, alloc);
1743 const VkImageCreateInfo imageParamsSrc =
1745 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1746 DE_NULL, // const void* pNext;
1747 (isCube(m_texture) ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u) | imageFlags, // VkImageCreateFlags flags;
1748 mapImageType(m_texture.type()), // VkImageType imageType;
1749 m_imageFormat, // VkFormat format;
1750 makeExtent3D(m_texture.layerSize()), // VkExtent3D extent;
1751 (deUint32)m_texture.numMipmapLevels(), // deUint32 mipLevels;
1752 (deUint32)m_texture.numLayers(), // deUint32 arrayLayers;
1753 samples, // VkSampleCountFlagBits samples;
1754 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1755 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, // VkImageUsageFlags usage;
1756 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1757 0u, // deUint32 queueFamilyIndexCount;
1758 DE_NULL, // const deUint32* pQueueFamilyIndices;
1759 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
// NOTE(review): the create-info argument line appears lost in this extract — presumably
// imageParamsSrc is passed to the Image constructor here; verify against upstream.
1762 m_imageSrc = de::MovePtr<Image>(new Image(
1763 vk, device, allocator,
1765 MemoryRequirement::Any));
1769 const VkImageCreateInfo imageParamsDst =
1771 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
1772 DE_NULL, // const void* pNext;
1773 (isCube(m_texture) ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u) | imageFlags, // VkImageCreateFlags flags;
1774 mapImageType(m_texture.type()), // VkImageType imageType;
1775 m_imageFormat, // VkFormat format;
1776 makeExtent3D(m_texture.layerSize()), // VkExtent3D extent;
1777 (deUint32)m_texture.numMipmapLevels(), // deUint32 mipLevels;
1778 (deUint32)m_texture.numLayers(), // deUint32 arrayLayers;
1779 samples, // VkSampleCountFlagBits samples;
1780 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
1781 VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, // VkImageUsageFlags usage;
1782 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1783 0u, // deUint32 queueFamilyIndexCount;
1784 DE_NULL, // const deUint32* pQueueFamilyIndices;
1785 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
// NOTE(review): same apparent omission — presumably imageParamsDst; verify against upstream.
1788 m_imageDst = de::MovePtr<Image>(new Image(
1789 vk, device, allocator,
1791 MemoryRequirement::Any));
// Compares every mip level of the result buffer against the (horizontally flipped) reference images.
1795 tcu::TestStatus ImageLoadStoreLodAMDTestInstance::verifyResult (void)
1797 const DeviceInterface& vk = m_context.getDeviceInterface();
1798 const VkDevice device = m_context.getDevice();
1800 const Allocation& alloc = getResultBuffer()->getAllocation();
1801 invalidateAlloc(vk, device, alloc);
1803 VkDeviceSize bufferOffset = 0;
1804 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1806 // Apply the same transformation as done in the shader
1807 const tcu::PixelBufferAccess reference = m_referenceImages[levelNdx].getAccess();
1808 flipHorizontally(reference);
// Result data for this level starts at the running byte offset within the readback buffer.
1810 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(levelNdx), (const char *)alloc.getHostPtr() + m_dstViewOffset + bufferOffset);
1812 if (!comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result, levelNdx))
1814 std::ostringstream errorMessage;
1815 errorMessage << "Image Level " << levelNdx << " comparison failed";
1816 return tcu::TestStatus::fail(errorMessage.str());
1818 bufferOffset += getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx);
1821 return tcu::TestStatus::pass("Passed");
1824 VkDescriptorSetLayout ImageLoadStoreLodAMDTestInstance::prepareDescriptors (void)
// Same layout/pool setup as the non-LOD variant, but the views span all mip levels.
1826 const VkDevice device = m_context.getDevice();
1827 const DeviceInterface& vk = m_context.getDeviceInterface();
1829 const int numLayers = m_texture.numLayers();
1830 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1831 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1832 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1835 m_descriptorPool = DescriptorPoolBuilder()
1836 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1837 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1838 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
1840 if (m_singleLayerBind)
// Per-layer sets; note the subresource range covers all mip levels of the single layer.
1842 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
1844 const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
1845 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, m_texture.numMipmapLevels(), layerNdx, 1u);
1847 m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1848 m_allSrcImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1849 m_allDstImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1852 else // bind all layers at once
1854 const VkImageViewType viewType = mapImageViewType(m_texture.type());
1855 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, m_texture.numMipmapLevels(), 0u, numLayers);
1857 m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1858 m_allSrcImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1859 m_allDstImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1862 return *m_descriptorSetLayout; // not passing the ownership
1865 void ImageLoadStoreLodAMDTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
// Writes this layer's src/dst views into its descriptor set and binds it for compute.
1867 const VkDevice device = m_context.getDevice();
1868 const DeviceInterface& vk = m_context.getDeviceInterface();
1870 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1871 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
1872 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
1874 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1875 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1877 DescriptorSetUpdateBuilder()
1878 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1879 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1880 .update(vk, device);
1881 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
// Uploads every mip level from the staging buffer into the source image and inserts the
// barriers needed before the compute dispatch.
1884 void ImageLoadStoreLodAMDTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1886 const DeviceInterface& vk = m_context.getDeviceInterface();
1887 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, m_texture.numMipmapLevels(), 0u, m_texture.numLayers());
1889 const VkImageMemoryBarrier preCopyImageBarriers[] =
1891 makeImageMemoryBarrier(
1892 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
1893 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1894 m_imageSrc->get(), fullImageSubresourceRange),
1895 makeImageMemoryBarrier(
1896 0u, VK_ACCESS_SHADER_WRITE_BIT,
1897 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1898 m_imageDst->get(), fullImageSubresourceRange)
// Flush host writes to the staging buffer before the transfer reads them.
1901 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1902 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1903 m_imageBuffer->get(), 0ull, m_imageSizeBytes + m_srcViewOffset);
1905 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1906 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1909 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1910 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1911 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1912 m_imageSrc->get(), fullImageSubresourceRange);
// One copy region per mip level, tracking the packed byte offset inside the staging buffer.
1914 std::vector<VkBufferImageCopy> copyRegions;
1915 VkDeviceSize bufferOffset = 0u;
1916 for (deInt32 levelNdx = 0; levelNdx < m_texture.numMipmapLevels(); levelNdx++)
1918 const VkBufferImageCopy copyParams =
1920 bufferOffset, // VkDeviceSize bufferOffset;
1921 0u, // deUint32 bufferRowLength;
1922 0u, // deUint32 bufferImageHeight;
1923 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, levelNdx, 0u, m_texture.numLayers()), // VkImageSubresourceLayers imageSubresource;
1924 makeOffset3D(0, 0, 0), // VkOffset3D imageOffset;
1925 makeExtent3D(m_texture.layerSize(levelNdx)), // VkExtent3D imageExtent;
1927 copyRegions.push_back(copyParams);
1928 bufferOffset += getMipmapLevelImageSizeBytes(m_texture, m_imageFormat, levelNdx);
1931 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32) copyRegions.size(), copyRegions.data());
1932 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
// Write-to-read barrier on the destination image between the shader passes.
1936 void ImageLoadStoreLodAMDTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
1938 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
// Reads back all mip levels of the destination image into the host-visible buffer.
1941 void ImageLoadStoreLodAMDTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1943 commandCopyMipmapImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageFormat, m_imageBuffer->get(), m_imageSizeBytes, m_texture);
1946 //! Load/store test for buffers
// Texel-buffer variant: src/dst are buffer views instead of image views, always single "layer".
1947 class BufferLoadStoreTestInstance : public LoadStoreTestInstance
1950 BufferLoadStoreTestInstance (Context& context,
1951 const Texture& texture,
1952 const VkFormat format,
1953 const VkFormat imageFormat,
1954 const bool declareImageFormatInShader,
1955 const bool minalign,
1956 const bool bufferLoadUniform);
1959 VkDescriptorSetLayout prepareDescriptors (void);
1960 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1962 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1963 const VkPipelineLayout pipelineLayout,
1964 const int layerNdx);
1966 BufferWithMemory* getResultBuffer (void) const { return m_imageBufferDst.get(); }
1968 de::MovePtr<BufferWithMemory> m_imageBufferDst;
1969 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1970 Move<VkDescriptorPool> m_descriptorPool;
1971 Move<VkDescriptorSet> m_descriptorSet;
1972 Move<VkBufferView> m_bufferViewSrc;
1973 Move<VkBufferView> m_bufferViewDst;
// Constructor: base class provides the source buffer; here we only add a host-visible
// destination texel buffer for the shader to write into.
1976 BufferLoadStoreTestInstance::BufferLoadStoreTestInstance (Context& context,
1977 const Texture& texture,
1978 const VkFormat format,
1979 const VkFormat imageFormat,
1980 const bool declareImageFormatInShader,
1981 const bool minalign,
1982 const bool bufferLoadUniform)
1983 : LoadStoreTestInstance(context, texture, format, imageFormat, declareImageFormatInShader, false, minalign, bufferLoadUniform)
1985 const DeviceInterface& vk = m_context.getDeviceInterface();
1986 const VkDevice device = m_context.getDevice();
1987 Allocator& allocator = m_context.getDefaultAllocator();
1989 // Create a destination buffer.
1991 m_imageBufferDst = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1992 vk, device, allocator,
1993 makeBufferCreateInfo(m_imageSizeBytes + m_dstViewOffset, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
1994 MemoryRequirement::HostVisible));
1997 VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors (void)
// Binding 0 = load texel buffer (uniform or storage, per m_bufferLoadDescriptorType),
// binding 1 = storage texel buffer the shader writes.
1999 const DeviceInterface& vk = m_context.getDeviceInterface();
2000 const VkDevice device = m_context.getDevice();
2002 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
2003 .addSingleBinding(m_bufferLoadDescriptorType, VK_SHADER_STAGE_COMPUTE_BIT)
2004 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2007 m_descriptorPool = DescriptorPoolBuilder()
2008 .addType(m_bufferLoadDescriptorType)
2009 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
2010 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
// Three-component formats are written through a single-component view (see formatHasThreeComponents).
2012 VkFormat dstFormat = formatHasThreeComponents(m_format) ? getSingleComponentFormat(m_format) : m_format;
2014 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
2015 m_bufferViewSrc = makeBufferView(vk, device, m_imageBuffer->get(), m_format, m_srcViewOffset, m_imageSizeBytes);
2016 m_bufferViewDst = makeBufferView(vk, device, m_imageBufferDst->get(), dstFormat, m_dstViewOffset, m_imageSizeBytes);
2018 return *m_descriptorSetLayout; // not passing the ownership
2021 void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
// Buffers have no layers; only layer index 0 is valid here.
2023 DE_ASSERT(layerNdx == 0);
2026 const VkDevice device = m_context.getDevice();
2027 const DeviceInterface& vk = m_context.getDeviceInterface();
2029 DescriptorSetUpdateBuilder()
2030 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), m_bufferLoadDescriptorType, &m_bufferViewSrc.get())
2031 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
2032 .update(vk, device);
2033 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
// Makes shader writes to the destination buffer visible to subsequent host reads.
2036 void BufferLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
2038 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(), m_imageSizeBytes + m_dstViewOffset);
2041 TestInstance* StoreTest::createInstance (Context& context) const
2043 if (m_texture.type() == IMAGE_TYPE_BUFFER)
2044 return new BufferStoreTestInstance(context, m_texture, m_format, m_declareImageFormatInShader, m_minalign, m_storeConstantValue);
2046 return new ImageStoreTestInstance(context, m_texture, m_format, m_declareImageFormatInShader, m_singleLayerBind, m_minalign, m_storeConstantValue);
2049 TestInstance* LoadStoreTest::createInstance (Context& context) const
2051 if (m_imageLoadStoreLodAMD)
2052 return new ImageLoadStoreLodAMDTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader, m_singleLayerBind, m_minalign, m_bufferLoadUniform);
2054 if (m_texture.type() == IMAGE_TYPE_BUFFER)
2055 return new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader, m_minalign, m_bufferLoadUniform);
2057 return new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader, m_singleLayerBind, m_minalign, m_bufferLoadUniform);
// Tests SPIR-V sign/zero-extend image operands: reads from one image format and writes another.
2060 class ImageExtendOperandTestInstance : public BaseTestInstance
2063 ImageExtendOperandTestInstance (Context& context,
2064 const Texture& texture,
2065 const VkFormat readFormat,
2066 const VkFormat writeFormat,
2067 bool relaxedPrecision);
2069 virtual ~ImageExtendOperandTestInstance (void) {}
2073 VkDescriptorSetLayout prepareDescriptors (void);
2074 void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
2075 void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
2076 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
2078 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
2079 const VkPipelineLayout pipelineLayout,
2080 const int layerNdx);
2082 tcu::TestStatus verifyResult (void);
2087 tcu::TextureLevel m_inputImageData;
2089 de::MovePtr<Image> m_imageSrc; // source image
2090 SharedVkImageView m_imageSrcView;
2091 VkDeviceSize m_imageSrcSize;
2093 de::MovePtr<Image> m_imageDst; // dest image
2094 SharedVkImageView m_imageDstView;
2095 VkFormat m_imageDstFormat;
2096 VkDeviceSize m_imageDstSize;
2098 de::MovePtr<BufferWithMemory> m_buffer; // result buffer
2100 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
2101 Move<VkDescriptorPool> m_descriptorPool;
2102 SharedVkDescriptorSet m_descriptorSet;
// When set, verification only compares the low 16 bits of each component (see clearHighBits).
2104 bool m_relaxedPrecision;
// Constructor: fills a reference image with a coordinate-based integer gradient (negative-capable
// for signed formats), creates src/dst images, and stages the input data in a host-visible buffer.
2107 ImageExtendOperandTestInstance::ImageExtendOperandTestInstance (Context& context,
2108 const Texture& texture,
2109 const VkFormat readFormat,
2110 const VkFormat writeFormat,
2111 bool relaxedPrecision)
2112 : BaseTestInstance (context, texture, readFormat, true, true, false, false)
2113 , m_imageDstFormat (writeFormat)
2114 , m_relaxedPrecision (relaxedPrecision)
2116 const DeviceInterface& vk = m_context.getDeviceInterface();
2117 const VkDevice device = m_context.getDevice();
2118 Allocator& allocator = m_context.getDefaultAllocator();
2119 const deInt32 width = texture.size().x();
2120 const deInt32 height = texture.size().y();
2121 const tcu::TextureFormat textureFormat = mapVkFormat(m_format);
2123 // Generate reference image
2124 m_isSigned = (getTextureChannelClass(textureFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER);
2125 m_inputImageData.setStorage(textureFormat, width, height, 1);
2127 const tcu::PixelBufferAccess access = m_inputImageData.getAccess();
// Signed formats start at -width/2 so the gradient includes negative values.
2128 const int valueStart = (m_isSigned ? (-width / 2) : 0);
2130 for (int x = 0; x < width; ++x)
2131 for (int y = 0; y < height; ++y)
2133 const tcu::IVec4 color(valueStart + x, valueStart + y, valueStart, valueStart);
2134 access.setPixel(color, x, y);
2137 // Create source image
2138 m_imageSrc = de::MovePtr<Image>(new Image(
2139 vk, device, allocator,
2140 makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0u),
2141 MemoryRequirement::Any));
2143 // Create destination image
2144 m_imageDst = de::MovePtr<Image>(new Image(
2145 vk, device, allocator,
2146 makeImageCreateInfo(m_texture, m_imageDstFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
2147 MemoryRequirement::Any));
2149 // Compute image and buffer sizes
2150 m_imageSrcSize = width * height * tcu::getPixelSize(textureFormat);
2151 m_imageDstSize = width * height * tcu::getPixelSize(mapVkFormat(m_imageDstFormat));
// The single helper buffer is reused for upload and readback, so size it for the larger of the two.
2152 VkDeviceSize bufferSizeBytes = de::max(m_imageSrcSize, m_imageDstSize);
2154 // Create helper buffer able to store input data and image write result
2155 m_buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2156 vk, device, allocator,
2157 makeBufferCreateInfo(bufferSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
2158 MemoryRequirement::HostVisible));
2160 const Allocation& alloc = m_buffer->getAllocation();
2161 deMemcpy(alloc.getHostPtr(), m_inputImageData.getAccess().getDataPtr(), static_cast<size_t>(m_imageSrcSize));
2162 flushAlloc(vk, device, alloc);
2165 VkDescriptorSetLayout ImageExtendOperandTestInstance::prepareDescriptors (void)
// Single descriptor set: binding 0 = src view (read format), binding 1 = dst view (write format).
2167 const DeviceInterface& vk = m_context.getDeviceInterface();
2168 const VkDevice device = m_context.getDevice();
2170 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
2171 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2172 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2175 m_descriptorPool = DescriptorPoolBuilder()
2176 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2177 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2178 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1);
2180 const VkImageViewType viewType = mapImageViewType(m_texture.type());
2181 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
2183 m_descriptorSet = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
2184 m_imageSrcView = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
2185 m_imageDstView = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_imageDstFormat, subresourceRange));
2187 return *m_descriptorSetLayout; // not passing the ownership
// Updates and binds the single descriptor set (layerNdx is unused: this test always binds one layer).
2190 void ImageExtendOperandTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
2194 const DeviceInterface& vk = m_context.getDeviceInterface();
2195 const VkDevice device = m_context.getDevice();
2196 const VkDescriptorSet descriptorSet = **m_descriptorSet;
2198 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, **m_imageSrcView, VK_IMAGE_LAYOUT_GENERAL);
2199 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, **m_imageDstView, VK_IMAGE_LAYOUT_GENERAL);
2201 typedef DescriptorSetUpdateBuilder::Location DSUBL;
2202 DescriptorSetUpdateBuilder()
2203 .writeSingle(descriptorSet, DSUBL::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
2204 .writeSingle(descriptorSet, DSUBL::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
2205 .update(vk, device);
2206 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
2209 void ImageExtendOperandTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
2211 const DeviceInterface& vk = m_context.getDeviceInterface();
2213 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
2215 const VkImageMemoryBarrier preCopyImageBarriers[] =
2217 makeImageMemoryBarrier(
2218 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
2219 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
2220 m_imageSrc->get(), fullImageSubresourceRange),
2221 makeImageMemoryBarrier(
2222 0u, VK_ACCESS_SHADER_WRITE_BIT,
2223 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
2224 m_imageDst->get(), fullImageSubresourceRange)
2227 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
2228 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
2229 m_buffer->get(), 0ull, m_imageSrcSize);
2231 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
2232 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
2235 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
2236 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
2237 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
2238 m_imageSrc->get(), fullImageSubresourceRange);
2240 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
2242 vk.cmdCopyBufferToImage(cmdBuffer, m_buffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, ©Region);
2243 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
2247 void ImageExtendOperandTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
2249 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
2252 void ImageExtendOperandTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
2254 commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_buffer->get(), m_imageDstSize, m_texture);
2257 // Clears the high bits of every pixel in the pixel buffer, leaving only the lowest 16 bits of each component.
2258 void clearHighBits (const tcu::PixelBufferAccess& pixels, int width, int height)
2260 for (int y = 0; y < height; ++y)
2261 for (int x = 0; x < width; ++x)
2263 auto color = pixels.getPixelUint(x, y);
2264 for (int c = 0; c < decltype(color)::SIZE; ++c)
2265 color[c] &= 0xFFFFull;
2266 pixels.setPixel(color, x, y);
2270 tcu::TestStatus ImageExtendOperandTestInstance::verifyResult (void)
2272 const DeviceInterface& vk = m_context.getDeviceInterface();
2273 const VkDevice device = m_context.getDevice();
2274 const tcu::IVec3 imageSize = m_texture.size();
2275 const tcu::PixelBufferAccess inputAccess = m_inputImageData.getAccess();
2276 const deInt32 width = inputAccess.getWidth();
2277 const deInt32 height = inputAccess.getHeight();
2278 tcu::TextureLevel refImage (mapVkFormat(m_imageDstFormat), width, height);
2279 tcu::PixelBufferAccess refAccess = refImage.getAccess();
2281 for (int x = 0; x < width; ++x)
2282 for (int y = 0; y < height; ++y)
2284 tcu::IVec4 color = inputAccess.getPixelInt(x, y);
2285 refAccess.setPixel(color, x, y);
2288 const Allocation& alloc = m_buffer->getAllocation();
2289 invalidateAlloc(vk, device, alloc);
2290 const tcu::PixelBufferAccess result(mapVkFormat(m_imageDstFormat), imageSize, alloc.getHostPtr());
2292 if (m_relaxedPrecision)
2294 // Preserve the lowest 16 bits of the reference and result pixels only.
2295 clearHighBits(refAccess, width, height);
2296 clearHighBits(result, width, height);
2299 if (tcu::intThresholdCompare (m_context.getTestContext().getLog(), "Comparison", "Comparison", refAccess, result, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT, true/*use64Bits*/))
2300 return tcu::TestStatus::pass("Passed");
2302 return tcu::TestStatus::fail("Image comparison failed");
2305 enum class ExtendTestType
2312 enum class ExtendOperand
2318 class ImageExtendOperandTest : public TestCase
2321 ImageExtendOperandTest (tcu::TestContext& testCtx,
2322 const std::string& name,
2323 const Texture texture,
2324 const VkFormat readFormat,
2325 const VkFormat writeFormat,
2326 const bool signedInt,
2327 const bool relaxedPrecision,
2328 ExtendTestType extendTestType);
2330 void checkSupport (Context& context) const;
2331 void initPrograms (SourceCollections& programCollection) const;
2332 TestInstance* createInstance (Context& context) const;
2335 bool isWriteTest () const { return (m_extendTestType == ExtendTestType::WRITE) ||
2336 (m_extendTestType == ExtendTestType::WRITE_NONTEMPORAL); }
2338 const Texture m_texture;
2339 VkFormat m_readFormat;
2340 VkFormat m_writeFormat;
2341 bool m_operandForce; // Use an operand that doesn't match SampledType?
2342 bool m_relaxedPrecision;
2343 ExtendTestType m_extendTestType;
2346 ImageExtendOperandTest::ImageExtendOperandTest (tcu::TestContext& testCtx,
2347 const std::string& name,
2348 const Texture texture,
2349 const VkFormat readFormat,
2350 const VkFormat writeFormat,
2351 const bool operandForce,
2352 const bool relaxedPrecision,
2353 ExtendTestType extendTestType)
2354 : TestCase (testCtx, name, "")
2355 , m_texture (texture)
2356 , m_readFormat (readFormat)
2357 , m_writeFormat (writeFormat)
2358 , m_operandForce (operandForce)
2359 , m_relaxedPrecision (relaxedPrecision)
2360 , m_extendTestType (extendTestType)
2364 void checkFormatProperties (Context& context, VkFormat format)
2366 #ifndef CTS_USES_VULKANSC
2367 const VkFormatProperties3 formatProperties (context.getFormatProperties(format));
2369 if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
2370 TCU_THROW(NotSupportedError, "Format not supported for storage images");
2372 const VkFormatProperties formatProperties(getPhysicalDeviceFormatProperties(context.getInstanceInterface(), context.getPhysicalDevice(), format));
2374 if (!(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
2375 TCU_THROW(NotSupportedError, "Format not supported for storage images");
2376 #endif // CTS_USES_VULKANSC
2379 void check64BitSupportIfNeeded (Context& context, VkFormat readFormat, VkFormat writeFormat)
2381 if (is64BitIntegerFormat(readFormat) || is64BitIntegerFormat(writeFormat))
2383 const auto& features = context.getDeviceFeatures();
2384 if (!features.shaderInt64)
2385 TCU_THROW(NotSupportedError, "64-bit integers not supported in shaders");
2389 void ImageExtendOperandTest::checkSupport (Context& context) const
2391 if (!context.requireDeviceFunctionality("VK_KHR_spirv_1_4"))
2392 TCU_THROW(NotSupportedError, "VK_KHR_spirv_1_4 not supported");
2394 #ifndef CTS_USES_VULKANSC
2395 if ((m_extendTestType == ExtendTestType::WRITE_NONTEMPORAL) &&
2396 (context.getUsedApiVersion() < VK_API_VERSION_1_3))
2397 TCU_THROW(NotSupportedError, "Vulkan 1.3 or higher is required for this test to run");
2398 #endif // CTS_USES_VULKANSC
2400 check64BitSupportIfNeeded(context, m_readFormat, m_writeFormat);
2402 checkFormatProperties(context, m_readFormat);
2403 checkFormatProperties(context, m_writeFormat);
// Builds the compute shader as SPIR-V assembly from a string template. The shader
// reads one texel from the source image and writes it to the destination image,
// attaching a SignExtend/ZeroExtend operand to the read or the write (or a
// Nontemporal operand to the write for the WRITE_NONTEMPORAL variant).
// NOTE(review): several template/placeholder lines referenced by the specialization
// map below (${capability}, ${extension}, ${extra_types}, ${image_types},
// ${image_uniforms}, ${image_load}) are not visible in this excerpt of the file.
void ImageExtendOperandTest::initPrograms (SourceCollections& programCollection) const
    tcu::StringTemplate shaderTemplate(
        "OpCapability Shader\n"
        "OpCapability StorageImageExtendedFormats\n"
        "%std450 = OpExtInstImport \"GLSL.std.450\"\n"
        "OpMemoryModel Logical GLSL450\n"
        "OpEntryPoint GLCompute %main \"main\" %id %src_image_ptr %dst_image_ptr\n"
        "OpExecutionMode %main LocalSize 1 1 1\n"
        // Decorations: binding 0 is the read-only source, binding 1 the write-only destination.
        "OpDecorate %id BuiltIn GlobalInvocationId\n"
        "OpDecorate %src_image_ptr DescriptorSet 0\n"
        "OpDecorate %src_image_ptr Binding 0\n"
        "OpDecorate %src_image_ptr NonWritable\n"
        "${relaxed_precision}"
        "OpDecorate %dst_image_ptr DescriptorSet 0\n"
        "OpDecorate %dst_image_ptr Binding 1\n"
        "OpDecorate %dst_image_ptr NonReadable\n"
        // Common scalar/vector types.
        "%type_void = OpTypeVoid\n"
        "%type_i32 = OpTypeInt 32 1\n"
        "%type_u32 = OpTypeInt 32 0\n"
        "%type_vec2_i32 = OpTypeVector %type_i32 2\n"
        "%type_vec2_u32 = OpTypeVector %type_u32 2\n"
        "%type_vec3_i32 = OpTypeVector %type_i32 3\n"
        "%type_vec3_u32 = OpTypeVector %type_u32 3\n"
        "%type_vec4_i32 = OpTypeVector %type_i32 4\n"
        "%type_vec4_u32 = OpTypeVector %type_u32 4\n"
        "%type_fun_void = OpTypeFunction %type_void\n"
        "%type_ptr_in_vec3_u32 = OpTypePointer Input %type_vec3_u32\n"
        "%type_ptr_in_u32 = OpTypePointer Input %type_u32\n"
        "%id = OpVariable %type_ptr_in_vec3_u32 Input\n"
        "${image_variables}"
        // Body: load the invocation id, form a 2D signed coordinate, copy one texel.
        "%main = OpFunction %type_void None %type_fun_void\n"
        "%label = OpLabel\n"
        "%idvec = OpLoad %type_vec3_u32 %id\n"
        "%id_xy = OpVectorShuffle %type_vec2_u32 %idvec %idvec 0 1\n"
        "%coord = OpBitcast %type_vec2_i32 %id_xy\n"
        "%value = OpImageRead ${sampled_type_vec4} %src_image %coord ${read_extend_operand}\n"
        " OpImageWrite %dst_image %coord %value ${write_extend_operand}\n"
        " OpFunctionEnd\n");
    // Sign of the tested format decides SignExtend vs ZeroExtend below.
    const auto testedFormat = mapVkFormat(isWriteTest() ? m_writeFormat : m_readFormat);
    const bool isSigned = (getTextureChannelClass(testedFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER);
    const auto isRead64 = is64BitIntegerFormat(m_readFormat);
    const auto isWrite64 = is64BitIntegerFormat(m_writeFormat);
    DE_ASSERT(isRead64 == isWrite64);
    const bool using64Bits = (isRead64 || isWrite64);
    // Additional capabilities when needed.
    std::string capability;
    std::string extension;
    std::string extraTypes;
    // NOTE(review): the 'if (using64Bits)' guard and the 'capability +='/'extraTypes +='
    // statements that own the string literals below are not visible in this excerpt.
    extension += "OpExtension \"SPV_EXT_shader_image_int64\"\n";
        "OpCapability Int64\n"
        "OpCapability Int64ImageEXT\n"
        "%type_i64 = OpTypeInt 64 1\n"
        "%type_u64 = OpTypeInt 64 0\n"
        "%type_vec3_i64 = OpTypeVector %type_i64 3\n"
        "%type_vec3_u64 = OpTypeVector %type_u64 3\n"
        "%type_vec4_i64 = OpTypeVector %type_i64 4\n"
        "%type_vec4_u64 = OpTypeVector %type_u64 4\n"
    std::string relaxed = "";
    if (m_relaxedPrecision)
        relaxed += "OpDecorate %src_image_ptr RelaxedPrecision\n";
    // Sampled type depends on the format sign and mismatch force flag.
    const bool signedSampleType = ((isSigned && !m_operandForce) || (!isSigned && m_operandForce));
    const std::string bits = (using64Bits ? "64" : "32");
    const std::string sampledTypePostfix = (signedSampleType ? "i" : "u") + bits;
    const std::string extendOperandStr = (isSigned ? "SignExtend" : "ZeroExtend");
    // Initial specializations target the source image; some entries are rewritten
    // below when specializing the destination image templates.
    std::map<std::string, std::string> specializations
        { "image_type_id", "%type_image" },
        { "image_uni_ptr_type_id", "%type_ptr_uniform_const_image" },
        { "image_var_id", "%src_image_ptr" },
        { "image_id", "%src_image" },
        { "capability", capability },
        { "extension", extension },
        { "extra_types", extraTypes },
        { "relaxed_precision", relaxed },
        { "image_format", getSpirvFormat(m_readFormat) },
        { "sampled_type", (std::string("%type_") + sampledTypePostfix) },
        { "sampled_type_vec4", (std::string("%type_vec4_") + sampledTypePostfix) },
        { "read_extend_operand", (!isWriteTest() ? extendOperandStr : "") },
        { "write_extend_operand", (isWriteTest() ? extendOperandStr : "") },
    // The Nontemporal variant needs SPIR-V 1.6 and replaces the write operand.
    SpirvVersion spirvVersion = SPIRV_VERSION_1_4;
    bool allowSpirv14 = true;
    if (m_extendTestType == ExtendTestType::WRITE_NONTEMPORAL)
        spirvVersion = SPIRV_VERSION_1_6;
        allowSpirv14 = false;
        specializations["write_extend_operand"] = "Nontemporal";
    // Additional parametrization is needed for the case when source and destination textures have the same format
    tcu::StringTemplate imageTypeTemplate(
        "${image_type_id} = OpTypeImage ${sampled_type} 2D 0 0 0 2 ${image_format}\n");
    tcu::StringTemplate imageUniformTypeTemplate(
        "${image_uni_ptr_type_id} = OpTypePointer UniformConstant ${image_type_id}\n");
    tcu::StringTemplate imageVariablesTemplate(
        "${image_var_id} = OpVariable ${image_uni_ptr_type_id} UniformConstant\n");
    tcu::StringTemplate imageLoadTemplate(
        "${image_id} = OpLoad ${image_type_id} ${image_var_id}\n");
    std::string imageTypes;
    std::string imageUniformTypes;
    std::string imageVariables;
    std::string imageLoad;
    // If input image format is the same as output there is less spir-v definitions
    if (m_readFormat == m_writeFormat)
        imageTypes = imageTypeTemplate.specialize(specializations);
        imageUniformTypes = imageUniformTypeTemplate.specialize(specializations);
        imageVariables = imageVariablesTemplate.specialize(specializations);
        imageLoad = imageLoadTemplate.specialize(specializations);
        // Reuse the single image type for the destination variable and load.
        specializations["image_var_id"] = "%dst_image_ptr";
        specializations["image_id"] = "%dst_image";
        imageVariables += imageVariablesTemplate.specialize(specializations);
        imageLoad += imageLoadTemplate.specialize(specializations);
    // Otherwise (formats differ) build distinct source and destination image types.
    // NOTE(review): the 'else' introducing this branch is not visible in this excerpt.
        specializations["image_type_id"] = "%type_src_image";
        specializations["image_uni_ptr_type_id"] = "%type_ptr_uniform_const_src_image";
        imageTypes = imageTypeTemplate.specialize(specializations);
        imageUniformTypes = imageUniformTypeTemplate.specialize(specializations);
        imageVariables = imageVariablesTemplate.specialize(specializations);
        imageLoad = imageLoadTemplate.specialize(specializations);
        specializations["image_format"] = getSpirvFormat(m_writeFormat);
        specializations["image_type_id"] = "%type_dst_image";
        specializations["image_uni_ptr_type_id"] = "%type_ptr_uniform_const_dst_image";
        specializations["image_var_id"] = "%dst_image_ptr";
        specializations["image_id"] = "%dst_image";
        imageTypes += imageTypeTemplate.specialize(specializations);
        imageUniformTypes += imageUniformTypeTemplate.specialize(specializations);
        imageVariables += imageVariablesTemplate.specialize(specializations);
        imageLoad += imageLoadTemplate.specialize(specializations);
    specializations["image_types"] = imageTypes;
    specializations["image_uniforms"] = imageUniformTypes;
    specializations["image_variables"] = imageVariables;
    specializations["image_load"] = imageLoad;
    // Specialize whole shader and add it to program collection
    programCollection.spirvAsmSources.add("comp") << shaderTemplate.specialize(specializations)
        << vk::SpirVAsmBuildOptions(programCollection.usedVulkanVersion, spirvVersion, allowSpirv14);
2598 TestInstance* ImageExtendOperandTest::createInstance(Context& context) const
2600 return new ImageExtendOperandTestInstance(context, m_texture, m_readFormat, m_writeFormat, m_relaxedPrecision);
2603 static const Texture s_textures[] =
2605 Texture(IMAGE_TYPE_1D, tcu::IVec3(64, 1, 1), 1),
2606 Texture(IMAGE_TYPE_1D_ARRAY, tcu::IVec3(64, 1, 1), 8),
2607 Texture(IMAGE_TYPE_2D, tcu::IVec3(64, 64, 1), 1),
2608 Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(64, 64, 1), 8),
2609 Texture(IMAGE_TYPE_3D, tcu::IVec3(64, 64, 8), 1),
2610 Texture(IMAGE_TYPE_CUBE, tcu::IVec3(64, 64, 1), 6),
2611 Texture(IMAGE_TYPE_CUBE_ARRAY, tcu::IVec3(64, 64, 1), 2*6),
2612 Texture(IMAGE_TYPE_BUFFER, tcu::IVec3(64, 1, 1), 1),
2615 const Texture& getTestTexture (const ImageType imageType)
2617 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
2618 if (s_textures[textureNdx].type() == imageType)
2619 return s_textures[textureNdx];
2621 DE_FATAL("Internal error");
2622 return s_textures[0];
2625 static const VkFormat s_formats[] =
2627 VK_FORMAT_R32G32B32A32_SFLOAT,
2628 VK_FORMAT_R16G16B16A16_SFLOAT,
2629 VK_FORMAT_R32_SFLOAT,
2631 VK_FORMAT_R32G32B32A32_UINT,
2632 VK_FORMAT_R16G16B16A16_UINT,
2633 VK_FORMAT_R8G8B8A8_UINT,
2636 VK_FORMAT_R32G32B32A32_SINT,
2637 VK_FORMAT_R16G16B16A16_SINT,
2638 VK_FORMAT_R8G8B8A8_SINT,
2641 VK_FORMAT_R8G8B8A8_UNORM,
2643 VK_FORMAT_B8G8R8A8_UNORM,
2644 VK_FORMAT_B8G8R8A8_UINT,
2646 VK_FORMAT_R8G8B8A8_SNORM,
2648 VK_FORMAT_B10G11R11_UFLOAT_PACK32,
2650 VK_FORMAT_R32G32_SFLOAT,
2651 VK_FORMAT_R16G16_SFLOAT,
2652 VK_FORMAT_R16_SFLOAT,
2654 VK_FORMAT_A2B10G10R10_UINT_PACK32,
2655 VK_FORMAT_R32G32_UINT,
2656 VK_FORMAT_R16G16_UINT,
2658 VK_FORMAT_R8G8_UINT,
2661 VK_FORMAT_R32G32_SINT,
2662 VK_FORMAT_R16G16_SINT,
2664 VK_FORMAT_R8G8_SINT,
2667 VK_FORMAT_A2B10G10R10_UNORM_PACK32,
2668 VK_FORMAT_R16G16B16A16_UNORM,
2669 VK_FORMAT_R16G16B16A16_SNORM,
2670 VK_FORMAT_R16G16_UNORM,
2671 VK_FORMAT_R16_UNORM,
2672 VK_FORMAT_R8G8_UNORM,
2675 VK_FORMAT_R16G16_SNORM,
2676 VK_FORMAT_R16_SNORM,
2677 VK_FORMAT_R8G8_SNORM,
2680 VK_FORMAT_R4G4_UNORM_PACK8,
2681 VK_FORMAT_R4G4B4A4_UNORM_PACK16,
2682 VK_FORMAT_B4G4R4A4_UNORM_PACK16,
2683 VK_FORMAT_R5G6B5_UNORM_PACK16,
2684 VK_FORMAT_B5G6R5_UNORM_PACK16,
2685 VK_FORMAT_R5G5B5A1_UNORM_PACK16,
2686 VK_FORMAT_B5G5R5A1_UNORM_PACK16,
2687 VK_FORMAT_A1R5G5B5_UNORM_PACK16,
2688 VK_FORMAT_B8G8R8A8_SNORM,
2689 VK_FORMAT_B8G8R8A8_SINT,
2690 VK_FORMAT_A8B8G8R8_UNORM_PACK32,
2691 VK_FORMAT_A8B8G8R8_SNORM_PACK32,
2692 VK_FORMAT_A8B8G8R8_UINT_PACK32,
2693 VK_FORMAT_A8B8G8R8_SINT_PACK32,
2694 VK_FORMAT_A2R10G10B10_UNORM_PACK32,
2695 VK_FORMAT_A2R10G10B10_SNORM_PACK32,
2696 VK_FORMAT_A2R10G10B10_UINT_PACK32,
2697 VK_FORMAT_A2R10G10B10_SINT_PACK32,
2698 VK_FORMAT_A2B10G10R10_SNORM_PACK32,
2699 VK_FORMAT_A2B10G10R10_SINT_PACK32,
2700 VK_FORMAT_R32G32B32_UINT,
2701 VK_FORMAT_R32G32B32_SINT,
2702 VK_FORMAT_R32G32B32_SFLOAT,
2703 VK_FORMAT_E5B9G9R9_UFLOAT_PACK32,
2705 VK_FORMAT_R8G8_SRGB,
2706 VK_FORMAT_R8G8B8_SRGB,
2707 VK_FORMAT_B8G8R8_SRGB,
2708 VK_FORMAT_R8G8B8A8_SRGB,
2709 VK_FORMAT_B8G8R8A8_SRGB,
2710 VK_FORMAT_A8B8G8R8_SRGB_PACK32
2713 static const VkFormat s_formatsThreeComponent[] =
2715 VK_FORMAT_R8G8B8_UINT,
2716 VK_FORMAT_R8G8B8_SINT,
2717 VK_FORMAT_R8G8B8_UNORM,
2718 VK_FORMAT_R8G8B8_SNORM,
2719 VK_FORMAT_R16G16B16_UINT,
2720 VK_FORMAT_R16G16B16_SINT,
2721 VK_FORMAT_R16G16B16_UNORM,
2722 VK_FORMAT_R16G16B16_SNORM,
2723 VK_FORMAT_R16G16B16_SFLOAT,
2724 VK_FORMAT_R32G32B32_UINT,
2725 VK_FORMAT_R32G32B32_SINT,
2726 VK_FORMAT_R32G32B32_SFLOAT,
// Creates the "store" test group: for every texture type and format, plain
// imageStore() cases with and without a format layout qualifier, plus
// single-layer-bind and buffer min-alignment variants.
// NOTE(review): guard lines (e.g. an 'if (hasSpirvFmt)' around the with-format
// cases and the braces of the buffer sub-block) are not visible in this excerpt.
tcu::TestCaseGroup* createImageStoreTests (tcu::TestContext& testCtx)
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store", "Plain imageStore() cases"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for write images"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for write images"));
    for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
        const Texture& texture = s_textures[textureNdx];
        de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
        de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
        const bool isLayered = (texture.numLayers() > 1);
        for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
            // With-format variants require the format to exist in SPIR-V.
            const bool hasSpirvFmt = hasSpirvFormat(s_formats[formatNdx]);
            groupWithFormatByImageViewType->addChild( new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx]));
            // Additional tests where the shader uses constant data for imageStore.
            groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_constant", "", texture, s_formats[formatNdx], StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER | StoreTest::FLAG_STORE_CONSTANT_VALUE));
            groupWithoutFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], 0));
            // Layered textures additionally get a variant binding a single layer.
            if (isLayered && hasSpirvFmt)
                groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
                    texture, s_formats[formatNdx],
                    StoreTest::FLAG_SINGLE_LAYER_BIND | StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
            // Buffer views additionally exercise the minimum texel-buffer alignment.
            if (texture.type() == IMAGE_TYPE_BUFFER)
                groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], StoreTest::FLAG_MINALIGN | StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
                groupWithoutFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], StoreTest::FLAG_MINALIGN));
        testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
        testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
    testGroup->addChild(testGroupWithFormat.release());
    testGroup->addChild(testGroupWithoutFormat.release());
    return testGroup.release();
// Creates the "load_store" group: imageLoad() followed by imageStore() using the
// same format for both images, with/without a format qualifier, plus single-layer,
// min-alignment, uniform-texel-buffer and three-component-format buffer variants.
// NOTE(review): some guard lines (e.g. 'continue;' after the hasSpirvFormat check
// and an 'if (isLayered)' before the single-layer case) are not visible here.
tcu::TestCaseGroup* createImageLoadStoreTests (tcu::TestContext& testCtx)
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store", "Cases with imageLoad() followed by imageStore()"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for read images"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for read images"));
    for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
        const Texture& texture = s_textures[textureNdx];
        de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
        de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
        const bool isLayered = (texture.numLayers() > 1);
        for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
            // These tests always require a SPIR-V format for the write image, even if the read
            // image is being used without a format.
            if (!hasSpirvFormat(s_formats[formatNdx]))
            groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx]));
            groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx], 0));
            // Layered textures additionally get a single-layer-bind variant.
            groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
                texture, s_formats[formatNdx], s_formats[formatNdx],
                LoadStoreTest::FLAG_SINGLE_LAYER_BIND | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
            // Buffer views additionally exercise minimum alignment and uniform texel buffers.
            if (texture.type() == IMAGE_TYPE_BUFFER)
                groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
                groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign_uniform", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
                groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN));
                groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign_uniform", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
        // Three-component formats are only usable through uniform texel buffers.
        if (texture.type() == IMAGE_TYPE_BUFFER)
            for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formatsThreeComponent); ++formatNdx)
                groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formatsThreeComponent[formatNdx]) + "_uniform", "", texture, s_formatsThreeComponent[formatNdx], s_formatsThreeComponent[formatNdx], LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
                groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formatsThreeComponent[formatNdx]) + "_minalign_uniform", "", texture, s_formatsThreeComponent[formatNdx], s_formatsThreeComponent[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
        testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
        testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
    testGroup->addChild(testGroupWithFormat.release());
    testGroup->addChild(testGroupWithoutFormat.release());
    return testGroup.release();
// Creates the "load_store_lod" group for VK_AMD_shader_image_load_store_lod: like
// the plain load/store cases but with mipmapped textures (6 levels) and the
// LoadStoreTest mipmap flag (final DE_TRUE argument) enabled.
// NOTE(review): some guard lines ('continue;' for buffer textures and after the
// hasSpirvFormat check, 'if (isLayered)' before single-layer) are not visible here.
tcu::TestCaseGroup* createImageLoadStoreLodAMDTests (tcu::TestContext& testCtx)
    // Local texture list: every non-buffer type with 6 mip levels.
    static const Texture textures[] =
        Texture(IMAGE_TYPE_1D_ARRAY, tcu::IVec3(64, 1, 1), 8, 1, 6),
        Texture(IMAGE_TYPE_1D, tcu::IVec3(64, 1, 1), 1, 1, 6),
        Texture(IMAGE_TYPE_2D, tcu::IVec3(64, 64, 1), 1, 1, 6),
        Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(64, 64, 1), 8, 1, 6),
        Texture(IMAGE_TYPE_3D, tcu::IVec3(64, 64, 8), 1, 1, 6),
        Texture(IMAGE_TYPE_CUBE, tcu::IVec3(64, 64, 1), 6, 1, 6),
        Texture(IMAGE_TYPE_CUBE_ARRAY, tcu::IVec3(64, 64, 1), 2*6, 1, 6),
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store_lod", "Cases with imageLoad() followed by imageStore()"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for read images"));
    de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for read images"));
    for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(textures); ++textureNdx)
        const Texture& texture = textures[textureNdx];
        de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
        de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
        const bool isLayered = (texture.numLayers() > 1);
        // Buffer textures have no mip levels, so they are skipped.
        if (texture.type() == IMAGE_TYPE_BUFFER)
        for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
            // These tests always require a SPIR-V format for the write image, even if the read
            // image is being used without a format.
            if (!hasSpirvFormat(s_formats[formatNdx]))
            groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER, DE_TRUE));
            groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx], 0, DE_TRUE));
            // Layered textures additionally get a single-layer-bind variant.
            groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
                texture, s_formats[formatNdx], s_formats[formatNdx],
                LoadStoreTest::FLAG_SINGLE_LAYER_BIND | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER, DE_TRUE));
        testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
        testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
    testGroup->addChild(testGroupWithFormat.release());
    testGroup->addChild(testGroupWithoutFormat.release());
    return testGroup.release();
// Creates the "format_reinterpret" group: for every texture type, all pairs of
// distinct but size-compatible formats where the view format differs from the
// shader-declared format.
// NOTE(review): the 'continue;' after the hasSpirvFormat check is not visible here.
tcu::TestCaseGroup* createImageFormatReinterpretTests (tcu::TestContext& testCtx)
    de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret", "Cases with differing texture and image formats"));
    for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
        const Texture& texture = s_textures[textureNdx];
        de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
        // Outer loop: image (view) format; inner loop: shader-declared format.
        for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
        for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
            // The shader-declared format must exist in SPIR-V.
            if (!hasSpirvFormat(s_formats[formatNdx]))
            const std::string caseName = getFormatShortString(s_formats[imageFormatNdx]) + "_" + getFormatShortString(s_formats[formatNdx]);
            // Only distinct, size-compatible pairs are meaningful reinterpretations.
            if (imageFormatNdx != formatNdx && formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
                groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, "", texture, s_formats[formatNdx], s_formats[imageFormatNdx]));
        testGroup->addChild(groupByImageViewType.release());
    return testGroup.release();
2912 de::MovePtr<TestCase> createImageQualifierRestrictCase (tcu::TestContext& testCtx, const ImageType imageType, const std::string& name)
2914 const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
2915 const Texture& texture = getTestTexture(imageType);
2916 return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, "", texture, format, format, LoadStoreTest::FLAG_RESTRICT_IMAGES | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
2922 bool relaxedOK(VkFormat format)
2924 tcu::IVec4 bitDepth = tcu::getTextureFormatBitDepth(mapVkFormat(format));
2925 int maxBitDepth = deMax32(deMax32(bitDepth[0], bitDepth[1]), deMax32(bitDepth[2], bitDepth[3]));
2926 return maxBitDepth <= 16;
2929 // Get a format used for reading or writing in extension operand tests. These formats allow representing the shader sampled type to
2930 // verify results from read or write operations.
2931 VkFormat getShaderExtensionOperandFormat (bool isSigned, bool is64Bit)
2933 const VkFormat formats[] =
2935 VK_FORMAT_R32G32B32A32_UINT,
2936 VK_FORMAT_R32G32B32A32_SINT,
2940 return formats[2u * (is64Bit ? 1u : 0u) + (isSigned ? 1u : 0u)];
2943 // INT or UINT format?
2944 bool isIntegralFormat (VkFormat format)
2946 return (isIntFormat(format) || isUintFormat(format));
2949 // Return the list of formats used for the extension operand tests (SignExten/ZeroExtend).
2950 std::vector<VkFormat> getExtensionOperandFormatList (void)
2952 std::vector<VkFormat> formatList;
2954 for (auto format : s_formats)
2956 if (isIntegralFormat(format))
2957 formatList.push_back(format);
2960 formatList.push_back(VK_FORMAT_R64_SINT);
2961 formatList.push_back(VK_FORMAT_R64_UINT);
// Creates the "extend_operands_spirv1p4" group: for every integral format, read and
// write variants with matched/mismatched sampled-type sign and normal/relaxed
// precision, exercising the SPIR-V SignExtend and ZeroExtend image operands.
// NOTE(review): the struct wrapper declaring 'testTypes' (with its 'name' member)
// and several 'continue;' lines after the skip conditions are not visible in this
// excerpt of the file.
tcu::TestCaseGroup* createImageExtendOperandsTests(tcu::TestContext& testCtx)
    using GroupPtr = de::MovePtr<tcu::TestCaseGroup>;
    GroupPtr testGroup(new tcu::TestCaseGroup(testCtx, "extend_operands_spirv1p4", "Cases with SignExtend and ZeroExtend"));
    // Table pairing each ExtendTestType with its group name.
    ExtendTestType testType;
        { ExtendTestType::READ, "read" },
        { ExtendTestType::WRITE, "write" },
    const auto texture = Texture(IMAGE_TYPE_2D, tcu::IVec3(8, 8, 1), 1);
    const auto formatList = getExtensionOperandFormatList();
    for (const auto format : formatList)
        const auto isInt = isIntFormat(format);
        const auto isUint = isUintFormat(format);
        const auto use64Bits = is64BitIntegerFormat(format);
        DE_ASSERT(isInt || isUint);
        GroupPtr formatGroup (new tcu::TestCaseGroup(testCtx, getFormatShortString(format).c_str(), ""));
        for (const auto& testType : testTypes)
            GroupPtr testTypeGroup (new tcu::TestCaseGroup(testCtx, testType.name, ""));
            // match == 0: sampled type sign matches the format; match == 1: forced mismatch.
            for (int match = 0; match < 2; ++match)
                const bool mismatched = (match == 1);
                const char* matchGroupName = (mismatched ? "mismatched_sign" : "matched_sign");
                // SPIR-V does not allow this kind of sampled type override.
                if (mismatched && isUint)
                GroupPtr matchGroup (new tcu::TestCaseGroup(testCtx, matchGroupName, ""));
                for (int prec = 0; prec < 2; prec++)
                    const bool relaxedPrecision = (prec != 0);
                    const char* precisionName = (relaxedPrecision ? "relaxed_precision" : "normal_precision");
                    // The "other" image uses a canonical format with the sign the sampled type needs.
                    const auto signedOther = ((isInt && !mismatched) || (isUint && mismatched));
                    const auto otherFormat = getShaderExtensionOperandFormat(signedOther, use64Bits);
                    const auto readFormat = (testType.testType == ExtendTestType::READ ? format : otherFormat);
                    const auto writeFormat = (testType.testType == ExtendTestType::WRITE ? format : otherFormat);
                    // Relaxed precision only makes sense when all components fit in 16 bits.
                    if (relaxedPrecision && !relaxedOK(readFormat))
                    if (!hasSpirvFormat(readFormat) || !hasSpirvFormat(writeFormat))
                    matchGroup->addChild(new ImageExtendOperandTest(testCtx, precisionName, texture, readFormat, writeFormat, mismatched, relaxedPrecision, testType.testType));
                testTypeGroup->addChild(matchGroup.release());
            formatGroup->addChild(testTypeGroup.release());
        testGroup->addChild(formatGroup.release());
    return testGroup.release();
3043 tcu::TestCaseGroup* createImageNontemporalOperandTests(tcu::TestContext& testCtx)
3045 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "nontemporal_operand", "Cases with Nontemporal image operand for SPOIR-V 1.6"));
3047 const auto texture = Texture(IMAGE_TYPE_2D, tcu::IVec3(8, 8, 1), 1);
3049 // using just integer formats for tests so that ImageExtendOperandTest could be reused
3050 const auto formatList = getExtensionOperandFormatList();
3052 for (const auto format : formatList)
3054 const std::string caseName = getFormatShortString(format);
3055 const auto readFormat = format;
3056 const auto writeFormat = getShaderExtensionOperandFormat(isIntFormat(format), is64BitIntegerFormat(format));
3058 if (!hasSpirvFormat(readFormat) || !hasSpirvFormat(writeFormat))
3061 // note: just testing OpImageWrite as OpImageRead is tested with addComputeImageSamplerTest
3062 testGroup->addChild(new ImageExtendOperandTest(testCtx, caseName, texture,
3063 readFormat, writeFormat, false, false, ExtendTestType::WRITE_NONTEMPORAL));
3066 return testGroup.release();