1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 The Android Open Source Project
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Image load/store Tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktImageLoadStoreTests.hpp"
26 #include "vktTestCaseUtil.hpp"
27 #include "vktImageTestsUtil.hpp"
28 #include "vktImageLoadStoreUtil.hpp"
29 #include "vktImageTexture.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkPrograms.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkBarrierUtil.hpp"
38 #include "vkBuilderUtil.hpp"
39 #include "vkQueryUtil.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkCmdUtil.hpp"
42 #include "vkObjUtil.hpp"
45 #include "deUniquePtr.hpp"
46 #include "deSharedPtr.hpp"
47 #include "deStringUtil.hpp"
49 #include "tcuImageCompare.hpp"
50 #include "tcuTexture.hpp"
51 #include "tcuTextureUtil.hpp"
52 #include "tcuFloat.hpp"
53 #include "tcuStringTemplate.hpp"
68 // Check for three-component (non-packed) format, i.e. pixel size is a multiple of 3.
69 bool formatHasThreeComponents(VkFormat format)
71 const tcu::TextureFormat texFormat = mapVkFormat(format);
72 return (getPixelSize(texFormat) % 3) == 0;
75 VkFormat getSingleComponentFormat(VkFormat format)
77 tcu::TextureFormat texFormat = mapVkFormat(format);
78 texFormat = tcu::TextureFormat(tcu::TextureFormat::R, texFormat.type);
79 return mapTextureFormat(texFormat);
82 inline VkBufferImageCopy makeBufferImageCopy (const Texture& texture)
84 return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
//! Select a single layer (or depth slice, for 3D images) out of a layered pixel buffer access.
tcu::ConstPixelBufferAccess getLayerOrSlice (const Texture& texture, const tcu::ConstPixelBufferAccess access, const int layer)
	switch (texture.type())

		case IMAGE_TYPE_BUFFER:
			// Buffer-backed images are not layered; the caller must request layer 0.
			DE_ASSERT(layer == 0);

		case IMAGE_TYPE_1D_ARRAY:
			// 1D array layers are addressed via the y coordinate of the level access.
			return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);

		case IMAGE_TYPE_2D_ARRAY:
		case IMAGE_TYPE_CUBE:
		case IMAGE_TYPE_CUBE_ARRAY:
		case IMAGE_TYPE_3D: // 3d texture is treated as if depth was the layers
			// Layered 2D-style images (and 3D slices) are addressed via the z coordinate.
			return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);

		// Fallback for image types not handled above.
		DE_FATAL("Internal test error");
		return tcu::ConstPixelBufferAccess();
//! \return true if all layers match in both pixel buffers
bool comparePixelBuffers (tcu::TestLog& log,
						  const Texture& texture,
						  const VkFormat format,
						  const tcu::ConstPixelBufferAccess reference,
						  const tcu::ConstPixelBufferAccess result)
	// Per-layer comparison only makes sense when both buffers share layout and extent.
	DE_ASSERT(reference.getFormat() == result.getFormat());
	DE_ASSERT(reference.getSize() == result.getSize());

	// 3D images are compared slice by slice; all other types layer by layer.
	const bool is3d = (texture.type() == IMAGE_TYPE_3D);
	const int numLayersOrSlices = (is3d ? texture.size().z() : texture.numLayers());
	const int numCubeFaces = 6;

	int passedLayers = 0;
	for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
		// Each layer comparison gets a distinct name/description in the test log.
		const std::string comparisonName = "Comparison" + de::toString(layerNdx);
		const std::string comparisonDesc = "Image Comparison, " +
			(isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " + de::toString(layerNdx / numCubeFaces) :
			is3d ? "slice " + de::toString(layerNdx) : "layer " + de::toString(layerNdx));

		const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
		const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);

		// Pick the comparison strategy and tolerance appropriate for the format's channel class.
		switch (tcu::getTextureChannelClass(mapVkFormat(format).type))
			case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
			case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
				// Integer data must match exactly (zero threshold).
				ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);

			case tcu::TEXTURECHANNELCLASS_UNSIGNED_FIXED_POINT:
				// Allow error of minimum representable difference
				const tcu::Vec4 threshold (1.0f / ((tcu::UVec4(1u) << tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<deUint32>()) - 1u).cast<float>());

				ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, threshold, tcu::COMPARE_LOG_RESULT);

			case tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT:
				// Allow error of minimum representable difference
				// (one bit of the mantissa depth is the sign, hence the extra - 1u).
				const tcu::Vec4 threshold (1.0f / ((tcu::UVec4(1u) << (tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<deUint32>() - 1u)) - 1u).cast<float>());

				ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, threshold, tcu::COMPARE_LOG_RESULT);

			case tcu::TEXTURECHANNELCLASS_FLOATING_POINT:
				// Convert target format ulps to float ulps and allow 1 ulp difference
				const tcu::UVec4 threshold (tcu::UVec4(1u) << (tcu::UVec4(23) - tcu::getTextureFormatMantissaBitDepth(mapVkFormat(format)).cast<deUint32>()));

				ok = tcu::floatUlpThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, threshold, tcu::COMPARE_LOG_RESULT);

			// Unrecognized channel class: internal error.
			DE_FATAL("Unknown channel class");

	// Pass only if every layer/slice compared clean.
	return passedLayers == numLayersOrSlices;
//!< Zero out invalid pixels in the image (denormalized, infinite, NaN values)
void replaceBadFloatReinterpretValues (const tcu::PixelBufferAccess access)
	// Only meaningful when the access views the data as a float format.
	DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT);

	for (int z = 0; z < access.getDepth(); ++z)
	for (int y = 0; y < access.getHeight(); ++y)
	for (int x = 0; x < access.getWidth(); ++x)
		const tcu::Vec4 color(access.getPixel(x, y, z));
		tcu::Vec4 newColor = color;

		// Check each channel with the precision of the stored format.
		for (int i = 0; i < 4; ++i)
			if (access.getFormat().type == tcu::TextureFormat::HALF_FLOAT)
				// 16-bit float channel.
				const tcu::Float16 f(color[i]);
				if (f.isDenorm() || f.isInf() || f.isNaN())
				// 32-bit float channel.
				const tcu::Float32 f(color[i]);
				if (f.isDenorm() || f.isInf() || f.isNaN())

		// Write back only when at least one channel was replaced.
		if (newColor != color)
			access.setPixel(newColor, x, y, z);
220 //!< replace invalid pixels in the image (-128)
221 void replaceSnormReinterpretValues (const tcu::PixelBufferAccess access)
223 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);
225 for (int z = 0; z < access.getDepth(); ++z)
226 for (int y = 0; y < access.getHeight(); ++y)
227 for (int x = 0; x < access.getWidth(); ++x)
229 const tcu::IVec4 color(access.getPixelInt(x, y, z));
230 tcu::IVec4 newColor = color;
232 for (int i = 0; i < 4; ++i)
234 const deInt32 oldColor(color[i]);
235 if (oldColor == -128) newColor[i] = -127;
238 if (newColor != color)
239 access.setPixel(newColor, x, y, z);
//! Build the reference image the store shader is expected to produce; if the image will be
//! reinterpreted as readFormat, values invalid under that format are sanitized.
tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, const VkFormat readFormat)
	// Generate a reference image data using the storage format

	tcu::TextureLevel reference(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
	const tcu::PixelBufferAccess access = reference.getAccess();

	// Scale/bias must match what the store shader applies to its computed color.
	const float storeColorScale = computeStoreColorScale(imageFormat, imageSize);
	const float storeColorBias = computeStoreColorBias(imageFormat);

	const bool intFormat = isIntegerFormat(imageFormat);
	// Negative values are exercised only for signed formats with no bias.
	const bool storeNegativeValues = isSignedFormat(imageFormat) && (storeColorBias == 0);
	const int xMax = imageSize.x() - 1;
	const int yMax = imageSize.y() - 1;

	for (int z = 0; z < imageSize.z(); ++z)
	for (int y = 0; y < imageSize.y(); ++y)
	for (int x = 0; x < imageSize.x(); ++x)
		// XOR pattern; mirrors the color expression emitted into the store shader.
		tcu::IVec4 color(x^y^z, (xMax - x)^y^z, x^(yMax - y)^z, (xMax - x)^(yMax - y)^z);

		if (storeNegativeValues)
			color -= tcu::IVec4(deRoundFloatToInt32((float)de::max(xMax, yMax) / 2.0f));

		// Integer formats store raw integer values; others store scaled/biased floats.
			access.setPixel(color, x, y, z);
			access.setPixel(color.asFloat()*storeColorScale + storeColorBias, x, y, z);

	// If the image is to be accessed as a float texture, get rid of invalid values

	if (isFloatFormat(readFormat) && imageFormat != readFormat)
		replaceBadFloatReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
	if (isSnormFormat(readFormat) && imageFormat != readFormat)
		replaceSnormReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
283 inline tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat)
285 return generateReferenceImage(imageSize, imageFormat, imageFormat);
288 void flipHorizontally (const tcu::PixelBufferAccess access)
290 const int xMax = access.getWidth() - 1;
291 const int halfWidth = access.getWidth() / 2;
293 if (isIntegerFormat(mapTextureFormat(access.getFormat())))
294 for (int z = 0; z < access.getDepth(); z++)
295 for (int y = 0; y < access.getHeight(); y++)
296 for (int x = 0; x < halfWidth; x++)
298 const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
299 access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
300 access.setPixel(temp, x, y, z);
303 for (int z = 0; z < access.getDepth(); z++)
304 for (int y = 0; y < access.getHeight(); y++)
305 for (int x = 0; x < halfWidth; x++)
307 const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
308 access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
309 access.setPixel(temp, x, y, z);
313 inline bool formatsAreCompatible (const VkFormat format0, const VkFormat format1)
315 return format0 == format1 || mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize();
318 void commandImageWriteBarrierBetweenShaderInvocations (Context& context, const VkCommandBuffer cmdBuffer, const VkImage image, const Texture& texture)
320 const DeviceInterface& vk = context.getDeviceInterface();
322 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
323 const VkImageMemoryBarrier shaderWriteBarrier = makeImageMemoryBarrier(
324 VK_ACCESS_SHADER_WRITE_BIT, 0u,
325 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
326 image, fullImageSubresourceRange);
328 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier);
331 void commandBufferWriteBarrierBeforeHostRead (Context& context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer, const VkDeviceSize bufferSizeBytes)
333 const DeviceInterface& vk = context.getDeviceInterface();
335 const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
336 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
337 buffer, 0ull, bufferSizeBytes);
339 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
342 //! Copy all layers of an image to a buffer.
343 void commandCopyImageToBuffer (Context& context,
344 const VkCommandBuffer cmdBuffer,
346 const VkBuffer buffer,
347 const VkDeviceSize bufferSizeBytes,
348 const Texture& texture)
350 const DeviceInterface& vk = context.getDeviceInterface();
352 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
353 const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
354 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
355 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
356 image, fullImageSubresourceRange);
358 const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);
360 const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
361 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
362 buffer, 0ull, bufferSizeBytes);
364 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
365 vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, ©Region);
366 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, ©Barrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
//! Test case: a compute shader writes a procedural pattern into a storage image,
//! which is then read back and compared against a host-generated reference.
class StoreTest : public TestCase
		FLAG_SINGLE_LAYER_BIND					= 0x1,	//!< Run the shader multiple times, each time binding a different layer.
		FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER		= 0x2,	//!< Declare the format of the images in the shader code
		FLAG_MINALIGN							= 0x4,	//!< Use bufferview offset that matches the advertised minimum alignment

	// Default flags declare the image format explicitly in the shader.
						StoreTest		(tcu::TestContext&		testCtx,
										 const std::string&		name,
										 const std::string&		description,
										 const Texture&			texture,
										 const VkFormat			format,
										 const deUint32			flags = FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER);

	virtual void		checkSupport	(Context& context) const;
	void				initPrograms	(SourceCollections& programCollection) const;
	TestInstance*		createInstance	(Context& context) const;

	const Texture		m_texture;						// image shape/type under test
	const VkFormat		m_format;						// storage format under test
	const bool			m_declareImageFormatInShader;	// derived from FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER
	const bool			m_singleLayerBind;				// derived from FLAG_SINGLE_LAYER_BIND
	const bool			m_minalign;						// derived from FLAG_MINALIGN
//! Decode the flag bitfield into individual bool members.
StoreTest::StoreTest (tcu::TestContext&		testCtx,
					  const std::string&	name,
					  const std::string&	description,
					  const Texture&		texture,
					  const VkFormat		format,
					  const deUint32		flags)
	: TestCase						(testCtx, name, description)
	, m_texture						(texture)
	, m_declareImageFormatInShader	((flags & FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER) != 0)
	, m_singleLayerBind				((flags & FLAG_SINGLE_LAYER_BIND) != 0)
	, m_minalign					((flags & FLAG_MINALIGN) != 0)
	// Binding one layer at a time only makes sense for layered textures.
	if (m_singleLayerBind)
		DE_ASSERT(m_texture.numLayers() > 1);
415 void StoreTest::checkSupport (Context& context) const
417 const VkFormatProperties formatProperties (getPhysicalDeviceFormatProperties(context.getInstanceInterface(), context.getPhysicalDevice(), m_format));
419 if (!m_declareImageFormatInShader)
420 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_STORAGE_IMAGE_WRITE_WITHOUT_FORMAT);
422 if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
423 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
425 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
426 TCU_THROW(NotSupportedError, "Format not supported for storage images");
428 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
429 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
//! Generate the compute shader that stores the XOR pattern; must stay in sync with
//! generateReferenceImage(), which computes the same pattern on the host.
void StoreTest::initPrograms (SourceCollections& programCollection) const
	// Scale/bias applied in the shader must be valid for this format.
	const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
	const float storeColorBias = computeStoreColorBias(m_format);
	DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));

	const deUint32 xMax = m_texture.size().x() - 1;
	const deUint32 yMax = m_texture.size().y() - 1;
	// Integer images store (u|i)vec4 values; float images use plain vec4.
	const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
	const bool storeNegativeValues = isSignedFormat(m_format) && (storeColorBias == 0);
	bool useClamp = false;
	// Per-texel XOR pattern expression, built as GLSL source text.
	std::string colorBaseExpr = signednessPrefix + "vec4("
		+ "(" + de::toString(xMax) + "-gx)^gy^gz, "
		+ "gx^(" + de::toString(yMax) + "-gy)^gz, "
		+ "(" + de::toString(xMax) + "-gx)^(" + de::toString(yMax) + "-gy)^gz)";

	// Large integer values may not be represented with formats with low bit depths
	if (isIntegerFormat(m_format))
		const deInt64 minStoreValue = storeNegativeValues ? 0 - deRoundFloatToInt64((float)de::max(xMax, yMax) / 2.0f) : 0;
		const deInt64 maxStoreValue = storeNegativeValues ? deRoundFloatToInt64((float)de::max(xMax, yMax) / 2.0f) : de::max(xMax, yMax);

		// Clamping is needed when the extreme pattern values overflow the format.
		useClamp = !isRepresentableIntegerValue(tcu::Vector<deInt64, 4>(minStoreValue), mapVkFormat(m_format)) ||
				   !isRepresentableIntegerValue(tcu::Vector<deInt64, 4>(maxStoreValue), mapVkFormat(m_format));

	// Clamp if integer value cannot be represented with the current format
		const tcu::IVec4 bitDepths = tcu::getTextureFormatBitDepth(mapVkFormat(m_format));
		tcu::IVec4 minRepresentableValue;
		tcu::IVec4 maxRepresentableValue;

		// Representable range depends on whether the format is signed or unsigned.
		switch (tcu::getTextureChannelClass(mapVkFormat(m_format).type))
			case tcu::TEXTURECHANNELCLASS_UNSIGNED_INTEGER:
				minRepresentableValue = tcu::IVec4(0);
				maxRepresentableValue = (tcu::IVec4(1) << bitDepths) - tcu::IVec4(1);

			case tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER:
				// Two's-complement range for the format's bit depth.
				minRepresentableValue = -(tcu::IVec4(1) << bitDepths - tcu::IVec4(1));
				maxRepresentableValue = (tcu::IVec4(1) << (bitDepths - tcu::IVec4(1))) - tcu::IVec4(1);

				DE_ASSERT(isIntegerFormat(m_format));

		// Wrap the pattern expression in a GLSL clamp() with the representable bounds.
		colorBaseExpr = "clamp(" + colorBaseExpr + ", "
			+ signednessPrefix + "vec4" + de::toString(minRepresentableValue) + ", "
			+ signednessPrefix + "vec4" + de::toString(maxRepresentableValue) + ")";

	// Apply the same scale/bias the host-side reference generator uses.
	std::string colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
		+ (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");

	if (storeNegativeValues)
		colorExpr += "-" + de::toString(deRoundFloatToInt32((float)deMax32(xMax, yMax) / 2.0f));

	// With single-layer binding, coordinates collapse to the layer's dimensionality.
	const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
	const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");

	const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
	const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
	const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);

	// Assemble the shader: one invocation per texel, each storing its pattern color.
	std::ostringstream src;
	src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
		<< "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
	if (m_declareImageFormatInShader)
		src << "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr << " u_image;\n";
		// Format-less declaration; relies on shaderStorageImageWriteWithoutFormat.
		src << "layout (binding = 0) writeonly uniform " << imageTypeStr << " u_image;\n";

	// Per-layer dispatch passes the layer index in a small uniform block.
	if (m_singleLayerBind)
		src << "layout (binding = 1) readonly uniform Constants {\n"
			<< " int u_layerNdx;\n"

		<< "void main (void)\n"
		<< " int gx = int(gl_GlobalInvocationID.x);\n"
		<< " int gy = int(gl_GlobalInvocationID.y);\n"
		<< " int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
		<< " imageStore(u_image, " << texelCoordStr << ", " << colorExpr << ");\n"

	programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
//! Generic test iteration algorithm for image tests
//! Template-method base: iterate() builds the pipeline and command buffer, then calls the
//! pure-virtual hooks below, which subclasses implement for their resource setup/teardown.
class BaseTestInstance : public TestInstance
					BaseTestInstance					(Context&		context,
														 const Texture&	texture,
														 const VkFormat	format,
														 const bool		declareImageFormatInShader,
														 const bool		singleLayerBind,
														 const bool		bufferLoadUniform);

	tcu::TestStatus	iterate								(void);

	virtual			~BaseTestInstance					(void) {}

	// Hooks called from iterate(); see that function for the call order.
	virtual VkDescriptorSetLayout	prepareDescriptors	(void) = 0;
	virtual tcu::TestStatus			verifyResult		(void) = 0;

	virtual void	commandBeforeCompute				(const VkCommandBuffer	cmdBuffer) = 0;
	virtual void	commandBetweenShaderInvocations		(const VkCommandBuffer	cmdBuffer) = 0;
	virtual void	commandAfterCompute					(const VkCommandBuffer	cmdBuffer) = 0;

	virtual void	commandBindDescriptorsForLayer		(const VkCommandBuffer	cmdBuffer,
														 const VkPipelineLayout	pipelineLayout,
														 const int				layerNdx) = 0;
	// Computes the texel buffer view offset used for src/dst views (see implementation).
	virtual deUint32	getViewOffset					(Context&		context,
														 const VkFormat	format,

	const Texture	m_texture;
	const VkFormat	m_format;
	const bool		m_declareImageFormatInShader;
	const bool		m_singleLayerBind;
	const bool		m_minalign;
	const bool		m_bufferLoadUniform;
	const deUint32	m_srcViewOffset;	// offset for the source (load) texel buffer view
	const deUint32	m_dstViewOffset;	// offset for the destination (store) texel buffer view
//! The destination view offset is computed with a single-component format when the storage
//! format has three components (those are written one channel at a time).
BaseTestInstance::BaseTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign, const bool bufferLoadUniform)
	: TestInstance					(context)
	, m_texture						(texture)
	, m_declareImageFormatInShader	(declareImageFormatInShader)
	, m_singleLayerBind				(singleLayerBind)
	, m_minalign					(minalign)
	, m_bufferLoadUniform			(bufferLoadUniform)
	, m_srcViewOffset				(getViewOffset(context, format, m_bufferLoadUniform))
	, m_dstViewOffset				(getViewOffset(context, formatHasThreeComponents(format) ? getSingleComponentFormat(format) : format, false))
//! Build the compute pipeline, record the dispatch(es) with the subclass hooks, submit,
//! and return the subclass's verification verdict.
tcu::TestStatus BaseTestInstance::iterate (void)
	const DeviceInterface&	vk					= m_context.getDeviceInterface();
	const VkDevice			device				= m_context.getDevice();
	const VkQueue			queue				= m_context.getUniversalQueue();
	const deUint32			queueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();

	const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));

	// The descriptor layout comes from the subclass; pipeline objects are built here.
	const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
	const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
	const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

	const Unique<VkCommandPool> cmdPool(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex));
	const Unique<VkCommandBuffer> cmdBuffer(allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	beginCommandBuffer(vk, *cmdBuffer);

	vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
	commandBeforeCompute(*cmdBuffer);	// subclass hook: e.g. initial layout transitions

	// Single-layer-bind mode dispatches once per layer, each with its own descriptor set;
	// otherwise a single dispatch covers the whole image.
	const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
	const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
	for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
		commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);

		// subclass hook: barrier between successive per-layer dispatches
		commandBetweenShaderInvocations(*cmdBuffer);

		vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());

	commandAfterCompute(*cmdBuffer);	// subclass hook: e.g. copy results to a host buffer

	endCommandBuffer(vk, *cmdBuffer);

	submitCommandsAndWait(vk, device, queue, *cmdBuffer);

	return verifyResult();
//! Base store test implementation
//! Owns a host-visible buffer large enough for the whole image; verifyResult() compares its
//! contents against a reference generated on the host.
class StoreTestInstance : public BaseTestInstance
					StoreTestInstance					(Context&		context,
														 const Texture&	texture,
														 const VkFormat	format,
														 const bool		declareImageFormatInShader,
														 const bool		singleLayerBind,
														 const bool		minalign);

	virtual tcu::TestStatus	verifyResult				(void);

	// Add empty implementations for functions that might be not needed
	void			commandBeforeCompute				(const VkCommandBuffer) {}
	void			commandBetweenShaderInvocations		(const VkCommandBuffer) {}
	void			commandAfterCompute					(const VkCommandBuffer) {}

	de::MovePtr<Buffer>	m_imageBuffer;		// host-visible buffer that receives the image data
	const VkDeviceSize	m_imageSizeBytes;	// size of the whole image, excluding the view offset
//! Compute the buffer view offset to use, honoring VK_EXT_texel_buffer_alignment when present.
deUint32 BaseTestInstance::getViewOffset(Context&			context,
										 const VkFormat		format,
	// Without the texelBufferAlignment feature, fall back to the core device limit.
	if (!context.getTexelBufferAlignmentFeatures().texelBufferAlignment)
		return (deUint32)context.getDeviceProperties().limits.minTexelBufferOffsetAlignment;

	// Query the extension's alignment properties via the properties2 chain.
	VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT alignmentProperties;
	deMemset(&alignmentProperties, 0, sizeof(alignmentProperties));
	alignmentProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TEXEL_BUFFER_ALIGNMENT_PROPERTIES_EXT;

	VkPhysicalDeviceProperties2 properties2;
	deMemset(&properties2, 0, sizeof(properties2));
	properties2.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
	properties2.pNext = &alignmentProperties;

	context.getInstanceInterface().getPhysicalDeviceProperties2(context.getPhysicalDevice(), &properties2);

	// Select uniform vs storage texel buffer alignment as requested by the caller.
	VkBool32 singleTexelAlignment = uniform ? alignmentProperties.uniformTexelBufferOffsetSingleTexelAlignment :
											  alignmentProperties.storageTexelBufferOffsetSingleTexelAlignment;
	VkDeviceSize align = uniform ? alignmentProperties.uniformTexelBufferOffsetAlignmentBytes :
								   alignmentProperties.storageTexelBufferOffsetAlignmentBytes;

	// Three-component formats are viewed one channel at a time, so a "texel" is one channel.
	VkDeviceSize texelSize = formatHasThreeComponents(format) ? tcu::getChannelSize(vk::mapVkFormat(format).type) : tcu::getPixelSize(vk::mapVkFormat(format));

	// If single-texel alignment is advertised, the offset may be relaxed to the texel size.
	if (singleTexelAlignment)
		align = de::min(align, texelSize);

	return (deUint32)align;
//! Allocate the host-visible result buffer; store tests never load from a buffer, hence
//! bufferLoadUniform is passed as false to the base class.
StoreTestInstance::StoreTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool declareImageFormatInShader, const bool singleLayerBind, const bool minalign)
	: BaseTestInstance	(context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, false)
	, m_imageSizeBytes	(getImageSizeBytes(texture.size(), format))
	const DeviceInterface&	vk			= m_context.getDeviceInterface();
	const VkDevice			device		= m_context.getDevice();
	Allocator&				allocator	= m_context.getDefaultAllocator();

	// A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.
	// Extra m_dstViewOffset bytes account for the aligned view offset into the buffer.

	m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(m_imageSizeBytes + m_dstViewOffset, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
		MemoryRequirement::HostVisible));
701 tcu::TestStatus StoreTestInstance::verifyResult (void)
703 const DeviceInterface& vk = m_context.getDeviceInterface();
704 const VkDevice device = m_context.getDevice();
706 const tcu::IVec3 imageSize = m_texture.size();
707 const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format);
709 const Allocation& alloc = m_imageBuffer->getAllocation();
710 invalidateAlloc(vk, device, alloc);
711 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize, (const char *)alloc.getHostPtr() + m_dstViewOffset);
713 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
714 return tcu::TestStatus::pass("Passed");
716 return tcu::TestStatus::fail("Image comparison failed");
//! Store test for images
//! Adds the actual VkImage plus a small uniform buffer carrying the layer index for
//! single-layer-bind mode; one descriptor set / image view is kept per layer.
class ImageStoreTestInstance : public StoreTestInstance
						ImageStoreTestInstance			(Context&		context,
														 const Texture&	texture,
														 const VkFormat	format,
														 const bool		declareImageFormatInShader,
														 const bool		singleLayerBind,
														 const bool		minalign);

	VkDescriptorSetLayout	prepareDescriptors			(void);
	void					commandBeforeCompute		(const VkCommandBuffer	cmdBuffer);
	void					commandBetweenShaderInvocations	(const VkCommandBuffer	cmdBuffer);
	void					commandAfterCompute			(const VkCommandBuffer	cmdBuffer);

	void					commandBindDescriptorsForLayer	(const VkCommandBuffer	cmdBuffer,
															 const VkPipelineLayout	pipelineLayout,

	de::MovePtr<Image>				m_image;							// storage image written by the shader
	de::MovePtr<Buffer>				m_constantsBuffer;					// per-layer constants (layer index), one chunk per layer
	const VkDeviceSize				m_constantsBufferChunkSizeBytes;	// aligned size of one constants chunk
	Move<VkDescriptorSetLayout>		m_descriptorSetLayout;
	Move<VkDescriptorPool>			m_descriptorPool;
	std::vector<SharedVkDescriptorSet>	m_allDescriptorSets;			// one per layer (only [0] used when binding all layers)
	std::vector<SharedVkImageView>		m_allImageViews;				// one per layer (only [0] used when binding all layers)
//! Create the storage image and fill the constants buffer with one layer index per chunk.
ImageStoreTestInstance::ImageStoreTestInstance (Context&		context,
												const Texture&	texture,
												const VkFormat	format,
												const bool		declareImageFormatInShader,
												const bool		singleLayerBind,
	: StoreTestInstance					(context, texture, format, declareImageFormatInShader, singleLayerBind, minalign)
	, m_constantsBufferChunkSizeBytes	(getOptimalUniformBufferChunkSize(context.getInstanceInterface(), context.getPhysicalDevice(), sizeof(deUint32)))
	, m_allDescriptorSets				(texture.numLayers())
	, m_allImageViews					(texture.numLayers())
	const DeviceInterface&	vk			= m_context.getDeviceInterface();
	const VkDevice			device		= m_context.getDevice();
	Allocator&				allocator	= m_context.getDefaultAllocator();

	// Image is both written by the shader (STORAGE) and copied out afterwards (TRANSFER_SRC).
	m_image = de::MovePtr<Image>(new Image(
		vk, device, allocator,
		makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
		MemoryRequirement::Any));

	// This buffer will be used to pass constants to the shader

	const int numLayers = m_texture.numLayers();
	const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
	m_constantsBuffer = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
		MemoryRequirement::HostVisible));

	// Write layerNdx at the start of each chunk; the rest of the chunk is zeroed padding.
	const Allocation& alloc = m_constantsBuffer->getAllocation();
	deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());

	deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));

	for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
		deUint32* valuePtr = reinterpret_cast<deUint32*>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
		*valuePtr = static_cast<deUint32>(layerNdx);

	// Make the host writes visible to the device.
	flushAlloc(vk, device, alloc);
//! Build the descriptor layout/pool and pre-create per-layer (or whole-image) sets and views.
VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors (void)
	const DeviceInterface&	vk		= m_context.getDeviceInterface();
	const VkDevice			device	= m_context.getDevice();

	const int numLayers = m_texture.numLayers();
	// Binding 0: storage image, binding 1: layer-index uniform buffer (matches the shader).
	m_descriptorSetLayout = DescriptorSetLayoutBuilder()
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)

	// Pool is sized for one set per layer (worst case: single-layer-bind mode).
	m_descriptorPool = DescriptorPoolBuilder()
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
		.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);

	if (m_singleLayerBind)
		// One descriptor set and one single-layer view per layer.
		for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
			m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
			m_allImageViews[layerNdx] = makeVkSharedPtr(makeImageView(
				vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));

	else // bind all layers at once
		m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
		m_allImageViews[0] = makeVkSharedPtr(makeImageView(
			vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
			makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers)));

	return *m_descriptorSetLayout; // not passing the ownership
//! Update and bind the descriptor set for the given layer (image view + constants chunk).
void ImageStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
	const DeviceInterface&	vk		= m_context.getDeviceInterface();
	const VkDevice			device	= m_context.getDevice();

	// Sets/views were pre-created in prepareDescriptors(); pick the one for this layer.
	const VkDescriptorSet	descriptorSet	= **m_allDescriptorSets[layerNdx];
	const VkImageView		imageView		= **m_allImageViews[layerNdx];

	const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);

	// Set the next chunk of the constants buffer. Each chunk begins with layer index that we've set before.
	const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
		m_constantsBuffer->get(), layerNdx*m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);

	DescriptorSetUpdateBuilder()
		.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
		.writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)

	vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
852 void ImageStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
854 const DeviceInterface& vk = m_context.getDeviceInterface();
856 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
857 const VkImageMemoryBarrier setImageLayoutBarrier = makeImageMemoryBarrier(
859 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
860 m_image->get(), fullImageSubresourceRange);
862 const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
863 const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
864 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
865 m_constantsBuffer->get(), 0ull, constantsBufferSize);
867 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &writeConstantsBarrier, 1, &setImageLayoutBarrier);
870 void ImageStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
872 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
875 void ImageStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
877 commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
880 //! Store test for buffers
881 class BufferStoreTestInstance : public StoreTestInstance
884 BufferStoreTestInstance (Context& context,
885 const Texture& texture,
886 const VkFormat format,
887 const bool declareImageFormatInShader,
888 const bool minalign);
891 VkDescriptorSetLayout prepareDescriptors (void);
892 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
894 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
895 const VkPipelineLayout pipelineLayout,
898 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
899 Move<VkDescriptorPool> m_descriptorPool;
900 Move<VkDescriptorSet> m_descriptorSet;
901 Move<VkBufferView> m_bufferView;
904 BufferStoreTestInstance::BufferStoreTestInstance (Context& context,
905 const Texture& texture,
906 const VkFormat format,
907 const bool declareImageFormatInShader,
909 : StoreTestInstance(context, texture, format, declareImageFormatInShader, false, minalign)
913 VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors (void)
915 const DeviceInterface& vk = m_context.getDeviceInterface();
916 const VkDevice device = m_context.getDevice();
918 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
919 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
922 m_descriptorPool = DescriptorPoolBuilder()
923 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
924 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
926 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
927 m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, m_dstViewOffset, m_imageSizeBytes);
929 return *m_descriptorSetLayout; // not passing the ownership
932 void BufferStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
934 DE_ASSERT(layerNdx == 0);
937 const VkDevice device = m_context.getDevice();
938 const DeviceInterface& vk = m_context.getDeviceInterface();
940 DescriptorSetUpdateBuilder()
941 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
943 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
946 void BufferStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
948 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(), m_imageSizeBytes + m_dstViewOffset);
951 class LoadStoreTest : public TestCase
956 FLAG_SINGLE_LAYER_BIND = 1 << 0, //!< Run the shader multiple times, each time binding a different layer.
957 FLAG_RESTRICT_IMAGES = 1 << 1, //!< If given, images in the shader will be qualified with "restrict".
958 FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER = 1 << 2, //!< Declare the format of the images in the shader code
959 FLAG_MINALIGN = 1 << 3, //!< Use bufferview offset that matches the advertised minimum alignment
960 FLAG_UNIFORM_TEXEL_BUFFER = 1 << 4, //!< Load from a uniform texel buffer rather than a storage texel buffer
963 LoadStoreTest (tcu::TestContext& testCtx,
964 const std::string& name,
965 const std::string& description,
966 const Texture& texture,
967 const VkFormat format,
968 const VkFormat imageFormat,
969 const deUint32 flags = FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER);
971 virtual void checkSupport (Context& context) const;
972 void initPrograms (SourceCollections& programCollection) const;
973 TestInstance* createInstance (Context& context) const;
976 const Texture m_texture;
977 const VkFormat m_format; //!< Format as accessed in the shader
978 const VkFormat m_imageFormat; //!< Storage format
979 const bool m_declareImageFormatInShader; //!< Whether the shader will specify the format layout qualifier of the images
980 const bool m_singleLayerBind;
981 const bool m_restrictImages;
982 const bool m_minalign;
983 bool m_bufferLoadUniform;
986 LoadStoreTest::LoadStoreTest (tcu::TestContext& testCtx,
987 const std::string& name,
988 const std::string& description,
989 const Texture& texture,
990 const VkFormat format,
991 const VkFormat imageFormat,
992 const deUint32 flags)
993 : TestCase (testCtx, name, description)
994 , m_texture (texture)
996 , m_imageFormat (imageFormat)
997 , m_declareImageFormatInShader ((flags & FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER) != 0)
998 , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
999 , m_restrictImages ((flags & FLAG_RESTRICT_IMAGES) != 0)
1000 , m_minalign ((flags & FLAG_MINALIGN) != 0)
1001 , m_bufferLoadUniform ((flags & FLAG_UNIFORM_TEXEL_BUFFER) != 0)
1003 if (m_singleLayerBind)
1004 DE_ASSERT(m_texture.numLayers() > 1);
1006 DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
1009 void LoadStoreTest::checkSupport (Context& context) const
1011 const vk::VkFormatProperties formatProperties (vk::getPhysicalDeviceFormatProperties(context.getInstanceInterface(),
1012 context.getPhysicalDevice(),
1014 const vk::VkFormatProperties imageFormatProperties (vk::getPhysicalDeviceFormatProperties(context.getInstanceInterface(),
1015 context.getPhysicalDevice(),
1018 if (!m_bufferLoadUniform && !m_declareImageFormatInShader)
1019 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_SHADER_STORAGE_IMAGE_READ_WITHOUT_FORMAT);
1021 if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY)
1022 context.requireDeviceCoreFeature(DEVICE_CORE_FEATURE_IMAGE_CUBE_ARRAY);
1024 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
1025 TCU_THROW(NotSupportedError, "Format not supported for storage images");
1027 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1028 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
1030 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(imageFormatProperties.optimalTilingFeatures))
1031 TCU_THROW(NotSupportedError, "Underlying format not supported at all for images");
1033 if ((m_texture.type() == IMAGE_TYPE_BUFFER) && !(imageFormatProperties.bufferFeatures))
1034 TCU_THROW(NotSupportedError, "Underlying format not supported at all for buffers");
1036 if (formatHasThreeComponents(m_format))
1038 // When the source buffer is three-component, the destination buffer is single-component.
1039 VkFormat dstFormat = getSingleComponentFormat(m_format);
1040 const vk::VkFormatProperties dstFormatProperties (vk::getPhysicalDeviceFormatProperties(context.getInstanceInterface(),
1041 context.getPhysicalDevice(),
1044 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(dstFormatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1045 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
1048 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1049 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
1051 if (m_bufferLoadUniform && m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT))
1052 TCU_THROW(NotSupportedError, "Format not supported for uniform texel buffers");
1055 void LoadStoreTest::initPrograms (SourceCollections& programCollection) const
1057 const tcu::TextureFormat texFormat = mapVkFormat(m_format);
1058 const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
1059 const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
1060 const std::string formatQualifierStr = getShaderImageFormatQualifier(texFormat);
1061 const std::string uniformTypeStr = getFormatPrefix(texFormat) + "textureBuffer";
1062 const std::string imageTypeStr = getShaderImageType(texFormat, usedImageType);
1063 const std::string maybeRestrictStr = (m_restrictImages ? "restrict " : "");
1064 const std::string xMax = de::toString(m_texture.size().x() - 1);
1066 std::ostringstream src;
1067 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
1069 if (!m_declareImageFormatInShader)
1071 src << "#extension GL_EXT_shader_image_load_formatted : require\n";
1073 src << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
1074 if (m_bufferLoadUniform)
1075 src << "layout (binding = 0) uniform " << uniformTypeStr << " u_image0;\n";
1076 else if (m_declareImageFormatInShader)
1077 src << "layout (binding = 0, " << formatQualifierStr << ") " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n";
1079 src << "layout (binding = 0) " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n";
1081 if (formatHasThreeComponents(m_format))
1082 src << "layout (binding = 1) " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n";
1084 src << "layout (binding = 1, " << formatQualifierStr << ") " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n";
1087 << "void main (void)\n"
1091 default: DE_ASSERT(0); // fallthrough
1093 if (m_bufferLoadUniform)
1095 // for three-component formats, the dst buffer is single-component and the shader
1096 // expands the store into 3 component-wise stores.
1097 std::string type = getFormatPrefix(texFormat) + "vec4";
1098 src << " int pos = int(gl_GlobalInvocationID.x);\n"
1099 " " << type << " t = texelFetch(u_image0, " + xMax + "-pos);\n";
1100 if (formatHasThreeComponents(m_format))
1102 src << " imageStore(u_image1, 3*pos+0, " << type << "(t.x));\n";
1103 src << " imageStore(u_image1, 3*pos+1, " << type << "(t.y));\n";
1104 src << " imageStore(u_image1, 3*pos+2, " << type << "(t.z));\n";
1107 src << " imageStore(u_image1, pos, t);\n";
1111 " int pos = int(gl_GlobalInvocationID.x);\n"
1112 " imageStore(u_image1, pos, imageLoad(u_image0, " + xMax + "-pos));\n";
1116 " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
1117 " imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" + xMax + "-pos.x, pos.y)));\n";
1121 " ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
1122 " imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" + xMax + "-pos.x, pos.y, pos.z)));\n";
1127 programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
1130 //! Load/store test base implementation
1131 class LoadStoreTestInstance : public BaseTestInstance
1134 LoadStoreTestInstance (Context& context,
1135 const Texture& texture,
1136 const VkFormat format,
1137 const VkFormat imageFormat,
1138 const bool declareImageFormatInShader,
1139 const bool singleLayerBind,
1140 const bool minalign,
1141 const bool bufferLoadUniform);
1144 virtual Buffer* getResultBuffer (void) const = 0; //!< Get the buffer that contains the result image
1146 tcu::TestStatus verifyResult (void);
1148 // Add empty implementations for functions that might be not needed
1149 void commandBeforeCompute (const VkCommandBuffer) {}
1150 void commandBetweenShaderInvocations (const VkCommandBuffer) {}
1151 void commandAfterCompute (const VkCommandBuffer) {}
1153 de::MovePtr<Buffer> m_imageBuffer; //!< Source data and helper buffer
1154 const VkDeviceSize m_imageSizeBytes;
1155 const VkFormat m_imageFormat; //!< Image format (for storage, may be different than texture format)
1156 tcu::TextureLevel m_referenceImage; //!< Used as input data and later to verify result image
1158 bool m_bufferLoadUniform;
1159 VkDescriptorType m_bufferLoadDescriptorType;
1160 VkBufferUsageFlagBits m_bufferLoadUsageBit;
1163 LoadStoreTestInstance::LoadStoreTestInstance (Context& context,
1164 const Texture& texture,
1165 const VkFormat format,
1166 const VkFormat imageFormat,
1167 const bool declareImageFormatInShader,
1168 const bool singleLayerBind,
1169 const bool minalign,
1170 const bool bufferLoadUniform)
1171 : BaseTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind, minalign, bufferLoadUniform)
1172 , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
1173 , m_imageFormat (imageFormat)
1174 , m_referenceImage (generateReferenceImage(texture.size(), imageFormat, format))
1175 , m_bufferLoadUniform (bufferLoadUniform)
1177 const DeviceInterface& vk = m_context.getDeviceInterface();
1178 const VkDevice device = m_context.getDevice();
1179 Allocator& allocator = m_context.getDefaultAllocator();
1181 m_bufferLoadDescriptorType = m_bufferLoadUniform ? VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER : VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
1182 m_bufferLoadUsageBit = m_bufferLoadUniform ? VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT : VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
1184 // A helper buffer with enough space to hold the whole image.
1186 m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
1187 vk, device, allocator,
1188 makeBufferCreateInfo(m_imageSizeBytes + m_srcViewOffset, m_bufferLoadUsageBit | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
1189 MemoryRequirement::HostVisible));
1191 // Copy reference data to buffer for subsequent upload to image.
1193 const Allocation& alloc = m_imageBuffer->getAllocation();
1194 deMemcpy((char *)alloc.getHostPtr() + m_srcViewOffset, m_referenceImage.getAccess().getDataPtr(), static_cast<size_t>(m_imageSizeBytes));
1195 flushAlloc(vk, device, alloc);
1198 tcu::TestStatus LoadStoreTestInstance::verifyResult (void)
1200 const DeviceInterface& vk = m_context.getDeviceInterface();
1201 const VkDevice device = m_context.getDevice();
1203 // Apply the same transformation as done in the shader
1204 const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
1205 flipHorizontally(reference);
1207 const Allocation& alloc = getResultBuffer()->getAllocation();
1208 invalidateAlloc(vk, device, alloc);
1209 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(), (const char *)alloc.getHostPtr() + m_dstViewOffset);
1211 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
1212 return tcu::TestStatus::pass("Passed");
1214 return tcu::TestStatus::fail("Image comparison failed");
1217 //! Load/store test for images
1218 class ImageLoadStoreTestInstance : public LoadStoreTestInstance
1221 ImageLoadStoreTestInstance (Context& context,
1222 const Texture& texture,
1223 const VkFormat format,
1224 const VkFormat imageFormat,
1225 const bool declareImageFormatInShader,
1226 const bool singleLayerBind,
1227 const bool minalign,
1228 const bool bufferLoadUniform);
1231 VkDescriptorSetLayout prepareDescriptors (void);
1232 void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
1233 void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
1234 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1236 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1237 const VkPipelineLayout pipelineLayout,
1238 const int layerNdx);
1240 Buffer* getResultBuffer (void) const { return m_imageBuffer.get(); }
1242 de::MovePtr<Image> m_imageSrc;
1243 de::MovePtr<Image> m_imageDst;
1244 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1245 Move<VkDescriptorPool> m_descriptorPool;
1246 std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
1247 std::vector<SharedVkImageView> m_allSrcImageViews;
1248 std::vector<SharedVkImageView> m_allDstImageViews;
1251 ImageLoadStoreTestInstance::ImageLoadStoreTestInstance (Context& context,
1252 const Texture& texture,
1253 const VkFormat format,
1254 const VkFormat imageFormat,
1255 const bool declareImageFormatInShader,
1256 const bool singleLayerBind,
1257 const bool minalign,
1258 const bool bufferLoadUniform)
1259 : LoadStoreTestInstance (context, texture, format, imageFormat, declareImageFormatInShader, singleLayerBind, minalign, bufferLoadUniform)
1260 , m_allDescriptorSets (texture.numLayers())
1261 , m_allSrcImageViews (texture.numLayers())
1262 , m_allDstImageViews (texture.numLayers())
1264 const DeviceInterface& vk = m_context.getDeviceInterface();
1265 const VkDevice device = m_context.getDevice();
1266 Allocator& allocator = m_context.getDefaultAllocator();
1267 const VkImageCreateFlags imageFlags = (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
1269 m_imageSrc = de::MovePtr<Image>(new Image(
1270 vk, device, allocator,
1271 makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, imageFlags),
1272 MemoryRequirement::Any));
1274 m_imageDst = de::MovePtr<Image>(new Image(
1275 vk, device, allocator,
1276 makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, imageFlags),
1277 MemoryRequirement::Any));
1280 VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors (void)
1282 const VkDevice device = m_context.getDevice();
1283 const DeviceInterface& vk = m_context.getDeviceInterface();
1285 const int numLayers = m_texture.numLayers();
1286 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1287 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1288 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1291 m_descriptorPool = DescriptorPoolBuilder()
1292 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1293 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1294 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
1296 if (m_singleLayerBind)
1298 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
1300 const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
1301 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);
1303 m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1304 m_allSrcImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1305 m_allDstImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1308 else // bind all layers at once
1310 const VkImageViewType viewType = mapImageViewType(m_texture.type());
1311 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);
1313 m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1314 m_allSrcImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1315 m_allDstImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1318 return *m_descriptorSetLayout; // not passing the ownership
1321 void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1323 const VkDevice device = m_context.getDevice();
1324 const DeviceInterface& vk = m_context.getDeviceInterface();
1326 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1327 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
1328 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
1330 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1331 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1333 DescriptorSetUpdateBuilder()
1334 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1335 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1336 .update(vk, device);
1337 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1340 void ImageLoadStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1342 const DeviceInterface& vk = m_context.getDeviceInterface();
1344 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
1346 const VkImageMemoryBarrier preCopyImageBarriers[] =
1348 makeImageMemoryBarrier(
1349 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
1350 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1351 m_imageSrc->get(), fullImageSubresourceRange),
1352 makeImageMemoryBarrier(
1353 0u, VK_ACCESS_SHADER_WRITE_BIT,
1354 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1355 m_imageDst->get(), fullImageSubresourceRange)
1358 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1359 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1360 m_imageBuffer->get(), 0ull, m_imageSizeBytes + m_srcViewOffset);
1362 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1363 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1366 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1367 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1368 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1369 m_imageSrc->get(), fullImageSubresourceRange);
1371 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
1373 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, ©Region);
1374 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
1378 void ImageLoadStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
1380 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
1383 void ImageLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1385 commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
1388 //! Load/store test for buffers
1389 class BufferLoadStoreTestInstance : public LoadStoreTestInstance
1392 BufferLoadStoreTestInstance (Context& context,
1393 const Texture& texture,
1394 const VkFormat format,
1395 const VkFormat imageFormat,
1396 const bool declareImageFormatInShader,
1397 const bool minalign,
1398 const bool bufferLoadUniform);
1401 VkDescriptorSetLayout prepareDescriptors (void);
1402 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1404 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1405 const VkPipelineLayout pipelineLayout,
1406 const int layerNdx);
1408 Buffer* getResultBuffer (void) const { return m_imageBufferDst.get(); }
1410 de::MovePtr<Buffer> m_imageBufferDst;
1411 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1412 Move<VkDescriptorPool> m_descriptorPool;
1413 Move<VkDescriptorSet> m_descriptorSet;
1414 Move<VkBufferView> m_bufferViewSrc;
1415 Move<VkBufferView> m_bufferViewDst;
1418 BufferLoadStoreTestInstance::BufferLoadStoreTestInstance (Context& context,
1419 const Texture& texture,
1420 const VkFormat format,
1421 const VkFormat imageFormat,
1422 const bool declareImageFormatInShader,
1423 const bool minalign,
1424 const bool bufferLoadUniform)
1425 : LoadStoreTestInstance(context, texture, format, imageFormat, declareImageFormatInShader, false, minalign, bufferLoadUniform)
1427 const DeviceInterface& vk = m_context.getDeviceInterface();
1428 const VkDevice device = m_context.getDevice();
1429 Allocator& allocator = m_context.getDefaultAllocator();
1431 // Create a destination buffer.
1433 m_imageBufferDst = de::MovePtr<Buffer>(new Buffer(
1434 vk, device, allocator,
1435 makeBufferCreateInfo(m_imageSizeBytes + m_dstViewOffset, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
1436 MemoryRequirement::HostVisible));
1439 VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors (void)
1441 const DeviceInterface& vk = m_context.getDeviceInterface();
1442 const VkDevice device = m_context.getDevice();
1444 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1445 .addSingleBinding(m_bufferLoadDescriptorType, VK_SHADER_STAGE_COMPUTE_BIT)
1446 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1449 m_descriptorPool = DescriptorPoolBuilder()
1450 .addType(m_bufferLoadDescriptorType)
1451 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1452 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1454 VkFormat dstFormat = formatHasThreeComponents(m_format) ? getSingleComponentFormat(m_format) : m_format;
1456 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1457 m_bufferViewSrc = makeBufferView(vk, device, m_imageBuffer->get(), m_format, m_srcViewOffset, m_imageSizeBytes);
1458 m_bufferViewDst = makeBufferView(vk, device, m_imageBufferDst->get(), dstFormat, m_dstViewOffset, m_imageSizeBytes);
1460 return *m_descriptorSetLayout; // not passing the ownership
1463 void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1465 DE_ASSERT(layerNdx == 0);
1468 const VkDevice device = m_context.getDevice();
1469 const DeviceInterface& vk = m_context.getDeviceInterface();
1471 DescriptorSetUpdateBuilder()
1472 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), m_bufferLoadDescriptorType, &m_bufferViewSrc.get())
1473 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
1474 .update(vk, device);
1475 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
1478 void BufferLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1480 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(), m_imageSizeBytes + m_dstViewOffset);
1483 TestInstance* StoreTest::createInstance (Context& context) const
1485 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1486 return new BufferStoreTestInstance(context, m_texture, m_format, m_declareImageFormatInShader, m_minalign);
1488 return new ImageStoreTestInstance(context, m_texture, m_format, m_declareImageFormatInShader, m_singleLayerBind, m_minalign);
1491 TestInstance* LoadStoreTest::createInstance (Context& context) const
1493 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1494 return new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader, m_minalign, m_bufferLoadUniform);
1496 return new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader, m_singleLayerBind, m_minalign, m_bufferLoadUniform);
1499 class ImageExtendOperandTestInstance : public BaseTestInstance
1502 ImageExtendOperandTestInstance (Context& context,
1503 const Texture& texture,
1504 const VkFormat format,
1505 const bool signExtend);
1507 virtual ~ImageExtendOperandTestInstance (void) {};
1511 VkDescriptorSetLayout prepareDescriptors (void);
1512 void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
1513 void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
1514 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1516 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1517 const VkPipelineLayout pipelineLayout,
1518 const int layerNdx);
1520 tcu::TestStatus verifyResult (void);
1526 tcu::TextureLevel m_inputImageData;
1528 de::MovePtr<Image> m_imageSrc; // source image
1529 SharedVkImageView m_imageSrcView;
1531 de::MovePtr<Image> m_imageDst; // dest image
1532 SharedVkImageView m_imageDstView;
1533 VkFormat m_imageDstFormat;
1535 de::MovePtr<Buffer> m_buffer; // result buffer
1536 VkDeviceSize m_bufferSizeBytes;
1538 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1539 Move<VkDescriptorPool> m_descriptorPool;
1540 SharedVkDescriptorSet m_descriptorSet;
1543 ImageExtendOperandTestInstance::ImageExtendOperandTestInstance (Context& context,
1544 const Texture& texture,
1545 const VkFormat format,
1546 const bool signExtend)
1547 : BaseTestInstance (context, texture, format, true, true, false, false)
1548 , m_signExtend (signExtend)
1550 const DeviceInterface& vk = m_context.getDeviceInterface();
1551 const VkDevice device = m_context.getDevice();
1552 Allocator& allocator = m_context.getDefaultAllocator();
1553 const deInt32 width = texture.size().x();
1554 const deInt32 height = texture.size().y();
1555 const tcu::TextureFormat textureFormat = mapVkFormat(m_format);
1557 // Generate reference image
1558 m_isSigned = (getTextureChannelClass(textureFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER);
1559 m_inputImageData.setStorage(textureFormat, width, height, 1);
1560 const tcu::PixelBufferAccess access = m_inputImageData.getAccess();
1561 int valueStart = m_isSigned ? -width / 2 : 0;
1562 for (int x = 0; x < width; ++x)
1563 for (int y = 0; y < height; ++y)
1565 const tcu::IVec4 color(valueStart + x, valueStart + y, valueStart, valueStart);
1566 access.setPixel(color, x, y);
1569 // Create source image
1570 m_imageSrc = de::MovePtr<Image>(new Image(
1571 vk, device, allocator,
1572 makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, 0u),
1573 MemoryRequirement::Any));
1575 // Create destination image
1576 m_bufferSizeBytes = width * height * tcu::getPixelSize(textureFormat);
1577 m_imageDstFormat = m_isSigned ? VK_FORMAT_R32G32B32A32_SINT : VK_FORMAT_R32G32B32A32_UINT;
1578 m_imageDst = de::MovePtr<Image>(new Image(
1579 vk, device, allocator,
1580 makeImageCreateInfo(m_texture, m_imageDstFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
1581 MemoryRequirement::Any));
1583 // Create helper buffer able to store input data and image write result
1584 m_buffer = de::MovePtr<Buffer>(new Buffer(
1585 vk, device, allocator,
1586 makeBufferCreateInfo(m_bufferSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
1587 MemoryRequirement::HostVisible));
1589 const Allocation& alloc = m_buffer->getAllocation();
1590 deMemcpy(alloc.getHostPtr(), m_inputImageData.getAccess().getDataPtr(), static_cast<size_t>(m_bufferSizeBytes));
1591 flushAlloc(vk, device, alloc);
1594 VkDescriptorSetLayout ImageExtendOperandTestInstance::prepareDescriptors (void)
1596 const DeviceInterface& vk = m_context.getDeviceInterface();
1597 const VkDevice device = m_context.getDevice();
1599 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1600 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1601 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1604 m_descriptorPool = DescriptorPoolBuilder()
1605 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
1606 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
1607 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1);
1609 const VkImageViewType viewType = mapImageViewType(m_texture.type());
1610 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
1612 m_descriptorSet = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1613 m_imageSrcView = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1614 m_imageDstView = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_imageDstFormat, subresourceRange));
1616 return *m_descriptorSetLayout; // not passing the ownership
1619 void ImageExtendOperandTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1623 const DeviceInterface& vk = m_context.getDeviceInterface();
1624 const VkDevice device = m_context.getDevice();
1625 const VkDescriptorSet descriptorSet = **m_descriptorSet;
1627 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, **m_imageSrcView, VK_IMAGE_LAYOUT_GENERAL);
1628 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, **m_imageDstView, VK_IMAGE_LAYOUT_GENERAL);
1630 typedef DescriptorSetUpdateBuilder::Location DSUBL;
1631 DescriptorSetUpdateBuilder()
1632 .writeSingle(descriptorSet, DSUBL::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1633 .writeSingle(descriptorSet, DSUBL::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1634 .update(vk, device);
1635 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1638 void ImageExtendOperandTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1640 const DeviceInterface& vk = m_context.getDeviceInterface();
1642 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
1644 const VkImageMemoryBarrier preCopyImageBarriers[] =
1646 makeImageMemoryBarrier(
1647 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
1648 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1649 m_imageSrc->get(), fullImageSubresourceRange),
1650 makeImageMemoryBarrier(
1651 0u, VK_ACCESS_SHADER_WRITE_BIT,
1652 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1653 m_imageDst->get(), fullImageSubresourceRange)
1656 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1657 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1658 m_buffer->get(), 0ull, m_bufferSizeBytes);
1660 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1661 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1664 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1665 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1666 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1667 m_imageSrc->get(), fullImageSubresourceRange);
1669 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
1671 vk.cmdCopyBufferToImage(cmdBuffer, m_buffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, ©Region);
1672 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
1676 void ImageExtendOperandTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
1678 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
1681 void ImageExtendOperandTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1683 commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_buffer->get(), m_bufferSizeBytes, m_texture);
1686 tcu::TestStatus ImageExtendOperandTestInstance::verifyResult (void)
1688 const DeviceInterface& vk = m_context.getDeviceInterface();
1689 const VkDevice device = m_context.getDevice();
1690 const tcu::IVec3 imageSize = m_texture.size();
1691 const tcu::PixelBufferAccess inputAccess = m_inputImageData.getAccess();
1692 const deInt32 width = inputAccess.getWidth();
1693 const deInt32 height = inputAccess.getHeight();
1694 tcu::TextureLevel refImage (mapVkFormat(m_imageDstFormat), width, height);
1695 tcu::PixelBufferAccess refAccess = refImage.getAccess();
1697 for (int x = 0; x < width; ++x)
1698 for (int y = 0; y < height; ++y)
1700 tcu::IVec4 color = inputAccess.getPixelInt(x, y);
1701 refAccess.setPixel(color, x, y);
1704 const Allocation& alloc = m_buffer->getAllocation();
1705 invalidateAlloc(vk, device, alloc);
1706 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageDstFormat), imageSize, alloc.getHostPtr());
1708 if (intThresholdCompare (m_context.getTestContext().getLog(), "Comparison", "Comparison", refAccess, result, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT))
1709 return tcu::TestStatus::pass("Passed");
1711 return tcu::TestStatus::fail("Image comparison failed");
1714 class ImageExtendOperandTest : public TestCase
1717 ImageExtendOperandTest (tcu::TestContext& testCtx,
1718 const std::string& name,
1719 const Texture texture,
1720 const VkFormat format,
1721 const bool readSigned);
1723 void checkSupport (Context& context) const;
1724 void initPrograms (SourceCollections& programCollection) const;
1725 TestInstance* createInstance (Context& context) const;
1728 const Texture m_texture;
1733 ImageExtendOperandTest::ImageExtendOperandTest (tcu::TestContext& testCtx,
1734 const std::string& name,
1735 const Texture texture,
1736 const VkFormat format,
1737 const bool signExtend)
1738 : TestCase (testCtx, name, "")
1739 , m_texture (texture)
1741 , m_signExtend (signExtend)
1745 void ImageExtendOperandTest::checkSupport (Context& context) const
1747 const vk::VkFormatProperties formatProperties (vk::getPhysicalDeviceFormatProperties(context.getInstanceInterface(),
1748 context.getPhysicalDevice(),
1751 if (!context.requireDeviceExtension("VK_KHR_spirv_1_4"))
1752 TCU_THROW(NotSupportedError, "VK_KHR_spirv_1_4 not supported");
1754 if ((m_texture.type() != IMAGE_TYPE_BUFFER) && !(formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT))
1755 TCU_THROW(NotSupportedError, "Format not supported for storage images");
1757 if (m_texture.type() == IMAGE_TYPE_BUFFER && !(formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT))
1758 TCU_THROW(NotSupportedError, "Format not supported for storage texel buffers");
// Builds the SPIR-V 1.4 compute shader: one invocation per texel reads from the
// source image and writes to the destination image, applying ${extend_operand}
// (SignExtend or ZeroExtend) on both OpImageRead and OpImageWrite.
// NOTE(review): this excerpt is missing several template lines — the
// "${capability}", "${image_types}", "${image_uniforms}" and "${image_load}"
// placeholders written into the specialization map below never appear in the
// template string as shown, and lines such as "OpReturn" are absent. Confirm
// against the full file before editing the template.
void ImageExtendOperandTest::initPrograms (SourceCollections& programCollection) const
{
	tcu::StringTemplate shaderTemplate(
		"OpCapability Shader\n"
		"%std450 = OpExtInstImport \"GLSL.std.450\"\n"
		"OpMemoryModel Logical GLSL450\n"
		"OpEntryPoint GLCompute %main \"main\" %id %src_image_ptr %dst_image_ptr\n"
		"OpExecutionMode %main LocalSize 1 1 1\n"

		// decorations: global invocation id drives the texel coordinate
		"OpDecorate %id BuiltIn GlobalInvocationId\n"

		"OpDecorate %src_image_ptr DescriptorSet 0\n"
		"OpDecorate %src_image_ptr Binding 0\n"
		"OpDecorate %src_image_ptr NonWritable\n"

		"OpDecorate %dst_image_ptr DescriptorSet 0\n"
		"OpDecorate %dst_image_ptr Binding 1\n"
		"OpDecorate %dst_image_ptr NonReadable\n"

		// basic scalar/vector types
		"%type_void = OpTypeVoid\n"
		"%type_i32 = OpTypeInt 32 1\n"
		"%type_u32 = OpTypeInt 32 0\n"
		"%type_vec3_i32 = OpTypeVector %type_i32 3\n"
		"%type_vec3_u32 = OpTypeVector %type_u32 3\n"
		"%type_vec4_i32 = OpTypeVector %type_i32 4\n"
		"%type_vec4_u32 = OpTypeVector %type_u32 4\n"

		"%type_fun_void = OpTypeFunction %type_void\n"
		"%type_ptr_fun_i32 = OpTypePointer Function %type_i32\n"

		"%type_ptr_in_vec3_u32 = OpTypePointer Input %type_vec3_u32\n"
		"%type_ptr_in_u32 = OpTypePointer Input %type_u32\n"

		"%id = OpVariable %type_ptr_in_vec3_u32 Input\n"

		"${image_variables}"

		"%main = OpFunction %type_void None %type_fun_void\n"
		"%label = OpLabel\n"

		// read one texel and write it out, with the extend operand on both ops
		"%coord = OpLoad %type_vec3_u32 %id\n"
		"%value = OpImageRead ${read_vect4_type} %src_image %coord ${extend_operand}\n"
		" OpImageWrite %dst_image %coord %value ${extend_operand}\n"

		" OpFunctionEnd\n");

	tcu::TextureFormat	tcuFormat		= mapVkFormat(m_format);
	const ImageType		usedImageType	= getImageTypeForSingleLayer(m_texture.type());
	// imageTypeStr is not referenced below in this excerpt.
	const std::string	imageTypeStr	= getShaderImageType(tcuFormat, usedImageType);
	const bool			isSigned		= (getTextureChannelClass(tcuFormat.type) == tcu::TEXTURECHANNELCLASS_SIGNED_INTEGER);

	// Per-format SPIR-V image format name and whether StorageImageExtendedFormats
	// must be requested for it.
	struct FormatData
	{
		std::string	spirvImageFormat;
		bool		isExtendedFormat;
	};

	const std::map<vk::VkFormat, FormatData> formatDataMap =
	{
		// Mandatory support
		{ VK_FORMAT_R32G32B32A32_UINT,			{ "Rgba32ui",	false } },
		{ VK_FORMAT_R16G16B16A16_UINT,			{ "Rgba16ui",	false } },
		{ VK_FORMAT_R8G8B8A8_UINT,				{ "Rgba8ui",	false } },
		{ VK_FORMAT_R32_UINT,					{ "R32ui",		false } },
		{ VK_FORMAT_R32G32B32A32_SINT,			{ "Rgba32i",	false } },
		{ VK_FORMAT_R16G16B16A16_SINT,			{ "Rgba16i",	false } },
		{ VK_FORMAT_R8G8B8A8_SINT,				{ "Rgba8i",		false } },
		{ VK_FORMAT_R32_SINT,					{ "R32i",		false } },

		// Requires StorageImageExtendedFormats capability
		{ VK_FORMAT_R32G32_UINT,				{ "Rg32ui",		true } },
		{ VK_FORMAT_R16G16_UINT,				{ "Rg16ui",		true } },
		{ VK_FORMAT_R16_UINT,					{ "R16ui",		true } },
		{ VK_FORMAT_R8G8_UINT,					{ "Rg8ui",		true } },
		{ VK_FORMAT_R8_UINT,					{ "R8ui",		true } },
		{ VK_FORMAT_R32G32_SINT,				{ "Rg32i",		true } },
		{ VK_FORMAT_R16G16_SINT,				{ "Rg16i",		true } },
		{ VK_FORMAT_R16_SINT,					{ "R16i",		true } },
		{ VK_FORMAT_R8G8_SINT,					{ "Rg8i",		true } },
		{ VK_FORMAT_R8_SINT,					{ "R8i",		true } },
		{ VK_FORMAT_A2B10G10R10_UINT_PACK32,	{ "Rgb10a2ui",	true } }
	};

	auto it = formatDataMap.find(m_format);
	if (it == formatDataMap.end())
		DE_ASSERT(DE_FALSE); // Missing int format data
	// NOTE(review): DE_ASSERT compiles out in release builds, so 'it' would be
	// dereferenced at end() here for an unmapped format — consider TCU_THROW.
	auto spirvImageFormat	= it->second.spirvImageFormat;
	auto isExtendedFormat	= it->second.isExtendedFormat;

	// Request additional capability when needed
	std::string capability = "";
	if (isExtendedFormat)
		capability += "OpCapability StorageImageExtendedFormats\n";

	// Read type and sampled type must match. For uint formats it does not
	// matter if we do a Sign or ZeroExtend, it matters only for sint formats
	std::string readTypePostfix = "u32";
	if (isSigned && m_signExtend)
		readTypePostfix = "i32";

	std::map<std::string, std::string> specializations =
	{
		{ "image_type_id",			"%type_image" },
		{ "image_uni_ptr_type_id",	"%type_ptr_uniform_const_image" },
		{ "image_var_id",			"%src_image_ptr" },
		{ "image_id",				"%src_image" },
		{ "capability",				capability },
		{ "image_format",			spirvImageFormat },
		{ "sampled_type",			(std::string("%type_") + readTypePostfix) },
		{ "read_vect4_type",		(std::string("%type_vec4_") + readTypePostfix) },
		{ "extend_operand",			(m_signExtend ? "SignExtend" : "ZeroExtend") }
	};

	// Addidtional parametrization is needed for a case when source and destination textures have same format
	tcu::StringTemplate imageTypeTemplate(
		"${image_type_id} = OpTypeImage ${sampled_type} 2D 0 0 0 2 ${image_format}\n");
	tcu::StringTemplate imageUniformTypeTemplate(
		"${image_uni_ptr_type_id} = OpTypePointer UniformConstant ${image_type_id}\n");
	tcu::StringTemplate imageVariablesTemplate(
		"${image_var_id} = OpVariable ${image_uni_ptr_type_id} UniformConstant\n");
	tcu::StringTemplate imageLoadTemplate(
		"${image_id} = OpLoad ${image_type_id} ${image_var_id}\n");

	std::string imageTypes;
	std::string imageUniformTypes;
	std::string imageVariables;
	std::string imageLoad;

	// If input image format is the same as output there is less spir-v definitions
	if ((m_format == VK_FORMAT_R32G32B32A32_SINT) || (m_format == VK_FORMAT_R32G32B32A32_UINT))
	{
		// Single shared image type/pointer; two variables and two loads.
		imageTypes			= imageTypeTemplate.specialize(specializations);
		imageUniformTypes	= imageUniformTypeTemplate.specialize(specializations);
		imageVariables		= imageVariablesTemplate.specialize(specializations);
		imageLoad			= imageLoadTemplate.specialize(specializations);

		specializations["image_var_id"]	= "%dst_image_ptr";
		specializations["image_id"]		= "%dst_image";
		imageVariables	+= imageVariablesTemplate.specialize(specializations);
		imageLoad		+= imageLoadTemplate.specialize(specializations);
	}
	else
	{
		// Distinct source and destination image types: the destination is always
		// a 32-bit RGBA format matching the signedness of the source.
		specializations["image_type_id"]			= "%type_src_image";
		specializations["image_uni_ptr_type_id"]	= "%type_ptr_uniform_const_src_image";
		imageTypes			= imageTypeTemplate.specialize(specializations);
		imageUniformTypes	= imageUniformTypeTemplate.specialize(specializations);
		imageVariables		= imageVariablesTemplate.specialize(specializations);
		imageLoad			= imageLoadTemplate.specialize(specializations);

		specializations["image_format"]				= isSigned ? "Rgba32i" : "Rgba32ui";
		specializations["image_type_id"]			= "%type_dst_image";
		specializations["image_uni_ptr_type_id"]	= "%type_ptr_uniform_const_dst_image";
		specializations["image_var_id"]				= "%dst_image_ptr";
		specializations["image_id"]					= "%dst_image";
		imageTypes			+= imageTypeTemplate.specialize(specializations);
		imageUniformTypes	+= imageUniformTypeTemplate.specialize(specializations);
		imageVariables		+= imageVariablesTemplate.specialize(specializations);
		imageLoad			+= imageLoadTemplate.specialize(specializations);
	}

	specializations["image_types"]		= imageTypes;
	specializations["image_uniforms"]	= imageUniformTypes;
	specializations["image_variables"]	= imageVariables;
	specializations["image_load"]		= imageLoad;

	// Specialize whole shader and add it to program collection
	programCollection.spirvAsmSources.add("comp") << shaderTemplate.specialize(specializations)
		<< vk::SpirVAsmBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_4, true);
}
1944 TestInstance* ImageExtendOperandTest::createInstance(Context& context) const
1946 return new ImageExtendOperandTestInstance(context, m_texture, m_format, m_signExtend);
// One canonical test texture per image type (extent, layer count);
// getTestTexture() looks entries up by type.
static const Texture s_textures[] =
{
	Texture(IMAGE_TYPE_1D,			tcu::IVec3(64,  1, 1),	1),
	Texture(IMAGE_TYPE_1D_ARRAY,	tcu::IVec3(64,  1, 1),	8),
	Texture(IMAGE_TYPE_2D,			tcu::IVec3(64, 64, 1),	1),
	Texture(IMAGE_TYPE_2D_ARRAY,	tcu::IVec3(64, 64, 1),	8),
	Texture(IMAGE_TYPE_3D,			tcu::IVec3(64, 64, 8),	1),
	Texture(IMAGE_TYPE_CUBE,		tcu::IVec3(64, 64, 1),	6),
	Texture(IMAGE_TYPE_CUBE_ARRAY,	tcu::IVec3(64, 64, 1),	2*6),
	Texture(IMAGE_TYPE_BUFFER,		tcu::IVec3(64,  1, 1),	1),
};
1961 const Texture& getTestTexture (const ImageType imageType)
1963 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1964 if (s_textures[textureNdx].type() == imageType)
1965 return s_textures[textureNdx];
1967 DE_FATAL("Internal error");
1968 return s_textures[0];
// Formats exercised by the store / load_store / format_reinterpret groups.
// NOTE(review): this list appears truncated in this excerpt — single-component
// entries referenced elsewhere (e.g. R32_UINT/R32_SINT used as mandatory formats
// in ImageExtendOperandTest's formatDataMap, and R16_UINT/R8_UINT/R16_SINT/
// R8_SINT/R8_UNORM/R8_SNORM) are absent; confirm against the full file.
static const VkFormat s_formats[] =
{
	// Mandatory support
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R16G16B16A16_SFLOAT,
	VK_FORMAT_R32_SFLOAT,

	VK_FORMAT_R32G32B32A32_UINT,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R8G8B8A8_UINT,

	VK_FORMAT_R32G32B32A32_SINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R8G8B8A8_SINT,

	VK_FORMAT_R8G8B8A8_UNORM,

	VK_FORMAT_R8G8B8A8_SNORM,

	// Requires StorageImageExtendedFormats capability
	VK_FORMAT_B10G11R11_UFLOAT_PACK32,

	VK_FORMAT_R32G32_SFLOAT,
	VK_FORMAT_R16G16_SFLOAT,
	VK_FORMAT_R16_SFLOAT,

	VK_FORMAT_A2B10G10R10_UINT_PACK32,
	VK_FORMAT_R32G32_UINT,
	VK_FORMAT_R16G16_UINT,

	VK_FORMAT_R8G8_UINT,

	VK_FORMAT_R32G32_SINT,
	VK_FORMAT_R16G16_SINT,

	VK_FORMAT_R8G8_SINT,

	VK_FORMAT_A2B10G10R10_UNORM_PACK32,
	VK_FORMAT_R16G16B16A16_UNORM,
	VK_FORMAT_R16G16B16A16_SNORM,
	VK_FORMAT_R16G16_UNORM,
	VK_FORMAT_R16_UNORM,
	VK_FORMAT_R8G8_UNORM,

	VK_FORMAT_R16G16_SNORM,
	VK_FORMAT_R16_SNORM,
	VK_FORMAT_R8G8_SNORM,
};
// Three-component (non-packed) formats — used only for texel-buffer cases in
// createImageLoadStoreTests(); cf. the formatHasThreeComponents()/
// getSingleComponentFormat() helpers near the top of the file.
static const VkFormat s_formatsThreeComponent[] =
{
	VK_FORMAT_R8G8B8_UINT,
	VK_FORMAT_R8G8B8_SINT,
	VK_FORMAT_R8G8B8_UNORM,
	VK_FORMAT_R8G8B8_SNORM,
	VK_FORMAT_R16G16B16_UINT,
	VK_FORMAT_R16G16B16_SINT,
	VK_FORMAT_R16G16B16_UNORM,
	VK_FORMAT_R16G16B16_SNORM,
	VK_FORMAT_R16G16B16_SFLOAT,
	VK_FORMAT_R32G32B32_UINT,
	VK_FORMAT_R32G32B32_SINT,
	VK_FORMAT_R32G32B32_SFLOAT,
};
2044 tcu::TestCaseGroup* createImageStoreTests (tcu::TestContext& testCtx)
2046 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store", "Plain imageStore() cases"));
2047 de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for write images"));
2048 de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for write images"));
2050 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
2052 const Texture& texture = s_textures[textureNdx];
2053 de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
2054 de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
2055 const bool isLayered = (texture.numLayers() > 1);
2057 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
2059 groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx]));
2060 groupWithoutFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], 0));
2063 groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
2064 texture, s_formats[formatNdx],
2065 StoreTest::FLAG_SINGLE_LAYER_BIND | StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
2067 if (texture.type() == IMAGE_TYPE_BUFFER)
2069 groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], StoreTest::FLAG_MINALIGN | StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
2070 groupWithoutFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], StoreTest::FLAG_MINALIGN));
2074 testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
2075 testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
2078 testGroup->addChild(testGroupWithFormat.release());
2079 testGroup->addChild(testGroupWithoutFormat.release());
2081 return testGroup.release();
2084 tcu::TestCaseGroup* createImageLoadStoreTests (tcu::TestContext& testCtx)
2086 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store", "Cases with imageLoad() followed by imageStore()"));
2087 de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for read images"));
2088 de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for read images"));
2090 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
2092 const Texture& texture = s_textures[textureNdx];
2093 de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
2094 de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
2095 const bool isLayered = (texture.numLayers() > 1);
2097 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
2099 groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx]));
2100 groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx], 0));
2103 groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
2104 texture, s_formats[formatNdx], s_formats[formatNdx],
2105 LoadStoreTest::FLAG_SINGLE_LAYER_BIND | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
2106 if (texture.type() == IMAGE_TYPE_BUFFER)
2108 groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
2109 groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign_uniform", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
2110 groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN));
2111 groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_minalign_uniform", "", texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
2115 if (texture.type() == IMAGE_TYPE_BUFFER)
2117 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formatsThreeComponent); ++formatNdx)
2119 groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formatsThreeComponent[formatNdx]) + "_uniform", "", texture, s_formatsThreeComponent[formatNdx], s_formatsThreeComponent[formatNdx], LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
2120 groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formatsThreeComponent[formatNdx]) + "_minalign_uniform", "", texture, s_formatsThreeComponent[formatNdx], s_formatsThreeComponent[formatNdx], LoadStoreTest::FLAG_MINALIGN | LoadStoreTest::FLAG_UNIFORM_TEXEL_BUFFER));
2124 testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
2125 testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
2128 testGroup->addChild(testGroupWithFormat.release());
2129 testGroup->addChild(testGroupWithoutFormat.release());
2131 return testGroup.release();
2134 tcu::TestCaseGroup* createImageFormatReinterpretTests (tcu::TestContext& testCtx)
2136 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret", "Cases with differing texture and image formats"));
2138 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
2140 const Texture& texture = s_textures[textureNdx];
2141 de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
2143 for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
2144 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
2146 const std::string caseName = getFormatShortString(s_formats[imageFormatNdx]) + "_" + getFormatShortString(s_formats[formatNdx]);
2147 if (imageFormatNdx != formatNdx && formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
2148 groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, "", texture, s_formats[formatNdx], s_formats[imageFormatNdx]));
2150 testGroup->addChild(groupByImageViewType.release());
2153 return testGroup.release();
2156 de::MovePtr<TestCase> createImageQualifierRestrictCase (tcu::TestContext& testCtx, const ImageType imageType, const std::string& name)
2158 const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
2159 const Texture& texture = getTestTexture(imageType);
2160 return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, "", texture, format, format, LoadStoreTest::FLAG_RESTRICT_IMAGES | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
2163 tcu::TestCaseGroup* createImageExtendOperandsTests(tcu::TestContext& testCtx)
2165 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "extend_operands_spirv1p4", "Cases with SignExtend and ZeroExtend"));
2167 const auto texture = Texture(IMAGE_TYPE_2D, tcu::IVec3(8, 8, 1), 1);
2168 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
2170 auto format = s_formats[formatNdx];
2171 bool intFormat = isIntFormat(format);
2172 if (intFormat || isUintFormat(format))
2174 const std::string caseName = getFormatShortString(format) + (intFormat ? "_sign_extend" : "_zero_extend");
2175 testGroup->addChild(new ImageExtendOperandTest(testCtx, caseName, texture, format, intFormat));
2179 return testGroup.release();