1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 Mobica Ltd.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and/or associated documentation files (the
9 * "Materials"), to deal in the Materials without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sublicense, and/or sell copies of the Materials, and to
12 * permit persons to whom the Materials are furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice(s) and this permission notice shall be included
16 * in all copies or substantial portions of the Materials.
18 * The Materials are Confidential Information as defined by the
19 * Khronos Membership Agreement until designated non-confidential by Khronos,
20 * at which point this condition clause shall be removed.
22 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
25 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
26 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
27 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
28 * MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
32 * \brief Image load/store Tests
33 *//*--------------------------------------------------------------------*/
35 #include "vktImageLoadStoreTests.hpp"
36 #include "vktTestCaseUtil.hpp"
37 #include "vktImageTestsUtil.hpp"
38 #include "vktImageTexture.hpp"
42 #include "vkRefUtil.hpp"
43 #include "vkPlatform.hpp"
44 #include "vkPrograms.hpp"
45 #include "vkMemUtil.hpp"
46 #include "vkBuilderUtil.hpp"
47 #include "vkQueryUtil.hpp"
48 #include "vkImageUtil.hpp"
50 #include "deUniquePtr.hpp"
51 #include "deStringUtil.hpp"
53 #include "tcuImageCompare.hpp"
54 #include "tcuTexture.hpp"
55 #include "tcuTextureUtil.hpp"
56 #include "tcuFloat.hpp"
70 inline VkImageCreateInfo makeImageCreateInfo (const Texture& texture, const VkFormat format, const VkImageUsageFlags usage, const VkImageCreateFlags flags)
72 const VkImageCreateInfo imageParams =
74 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
75 DE_NULL, // const void* pNext;
76 (isCube(texture) ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0u) | flags, // VkImageCreateFlags flags;
77 mapImageType(texture.type()), // VkImageType imageType;
78 format, // VkFormat format;
79 makeExtent3D(texture.layerSize()), // VkExtent3D extent;
80 1u, // deUint32 mipLevels;
81 (deUint32)texture.numLayers(), // deUint32 arrayLayers;
82 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
83 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
84 usage, // VkImageUsageFlags usage;
85 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
86 0u, // deUint32 queueFamilyIndexCount;
87 DE_NULL, // const deUint32* pQueueFamilyIndices;
88 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
93 inline VkBufferImageCopy makeBufferImageCopy (const Texture& texture)
95 return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
98 ImageType getImageTypeForSingleLayer (const ImageType imageType)
103 case IMAGE_TYPE_1D_ARRAY:
104 return IMAGE_TYPE_1D;
107 case IMAGE_TYPE_2D_ARRAY:
108 case IMAGE_TYPE_CUBE:
109 case IMAGE_TYPE_CUBE_ARRAY:
110 // A single layer for cube is a 2d face
111 return IMAGE_TYPE_2D;
114 return IMAGE_TYPE_3D;
116 case IMAGE_TYPE_BUFFER:
117 return IMAGE_TYPE_BUFFER;
120 DE_FATAL("Internal test error");
121 return IMAGE_TYPE_LAST;
125 float computeStoreColorScale (const VkFormat format, const tcu::IVec3 imageSize)
127 const int maxImageDimension = de::max(imageSize.x(), de::max(imageSize.y(), imageSize.z()));
128 const float div = static_cast<float>(maxImageDimension - 1);
130 if (isUnormFormat(format))
132 else if (isSnormFormat(format))
138 inline float computeStoreColorBias (const VkFormat format)
140 return isSnormFormat(format) ? -1.0f : 0.0f;
143 inline bool isIntegerFormat (const VkFormat format)
145 return isIntFormat(format) || isUintFormat(format);
148 tcu::ConstPixelBufferAccess getLayerOrSlice (const Texture& texture, const tcu::ConstPixelBufferAccess access, const int layer)
150 switch (texture.type())
154 case IMAGE_TYPE_BUFFER:
156 DE_ASSERT(layer == 0);
159 case IMAGE_TYPE_1D_ARRAY:
160 return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);
162 case IMAGE_TYPE_2D_ARRAY:
163 case IMAGE_TYPE_CUBE:
164 case IMAGE_TYPE_CUBE_ARRAY:
165 case IMAGE_TYPE_3D: // 3d texture is treated as if depth was the layers
166 return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);
169 DE_FATAL("Internal test error");
170 return tcu::ConstPixelBufferAccess();
174 std::string getFormatCaseName (const VkFormat format)
176 const std::string fullName = getFormatName(format);
178 DE_ASSERT(de::beginsWith(fullName, "VK_FORMAT_"));
180 return de::toLower(fullName.substr(10));
183 //! \return true if all layers match in both pixel buffers
184 bool comparePixelBuffers (tcu::TestLog& log,
185 const Texture& texture,
186 const VkFormat format,
187 const tcu::ConstPixelBufferAccess reference,
188 const tcu::ConstPixelBufferAccess result)
190 DE_ASSERT(reference.getFormat() == result.getFormat());
191 DE_ASSERT(reference.getSize() == result.getSize());
193 const bool intFormat = isIntegerFormat(format);
194 const bool is3d = (texture.type() == IMAGE_TYPE_3D);
195 const int numLayersOrSlices = (is3d ? texture.size().z() : texture.numLayers());
196 const int numCubeFaces = 6;
198 int passedLayers = 0;
199 for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
201 const std::string comparisonName = "Comparison" + de::toString(layerNdx);
202 const std::string comparisonDesc = "Image Comparison, " +
203 (isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " + de::toString(layerNdx / numCubeFaces) :
204 is3d ? "slice " + de::toString(layerNdx) : "layer " + de::toString(layerNdx));
206 const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
207 const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);
211 ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
213 ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
218 return passedLayers == numLayersOrSlices;
221 //!< Zero out invalid pixels in the image (denormalized, infinite, NaN values)
222 void replaceBadFloatReinterpretValues (const tcu::PixelBufferAccess access)
224 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT);
226 for (int z = 0; z < access.getDepth(); ++z)
227 for (int y = 0; y < access.getHeight(); ++y)
228 for (int x = 0; x < access.getWidth(); ++x)
230 const tcu::Vec4 color(access.getPixel(x, y, z));
231 tcu::Vec4 newColor = color;
233 for (int i = 0; i < 4; ++i)
235 const tcu::Float32 f(color[i]);
236 if (f.isDenorm() || f.isInf() || f.isNaN())
240 if (newColor != color)
241 access.setPixel(newColor, x, y, z);
245 //!< replace invalid pixels in the image (-128)
246 void replaceSnormReinterpretValues (const tcu::PixelBufferAccess access)
248 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);
250 for (int z = 0; z < access.getDepth(); ++z)
251 for (int y = 0; y < access.getHeight(); ++y)
252 for (int x = 0; x < access.getWidth(); ++x)
254 const tcu::IVec4 color(access.getPixelInt(x, y, z));
255 tcu::IVec4 newColor = color;
257 for (int i = 0; i < 4; ++i)
259 const deInt32 oldColor(color[i]);
260 if (oldColor == -128) newColor[i] = -127;
263 if (newColor != color)
264 access.setPixel(newColor, x, y, z);
268 tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, const VkFormat readFormat)
270 // Generate a reference image data using the storage format
272 tcu::TextureLevel reference(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
273 const tcu::PixelBufferAccess access = reference.getAccess();
275 const float storeColorScale = computeStoreColorScale(imageFormat, imageSize);
276 const float storeColorBias = computeStoreColorBias(imageFormat);
278 const bool intFormat = isIntegerFormat(imageFormat);
279 const int xMax = imageSize.x() - 1;
280 const int yMax = imageSize.y() - 1;
282 for (int z = 0; z < imageSize.z(); ++z)
283 for (int y = 0; y < imageSize.y(); ++y)
284 for (int x = 0; x < imageSize.x(); ++x)
286 const tcu::IVec4 color(x^y^z, (xMax - x)^y^z, x^(yMax - y)^z, (xMax - x)^(yMax - y)^z);
289 access.setPixel(color, x, y, z);
291 access.setPixel(color.asFloat()*storeColorScale + storeColorBias, x, y, z);
294 // If the image is to be accessed as a float texture, get rid of invalid values
296 if (isFloatFormat(readFormat) && imageFormat != readFormat)
297 replaceBadFloatReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
298 if (isSnormFormat(readFormat) && imageFormat != readFormat)
299 replaceSnormReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
304 inline tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat)
306 return generateReferenceImage(imageSize, imageFormat, imageFormat);
309 void flipHorizontally (const tcu::PixelBufferAccess access)
311 const int xMax = access.getWidth() - 1;
312 const int halfWidth = access.getWidth() / 2;
314 if (isIntegerFormat(mapTextureFormat(access.getFormat())))
315 for (int z = 0; z < access.getDepth(); z++)
316 for (int y = 0; y < access.getHeight(); y++)
317 for (int x = 0; x < halfWidth; x++)
319 const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
320 access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
321 access.setPixel(temp, x, y, z);
324 for (int z = 0; z < access.getDepth(); z++)
325 for (int y = 0; y < access.getHeight(); y++)
326 for (int x = 0; x < halfWidth; x++)
328 const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
329 access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
330 access.setPixel(temp, x, y, z);
#if defined(DE_DEBUG)
//! Debug-only sanity check used in asserts.
inline bool colorScaleAndBiasAreValid (const VkFormat format, const float colorScale, const float colorBias)
{
    // Only normalized (fixed-point) formats may have scale/bias
    const bool integerOrFloatFormat = isIntFormat(format) || isUintFormat(format) || isFloatFormat(format);
    return !integerOrFloatFormat || (colorScale == 1.0f && colorBias == 0.0f);
}
#endif // DE_DEBUG
343 inline bool formatsAreCompatible (const VkFormat format0, const VkFormat format1)
345 return format0 == format1 || mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize();
348 void commandImageWriteBarrierBetweenShaderInvocations (Context& context, const VkCommandBuffer cmdBuffer, const VkImage image, const Texture& texture)
350 const DeviceInterface& vk = context.getDeviceInterface();
352 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
353 const VkImageMemoryBarrier shaderWriteBarrier = makeImageMemoryBarrier(
354 VK_ACCESS_SHADER_WRITE_BIT, 0u,
355 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
356 image, fullImageSubresourceRange);
358 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier);
361 void commandBufferWriteBarrierBeforeHostRead (Context& context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer, const VkDeviceSize bufferSizeBytes)
363 const DeviceInterface& vk = context.getDeviceInterface();
365 const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
366 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
367 buffer, 0ull, bufferSizeBytes);
369 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
372 //! Copy all layers of an image to a buffer.
373 void commandCopyImageToBuffer (Context& context,
374 const VkCommandBuffer cmdBuffer,
376 const VkBuffer buffer,
377 const VkDeviceSize bufferSizeBytes,
378 const Texture& texture)
380 const DeviceInterface& vk = context.getDeviceInterface();
382 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
383 const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
384 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
385 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
386 image, fullImageSubresourceRange);
388 const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);
390 const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
391 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
392 buffer, 0ull, bufferSizeBytes);
394 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
395 vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, ©Region);
396 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, ©Barrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
399 //! Minimum chunk size is determined by the offset alignment requirements.
400 VkDeviceSize getOptimalUniformBufferChunkSize (Context& context, VkDeviceSize minimumRequiredChunkSizeBytes)
402 const VkPhysicalDeviceProperties properties = getPhysicalDeviceProperties(context.getInstanceInterface(), context.getPhysicalDevice());
403 const VkDeviceSize alignment = properties.limits.minUniformBufferOffsetAlignment;
405 if (minimumRequiredChunkSizeBytes > alignment)
406 return alignment + (minimumRequiredChunkSizeBytes / alignment) * alignment;
411 class StoreTest : public TestCase
416 FLAG_SINGLE_LAYER_BIND = 0x1, //!< Run the shader multiple times, each time binding a different layer.
419 StoreTest (tcu::TestContext& testCtx,
420 const std::string& name,
421 const std::string& description,
422 const Texture& texture,
423 const VkFormat format,
424 const TestFlags flags = static_cast<TestFlags>(0));
426 void initPrograms (SourceCollections& programCollection) const;
428 TestInstance* createInstance (Context& context) const;
431 const Texture m_texture;
432 const VkFormat m_format;
433 const bool m_singleLayerBind;
436 StoreTest::StoreTest (tcu::TestContext& testCtx,
437 const std::string& name,
438 const std::string& description,
439 const Texture& texture,
440 const VkFormat format,
441 const TestFlags flags)
442 : TestCase (testCtx, name, description)
443 , m_texture (texture)
445 , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
447 if (m_singleLayerBind)
448 DE_ASSERT(m_texture.numLayers() > 1);
// Build the compute shader that stores an XOR pattern into the image.
// NOTE(review): this block appears truncated in this view (several stream
// fragments such as the first vec4 component and closing braces of the GLSL
// source are not visible) — verify against the full file before relying on it.
void StoreTest::initPrograms (SourceCollections& programCollection) const
// Scale/bias so stored values fit normalized formats; identity for int/float formats.
const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
const float storeColorBias = computeStoreColorBias(m_format);
DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));
// Texts substituted into the GLSL source below.
const std::string xMax = de::toString(m_texture.size().x() - 1);
const std::string yMax = de::toString(m_texture.size().y() - 1);
const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
// XOR-of-coordinates pattern; must match generateReferenceImage().
const std::string colorBaseExpr = signednessPrefix + "vec4("
+ "(" + xMax + "-gx)^gy^gz, "
+ "gx^(" + yMax + "-gy)^gz, "
+ "(" + xMax + "-gx)^(" + yMax + "-gy)^gz)";
const std::string colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
+ (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");
// When binding single layers, the shader sees a lower-dimensional image.
const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");
const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);
std::ostringstream src;
src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
<< "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
<< "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform highp " << imageTypeStr << " u_image;\n";
// Layer index is passed via a UBO when binding one layer at a time.
if (m_singleLayerBind)
src << "layout (binding = 1) readonly uniform Constants {\n"
<< " int u_layerNdx;\n"
<< "void main (void)\n"
<< " int gx = int(gl_GlobalInvocationID.x);\n"
<< " int gy = int(gl_GlobalInvocationID.y);\n"
<< " int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
<< " imageStore(u_image, " << texelCoordStr << ", " << colorExpr << ");\n"
programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
499 //! Generic test iteration algorithm for image tests
500 class BaseTestInstance : public TestInstance
503 BaseTestInstance (Context& context,
504 const Texture& texture,
505 const VkFormat format,
506 const bool singleLayerBind);
508 tcu::TestStatus iterate (void);
510 virtual ~BaseTestInstance (void) {}
513 virtual VkDescriptorSetLayout prepareDescriptors (void) = 0;
514 virtual tcu::TestStatus verifyResult (void) = 0;
516 virtual void commandBeforeCompute (const VkCommandBuffer cmdBuffer) = 0;
517 virtual void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer) = 0;
518 virtual void commandAfterCompute (const VkCommandBuffer cmdBuffer) = 0;
520 virtual void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
521 const VkPipelineLayout pipelineLayout,
522 const int layerNdx) = 0;
524 const Texture m_texture;
525 const VkFormat m_format;
526 const bool m_singleLayerBind;
529 BaseTestInstance::BaseTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
530 : TestInstance (context)
531 , m_texture (texture)
533 , m_singleLayerBind (singleLayerBind)
537 tcu::TestStatus BaseTestInstance::iterate (void)
539 const DeviceInterface& vk = m_context.getDeviceInterface();
540 const VkDevice device = m_context.getDevice();
541 const VkQueue queue = m_context.getUniversalQueue();
542 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
544 const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));
546 const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
547 const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
548 const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
550 const Unique<VkCommandPool> cmdPool(makeCommandPool(vk, device, queueFamilyIndex));
551 const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));
553 beginCommandBuffer(vk, *cmdBuffer);
555 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
556 commandBeforeCompute(*cmdBuffer);
558 const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
559 const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
560 for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
562 commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);
565 commandBetweenShaderInvocations(*cmdBuffer);
567 vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
570 commandAfterCompute(*cmdBuffer);
572 endCommandBuffer(vk, *cmdBuffer);
574 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
576 return verifyResult();
579 //! Base store test implementation
580 class StoreTestInstance : public BaseTestInstance
583 StoreTestInstance (Context& context,
584 const Texture& texture,
585 const VkFormat format,
586 const bool singleLayerBind);
589 tcu::TestStatus verifyResult (void);
591 // Add empty implementations for functions that might be not needed
592 void commandBeforeCompute (const VkCommandBuffer) {}
593 void commandBetweenShaderInvocations (const VkCommandBuffer) {}
594 void commandAfterCompute (const VkCommandBuffer) {}
596 de::MovePtr<Buffer> m_imageBuffer;
597 const VkDeviceSize m_imageSizeBytes;
600 StoreTestInstance::StoreTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
601 : BaseTestInstance (context, texture, format, singleLayerBind)
602 , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
604 const DeviceInterface& vk = m_context.getDeviceInterface();
605 const VkDevice device = m_context.getDevice();
606 Allocator& allocator = m_context.getDefaultAllocator();
608 // A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.
610 m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
611 vk, device, allocator,
612 makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
613 MemoryRequirement::HostVisible));
616 tcu::TestStatus StoreTestInstance::verifyResult (void)
618 const DeviceInterface& vk = m_context.getDeviceInterface();
619 const VkDevice device = m_context.getDevice();
621 const tcu::IVec3 imageSize = m_texture.size();
622 const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format);
624 const Allocation& alloc = m_imageBuffer->getAllocation();
625 invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
626 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize, alloc.getHostPtr());
628 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
629 return tcu::TestStatus::pass("Passed");
631 return tcu::TestStatus::fail("Image comparison failed");
634 //! Store test for images
635 class ImageStoreTestInstance : public StoreTestInstance
638 ImageStoreTestInstance (Context& context,
639 const Texture& texture,
640 const VkFormat format,
641 const bool singleLayerBind);
644 VkDescriptorSetLayout prepareDescriptors (void);
645 void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
646 void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
647 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
649 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
650 const VkPipelineLayout pipelineLayout,
653 de::MovePtr<Image> m_image;
654 de::MovePtr<Buffer> m_constantsBuffer;
655 const VkDeviceSize m_constantsBufferChunkSizeBytes;
656 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
657 Move<VkDescriptorPool> m_descriptorPool;
658 DynArray<Move<VkDescriptorSet> > m_allDescriptorSets;
659 DynArray<Move<VkImageView> > m_allImageViews;
662 ImageStoreTestInstance::ImageStoreTestInstance (Context& context,
663 const Texture& texture,
664 const VkFormat format,
665 const bool singleLayerBind)
666 : StoreTestInstance (context, texture, format, singleLayerBind)
667 , m_constantsBufferChunkSizeBytes (getOptimalUniformBufferChunkSize(context, sizeof(deUint32)))
668 , m_allDescriptorSets (texture.numLayers())
669 , m_allImageViews (texture.numLayers())
671 const DeviceInterface& vk = m_context.getDeviceInterface();
672 const VkDevice device = m_context.getDevice();
673 Allocator& allocator = m_context.getDefaultAllocator();
675 m_image = de::MovePtr<Image>(new Image(
676 vk, device, allocator,
677 makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
678 MemoryRequirement::Any));
680 // This buffer will be used to pass constants to the shader
682 const int numLayers = m_texture.numLayers();
683 const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
684 m_constantsBuffer = de::MovePtr<Buffer>(new Buffer(
685 vk, device, allocator,
686 makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
687 MemoryRequirement::HostVisible));
690 const Allocation& alloc = m_constantsBuffer->getAllocation();
691 deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());
693 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));
695 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
697 deUint32* valuePtr = reinterpret_cast<deUint32*>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
698 *valuePtr = static_cast<deUint32>(layerNdx);
701 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), constantsBufferSizeBytes);
705 VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors (void)
707 const DeviceInterface& vk = m_context.getDeviceInterface();
708 const VkDevice device = m_context.getDevice();
710 const int numLayers = m_texture.numLayers();
711 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
712 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
713 .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
716 m_descriptorPool = DescriptorPoolBuilder()
717 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
718 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
719 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
721 if (m_singleLayerBind)
723 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
725 m_allDescriptorSets[layerNdx] = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
726 m_allImageViews[layerNdx] = makeImageView(vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
727 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u));
730 else // bind all layers at once
732 m_allDescriptorSets[0] = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
733 m_allImageViews[0] = makeImageView(vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
734 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers));
737 return *m_descriptorSetLayout; // not passing the ownership
740 void ImageStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
742 const DeviceInterface& vk = m_context.getDeviceInterface();
743 const VkDevice device = m_context.getDevice();
745 const VkDescriptorSet descriptorSet = *m_allDescriptorSets[layerNdx];
746 const VkImageView imageView = *m_allImageViews[layerNdx];
748 const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
750 // Set the next chunk of the constants buffer. Each chunk begins with layer index that we've set before.
751 const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
752 m_constantsBuffer->get(), layerNdx*m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);
754 DescriptorSetUpdateBuilder()
755 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
756 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
758 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
761 void ImageStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
763 const DeviceInterface& vk = m_context.getDeviceInterface();
765 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
766 const VkImageMemoryBarrier setImageLayoutBarrier = makeImageMemoryBarrier(
768 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
769 m_image->get(), fullImageSubresourceRange);
771 const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
772 const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
773 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
774 m_constantsBuffer->get(), 0ull, constantsBufferSize);
776 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &writeConstantsBarrier, 1, &setImageLayoutBarrier);
779 void ImageStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
781 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
784 void ImageStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
786 commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
789 //! Store test for buffers
790 class BufferStoreTestInstance : public StoreTestInstance
793 BufferStoreTestInstance (Context& context,
794 const Texture& texture,
795 const VkFormat format);
798 VkDescriptorSetLayout prepareDescriptors (void);
799 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
801 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
802 const VkPipelineLayout pipelineLayout,
805 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
806 Move<VkDescriptorPool> m_descriptorPool;
807 Move<VkDescriptorSet> m_descriptorSet;
808 Move<VkBufferView> m_bufferView;
811 BufferStoreTestInstance::BufferStoreTestInstance (Context& context,
812 const Texture& texture,
813 const VkFormat format)
814 : StoreTestInstance(context, texture, format, false)
818 VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors (void)
820 const DeviceInterface& vk = m_context.getDeviceInterface();
821 const VkDevice device = m_context.getDevice();
823 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
824 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
827 m_descriptorPool = DescriptorPoolBuilder()
828 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
829 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
831 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
832 m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
834 return *m_descriptorSetLayout; // not passing the ownership
837 void BufferStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
839 DE_ASSERT(layerNdx == 0);
842 const VkDevice device = m_context.getDevice();
843 const DeviceInterface& vk = m_context.getDeviceInterface();
845 DescriptorSetUpdateBuilder()
846 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
848 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
851 void BufferStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
853 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(), m_imageSizeBytes);
856 class LoadStoreTest : public TestCase
861 FLAG_SINGLE_LAYER_BIND = 1 << 0, //!< Run the shader multiple times, each time binding a different layer.
862 FLAG_RESTRICT_IMAGES = 1 << 1, //!< If given, images in the shader will be qualified with "restrict".
865 LoadStoreTest (tcu::TestContext& testCtx,
866 const std::string& name,
867 const std::string& description,
868 const Texture& texture,
869 const VkFormat format,
870 const VkFormat imageFormat,
871 const TestFlags flags = static_cast<TestFlags>(0));
873 void initPrograms (SourceCollections& programCollection) const;
874 TestInstance* createInstance (Context& context) const;
877 const Texture m_texture;
878 const VkFormat m_format; //!< Format as accessed in the shader
879 const VkFormat m_imageFormat; //!< Storage format
880 const bool m_singleLayerBind;
881 const bool m_restrictImages;
884 LoadStoreTest::LoadStoreTest (tcu::TestContext& testCtx,
885 const std::string& name,
886 const std::string& description,
887 const Texture& texture,
888 const VkFormat format,
889 const VkFormat imageFormat,
890 const TestFlags flags)
891 : TestCase (testCtx, name, description)
892 , m_texture (texture)
894 , m_imageFormat (imageFormat)
895 , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
896 , m_restrictImages ((flags & FLAG_RESTRICT_IMAGES) != 0)
898 if (m_singleLayerBind)
899 DE_ASSERT(m_texture.numLayers() > 1);
901 DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
904 void LoadStoreTest::initPrograms (SourceCollections& programCollection) const
906 const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
907 const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
908 const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
909 const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);
910 const std::string maybeRestrictStr = (m_restrictImages ? "restrict " : "");
911 const std::string xMax = de::toString(m_texture.size().x() - 1);
913 std::ostringstream src;
914 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
916 << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
917 << "layout (binding = 0, " << formatQualifierStr << ") " << maybeRestrictStr << "readonly uniform highp " << imageTypeStr << " u_image0;\n"
918 << "layout (binding = 1, " << formatQualifierStr << ") " << maybeRestrictStr << "writeonly uniform highp " << imageTypeStr << " u_image1;\n"
920 << "void main (void)\n"
923 " int pos = int(gl_GlobalInvocationID.x);\n"
924 " imageStore(u_image1, pos, imageLoad(u_image0, " + xMax + "-pos));\n"
926 " ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
927 " imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" + xMax + "-pos.x, pos.y)));\n"
929 " ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
930 " imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" + xMax + "-pos.x, pos.y, pos.z)));\n"
934 programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
937 //! Load/store test base implementation
938 class LoadStoreTestInstance : public BaseTestInstance
941 LoadStoreTestInstance (Context& context,
942 const Texture& texture,
943 const VkFormat format,
944 const VkFormat imageFormat,
945 const bool singleLayerBind);
948 virtual Buffer* getResultBuffer (void) const = 0; //!< Get the buffer that contains the result image
950 tcu::TestStatus verifyResult (void);
952 // Add empty implementations for functions that might be not needed
953 void commandBeforeCompute (const VkCommandBuffer) {}
954 void commandBetweenShaderInvocations (const VkCommandBuffer) {}
955 void commandAfterCompute (const VkCommandBuffer) {}
957 de::MovePtr<Buffer> m_imageBuffer; //!< Source data and helper buffer
958 const VkDeviceSize m_imageSizeBytes;
959 const VkFormat m_imageFormat; //!< Image format (for storage, may be different than texture format)
960 tcu::TextureLevel m_referenceImage; //!< Used as input data and later to verify result image
963 LoadStoreTestInstance::LoadStoreTestInstance (Context& context,
964 const Texture& texture,
965 const VkFormat format,
966 const VkFormat imageFormat,
967 const bool singleLayerBind)
968 : BaseTestInstance (context, texture, format, singleLayerBind)
969 , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
970 , m_imageFormat (imageFormat)
971 , m_referenceImage (generateReferenceImage(texture.size(), imageFormat, format))
973 const DeviceInterface& vk = m_context.getDeviceInterface();
974 const VkDevice device = m_context.getDevice();
975 Allocator& allocator = m_context.getDefaultAllocator();
977 // A helper buffer with enough space to hold the whole image.
979 m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
980 vk, device, allocator,
981 makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
982 MemoryRequirement::HostVisible));
984 // Copy reference data to buffer for subsequent upload to image.
986 const Allocation& alloc = m_imageBuffer->getAllocation();
987 deMemcpy(alloc.getHostPtr(), m_referenceImage.getAccess().getDataPtr(), static_cast<size_t>(m_imageSizeBytes));
988 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
991 tcu::TestStatus LoadStoreTestInstance::verifyResult (void)
993 const DeviceInterface& vk = m_context.getDeviceInterface();
994 const VkDevice device = m_context.getDevice();
996 // Apply the same transformation as done in the shader
997 const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
998 flipHorizontally(reference);
1000 const Allocation& alloc = getResultBuffer()->getAllocation();
1001 invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
1002 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(), alloc.getHostPtr());
1004 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
1005 return tcu::TestStatus::pass("Passed");
1007 return tcu::TestStatus::fail("Image comparison failed");
1010 //! Load/store test for images
1011 class ImageLoadStoreTestInstance : public LoadStoreTestInstance
1016 PerLayerData (Move<VkDescriptorSet> descriptorSet,
1017 Move<VkImageView> imageViewSrc,
1018 Move<VkImageView> imageViewDst);
1020 const Unique<VkDescriptorSet> descriptorSet;
1021 const Unique<VkImageView> imageViewSrc;
1022 const Unique<VkImageView> imageViewDst;
1025 ImageLoadStoreTestInstance (Context& context,
1026 const Texture& texture,
1027 const VkFormat format,
1028 const VkFormat imageFormat,
1029 const bool singleLayerBind);
1032 VkDescriptorSetLayout prepareDescriptors (void);
1033 void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
1034 void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
1035 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1037 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1038 const VkPipelineLayout pipelineLayout,
1039 const int layerNdx);
1041 Buffer* getResultBuffer (void) const { return m_imageBuffer.get(); }
1043 de::MovePtr<Image> m_imageSrc;
1044 de::MovePtr<Image> m_imageDst;
1045 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1046 Move<VkDescriptorPool> m_descriptorPool;
1047 DynArray<de::MovePtr<PerLayerData> > m_perLayerData;
1050 ImageLoadStoreTestInstance::PerLayerData::PerLayerData (Move<VkDescriptorSet> descriptorSet_,
1051 Move<VkImageView> imageViewSrc_,
1052 Move<VkImageView> imageViewDst_)
1053 : descriptorSet (descriptorSet_)
1054 , imageViewSrc (imageViewSrc_)
1055 , imageViewDst (imageViewDst_)
1059 ImageLoadStoreTestInstance::ImageLoadStoreTestInstance (Context& context,
1060 const Texture& texture,
1061 const VkFormat format,
1062 const VkFormat imageFormat,
1063 const bool singleLayerBind)
1064 : LoadStoreTestInstance (context, texture, format, imageFormat, singleLayerBind)
1065 , m_perLayerData (texture.numLayers())
1067 const DeviceInterface& vk = m_context.getDeviceInterface();
1068 const VkDevice device = m_context.getDevice();
1069 Allocator& allocator = m_context.getDefaultAllocator();
1070 const VkImageCreateFlags imageFlags = (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
1072 m_imageSrc = de::MovePtr<Image>(new Image(
1073 vk, device, allocator,
1074 makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, imageFlags),
1075 MemoryRequirement::Any));
1077 m_imageDst = de::MovePtr<Image>(new Image(
1078 vk, device, allocator,
1079 makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, imageFlags),
1080 MemoryRequirement::Any));
1083 VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors (void)
1085 const VkDevice device = m_context.getDevice();
1086 const DeviceInterface& vk = m_context.getDeviceInterface();
1088 const int numLayers = m_texture.numLayers();
1089 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1090 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1091 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1094 m_descriptorPool = DescriptorPoolBuilder()
1095 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1096 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1097 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
1099 if (m_singleLayerBind)
1101 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
1103 const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
1104 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);
1106 de::MovePtr<PerLayerData> data(new PerLayerData(
1107 makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout),
1108 makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange),
1109 makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange)));
1111 m_perLayerData[layerNdx] = data;
1114 else // bind all layers at once
1116 const VkImageViewType viewType = mapImageViewType(m_texture.type());
1117 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);
1119 de::MovePtr<PerLayerData> data(new PerLayerData(
1120 makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout),
1121 makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange),
1122 makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange)));
1124 m_perLayerData[0] = data;
1127 return *m_descriptorSetLayout; // not passing the ownership
1130 void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1132 const VkDevice device = m_context.getDevice();
1133 const DeviceInterface& vk = m_context.getDeviceInterface();
1135 const PerLayerData* data = m_perLayerData[layerNdx].get();
1137 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, *data->imageViewSrc, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
1138 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, *data->imageViewDst, VK_IMAGE_LAYOUT_GENERAL);
1140 DescriptorSetUpdateBuilder()
1141 .writeSingle(*data->descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1142 .writeSingle(*data->descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1143 .update(vk, device);
1144 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &data->descriptorSet.get(), 0u, DE_NULL);
1147 void ImageLoadStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1149 const DeviceInterface& vk = m_context.getDeviceInterface();
1151 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
1153 const VkImageMemoryBarrier preCopyImageBarriers[] =
1155 makeImageMemoryBarrier(
1157 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1158 m_imageSrc->get(), fullImageSubresourceRange),
1159 makeImageMemoryBarrier(
1161 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1162 m_imageDst->get(), fullImageSubresourceRange)
1165 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1166 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1167 m_imageBuffer->get(), 0ull, m_imageSizeBytes);
1169 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1170 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1173 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1174 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1175 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
1176 m_imageSrc->get(), fullImageSubresourceRange);
1178 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
1180 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, ©Region);
1181 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
1185 void ImageLoadStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
1187 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
1190 void ImageLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1192 commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
1195 //! Load/store test for buffers
1196 class BufferLoadStoreTestInstance : public LoadStoreTestInstance
1199 BufferLoadStoreTestInstance (Context& context,
1200 const Texture& texture,
1201 const VkFormat format,
1202 const VkFormat imageFormat);
1205 VkDescriptorSetLayout prepareDescriptors (void);
1206 void commandAfterCompute (const VkCommandBuffer cmdBuffer);
1208 void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
1209 const VkPipelineLayout pipelineLayout,
1210 const int layerNdx);
1212 Buffer* getResultBuffer (void) const { return m_imageBufferDst.get(); }
1214 de::MovePtr<Buffer> m_imageBufferDst;
1215 Move<VkDescriptorSetLayout> m_descriptorSetLayout;
1216 Move<VkDescriptorPool> m_descriptorPool;
1217 Move<VkDescriptorSet> m_descriptorSet;
1218 Move<VkBufferView> m_bufferViewSrc;
1219 Move<VkBufferView> m_bufferViewDst;
1222 BufferLoadStoreTestInstance::BufferLoadStoreTestInstance (Context& context,
1223 const Texture& texture,
1224 const VkFormat format,
1225 const VkFormat imageFormat)
1226 : LoadStoreTestInstance(context, texture, format, imageFormat, false)
1228 const DeviceInterface& vk = m_context.getDeviceInterface();
1229 const VkDevice device = m_context.getDevice();
1230 Allocator& allocator = m_context.getDefaultAllocator();
1232 // Create a destination buffer.
1234 m_imageBufferDst = de::MovePtr<Buffer>(new Buffer(
1235 vk, device, allocator,
1236 makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
1237 MemoryRequirement::HostVisible));
1240 VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors (void)
1242 const DeviceInterface& vk = m_context.getDeviceInterface();
1243 const VkDevice device = m_context.getDevice();
1245 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1246 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1247 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1250 m_descriptorPool = DescriptorPoolBuilder()
1251 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1252 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1253 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1255 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1256 m_bufferViewSrc = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
1257 m_bufferViewDst = makeBufferView(vk, device, m_imageBufferDst->get(), m_format, 0ull, m_imageSizeBytes);
1259 return *m_descriptorSetLayout; // not passing the ownership
1262 void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1264 DE_ASSERT(layerNdx == 0);
1267 const VkDevice device = m_context.getDevice();
1268 const DeviceInterface& vk = m_context.getDeviceInterface();
1270 DescriptorSetUpdateBuilder()
1271 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewSrc.get())
1272 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
1273 .update(vk, device);
1274 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
1277 void BufferLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1279 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(), m_imageSizeBytes);
1282 TestInstance* StoreTest::createInstance (Context& context) const
1284 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1285 return new BufferStoreTestInstance(context, m_texture, m_format);
1287 return new ImageStoreTestInstance(context, m_texture, m_format, m_singleLayerBind);
1290 TestInstance* LoadStoreTest::createInstance (Context& context) const
1292 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1293 return new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat);
1295 return new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_singleLayerBind);
1298 // TODO Which image/format combinations should be supported? Spec says it should be queried with vkGetPhysicalDeviceImageFormatProperties.
1299 // What about buffer/format? (texel storage buffer) (use vkGetPhysicalDeviceFormatProperties ?)
1301 static const Texture s_textures[] =
1303 Texture(IMAGE_TYPE_1D, tcu::IVec3(64, 1, 1), 1),
1304 Texture(IMAGE_TYPE_1D_ARRAY, tcu::IVec3(64, 1, 1), 8),
1305 Texture(IMAGE_TYPE_2D, tcu::IVec3(64, 64, 1), 1),
1306 Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(64, 64, 1), 8),
1307 Texture(IMAGE_TYPE_3D, tcu::IVec3(64, 64, 8), 1),
1308 Texture(IMAGE_TYPE_CUBE, tcu::IVec3(64, 64, 1), 6),
1309 Texture(IMAGE_TYPE_CUBE_ARRAY, tcu::IVec3(64, 64, 1), 2*6),
1310 Texture(IMAGE_TYPE_BUFFER, tcu::IVec3(64, 1, 1), 1),
1313 const Texture& getTestTexture (const ImageType imageType)
1315 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1316 if (s_textures[textureNdx].type() == imageType)
1317 return s_textures[textureNdx];
1319 DE_FATAL("Internal error");
1320 return s_textures[0];
1323 static const VkFormat s_formats[] =
1325 VK_FORMAT_R32G32B32A32_SFLOAT,
1326 VK_FORMAT_R16G16B16A16_SFLOAT,
1327 VK_FORMAT_R32_SFLOAT,
1329 VK_FORMAT_R32G32B32A32_UINT,
1330 VK_FORMAT_R16G16B16A16_UINT,
1331 VK_FORMAT_R8G8B8A8_UINT,
1334 VK_FORMAT_R32G32B32A32_SINT,
1335 VK_FORMAT_R16G16B16A16_SINT,
1336 VK_FORMAT_R8G8B8A8_SINT,
1339 VK_FORMAT_R8G8B8A8_UNORM,
1341 VK_FORMAT_R8G8B8A8_SNORM,
1346 tcu::TestCaseGroup* createImageStoreTests (tcu::TestContext& testCtx)
1348 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store", "Plain imageStore() cases"));
1350 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1352 const Texture& texture = s_textures[textureNdx];
1353 de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1354 const bool isLayered = (texture.numLayers() > 1);
1356 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1358 groupByImageViewType->addChild(new StoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]), "", texture, s_formats[formatNdx]));
1361 groupByImageViewType->addChild(new StoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]) + "_single_layer", "",
1362 texture, s_formats[formatNdx], StoreTest::FLAG_SINGLE_LAYER_BIND));
1364 testGroup->addChild(groupByImageViewType.release());
1367 return testGroup.release();
1370 tcu::TestCaseGroup* createImageLoadStoreTests (tcu::TestContext& testCtx)
1372 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store", "Cases with imageLoad() followed by imageStore()"));
1374 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1376 const Texture& texture = s_textures[textureNdx];
1377 de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1378 const bool isLayered = (texture.numLayers() > 1);
1380 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1382 groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]), "",
1383 texture, s_formats[formatNdx], s_formats[formatNdx]));
1386 groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatCaseName(s_formats[formatNdx]) + "_single_layer", "",
1387 texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_SINGLE_LAYER_BIND));
1389 testGroup->addChild(groupByImageViewType.release());
1392 return testGroup.release();
1395 tcu::TestCaseGroup* createImageFormatReinterpretTests (tcu::TestContext& testCtx)
1397 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret", "Cases with differing texture and image formats"));
1399 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1401 const Texture& texture = s_textures[textureNdx];
1402 de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1404 for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
1405 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1407 //TODO Are all conversions valid or do we have to limit (or expand) somehow? Is it stated anywhere in the spec?
1409 const std::string caseName = getFormatCaseName(s_formats[imageFormatNdx]) + "_" + getFormatCaseName(s_formats[formatNdx]);
1410 if (imageFormatNdx != formatNdx && formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
1411 groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, "", texture, s_formats[formatNdx], s_formats[imageFormatNdx]));
1413 testGroup->addChild(groupByImageViewType.release());
1416 return testGroup.release();
1419 de::MovePtr<TestCase> createImageQualifierRestrictCase (tcu::TestContext& testCtx, const ImageType imageType, const std::string& name)
1421 const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
1422 const Texture& texture = getTestTexture(imageType);
1423 return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, "", texture, format, format, LoadStoreTest::FLAG_RESTRICT_IMAGES));