1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 The Android Open Source Project
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Image load/store Tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktImageLoadStoreTests.hpp"
26 #include "vktTestCaseUtil.hpp"
27 #include "vktImageTestsUtil.hpp"
28 #include "vktImageLoadStoreUtil.hpp"
29 #include "vktImageTexture.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkPrograms.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkBuilderUtil.hpp"
38 #include "vkQueryUtil.hpp"
39 #include "vkImageUtil.hpp"
41 #include "deUniquePtr.hpp"
42 #include "deSharedPtr.hpp"
43 #include "deStringUtil.hpp"
45 #include "tcuImageCompare.hpp"
46 #include "tcuTexture.hpp"
47 #include "tcuTextureUtil.hpp"
48 #include "tcuFloat.hpp"
62 inline VkBufferImageCopy makeBufferImageCopy (const Texture& texture)
64 return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
//! Return a view of a single layer (for arrayed images) or Z slice (for 3D) of 'access'.
//  NOTE(review): some case labels and braces of the original switch are not visible
//  in this extract — comments only added here; code left untouched.
tcu::ConstPixelBufferAccess getLayerOrSlice (const Texture& texture, const tcu::ConstPixelBufferAccess access, const int layer)
	switch (texture.type())

		case IMAGE_TYPE_BUFFER:
			// Non-arrayed case: the whole access already is the single "layer".
			DE_ASSERT(layer == 0);

		case IMAGE_TYPE_1D_ARRAY:
			// 1D arrays stack their layers along the Y axis of the access.
			return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);

		case IMAGE_TYPE_2D_ARRAY:

		case IMAGE_TYPE_CUBE_ARRAY:
		case IMAGE_TYPE_3D: // 3d texture is treated as if depth was the layers
			// Layered 2D cases and 3D: select one full-size Z slice.
			return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);

		// Unreachable for valid texture types.
		DE_FATAL("Internal test error");
		return tcu::ConstPixelBufferAccess();
//! \return true if all layers match in both pixel buffers
//  Compares layer by layer (or slice by slice for 3D), logging each comparison.
//  NOTE(review): the `bool ok` declaration and the int/float selection glue
//  around the two compare calls are not visible in this extract.
bool comparePixelBuffers (tcu::TestLog& log,
const Texture& texture,
const VkFormat format,
const tcu::ConstPixelBufferAccess reference,
const tcu::ConstPixelBufferAccess result)
	// Both buffers must describe identically-shaped data.
	DE_ASSERT(reference.getFormat() == result.getFormat());
	DE_ASSERT(reference.getSize() == result.getSize());

	// Integer formats compare exactly; others use a small float threshold below.
	const bool intFormat = isIntegerFormat(format);
	const bool is3d = (texture.type() == IMAGE_TYPE_3D);
	const int numLayersOrSlices = (is3d ? texture.size().z() : texture.numLayers());
	const int numCubeFaces = 6;

	int passedLayers = 0;
	for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
		// Human-readable name: face/cube for cube maps, slice for 3D, layer otherwise.
		const std::string comparisonName = "Comparison" + de::toString(layerNdx);
		const std::string comparisonDesc = "Image Comparison, " +
		(isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " + de::toString(layerNdx / numCubeFaces) :
		is3d ? "slice " + de::toString(layerNdx) : "layer " + de::toString(layerNdx));

		const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
		const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);

		// Exact comparison (zero threshold) for integer formats...
		ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
		// ...small absolute threshold for normalized/float formats.
		ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);

	// Pass only when every layer/slice compared equal.
	return passedLayers == numLayersOrSlices;
131 //!< Zero out invalid pixels in the image (denormalized, infinite, NaN values)
132 void replaceBadFloatReinterpretValues (const tcu::PixelBufferAccess access)
134 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT);
136 for (int z = 0; z < access.getDepth(); ++z)
137 for (int y = 0; y < access.getHeight(); ++y)
138 for (int x = 0; x < access.getWidth(); ++x)
140 const tcu::Vec4 color(access.getPixel(x, y, z));
141 tcu::Vec4 newColor = color;
143 for (int i = 0; i < 4; ++i)
145 if (access.getFormat().type == tcu::TextureFormat::HALF_FLOAT)
147 const tcu::Float16 f(color[i]);
148 if (f.isDenorm() || f.isInf() || f.isNaN())
153 const tcu::Float32 f(color[i]);
154 if (f.isDenorm() || f.isInf() || f.isNaN())
159 if (newColor != color)
160 access.setPixel(newColor, x, y, z);
164 //!< replace invalid pixels in the image (-128)
165 void replaceSnormReinterpretValues (const tcu::PixelBufferAccess access)
167 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);
169 for (int z = 0; z < access.getDepth(); ++z)
170 for (int y = 0; y < access.getHeight(); ++y)
171 for (int x = 0; x < access.getWidth(); ++x)
173 const tcu::IVec4 color(access.getPixelInt(x, y, z));
174 tcu::IVec4 newColor = color;
176 for (int i = 0; i < 4; ++i)
178 const deInt32 oldColor(color[i]);
179 if (oldColor == -128) newColor[i] = -127;
182 if (newColor != color)
183 access.setPixel(newColor, x, y, z);
187 tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, const VkFormat readFormat)
189 // Generate a reference image data using the storage format
191 tcu::TextureLevel reference(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
192 const tcu::PixelBufferAccess access = reference.getAccess();
194 const float storeColorScale = computeStoreColorScale(imageFormat, imageSize);
195 const float storeColorBias = computeStoreColorBias(imageFormat);
197 const bool intFormat = isIntegerFormat(imageFormat);
198 const int xMax = imageSize.x() - 1;
199 const int yMax = imageSize.y() - 1;
201 for (int z = 0; z < imageSize.z(); ++z)
202 for (int y = 0; y < imageSize.y(); ++y)
203 for (int x = 0; x < imageSize.x(); ++x)
205 const tcu::IVec4 color(x^y^z, (xMax - x)^y^z, x^(yMax - y)^z, (xMax - x)^(yMax - y)^z);
208 access.setPixel(color, x, y, z);
210 access.setPixel(color.asFloat()*storeColorScale + storeColorBias, x, y, z);
213 // If the image is to be accessed as a float texture, get rid of invalid values
215 if (isFloatFormat(readFormat) && imageFormat != readFormat)
216 replaceBadFloatReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
217 if (isSnormFormat(readFormat) && imageFormat != readFormat)
218 replaceSnormReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
223 inline tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat)
225 return generateReferenceImage(imageSize, imageFormat, imageFormat);
228 void flipHorizontally (const tcu::PixelBufferAccess access)
230 const int xMax = access.getWidth() - 1;
231 const int halfWidth = access.getWidth() / 2;
233 if (isIntegerFormat(mapTextureFormat(access.getFormat())))
234 for (int z = 0; z < access.getDepth(); z++)
235 for (int y = 0; y < access.getHeight(); y++)
236 for (int x = 0; x < halfWidth; x++)
238 const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
239 access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
240 access.setPixel(temp, x, y, z);
243 for (int z = 0; z < access.getDepth(); z++)
244 for (int y = 0; y < access.getHeight(); y++)
245 for (int x = 0; x < halfWidth; x++)
247 const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
248 access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
249 access.setPixel(temp, x, y, z);
253 inline bool formatsAreCompatible (const VkFormat format0, const VkFormat format1)
255 return format0 == format1 || mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize();
//! Record a barrier between successive compute dispatches that write the same image.
void commandImageWriteBarrierBetweenShaderInvocations (Context& context, const VkCommandBuffer cmdBuffer, const VkImage image, const Texture& texture)
	const DeviceInterface& vk = context.getDeviceInterface();

	// Every layer of the single mip level participates in the dependency.
	const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
	// srcAccess = shader writes, dstAccess = 0 — NOTE(review): a zero destination
	// access mask makes this effectively an execution dependency only; presumably
	// the post-compute copy barrier handles memory visibility — confirm intent.
	const VkImageMemoryBarrier shaderWriteBarrier = makeImageMemoryBarrier(
	VK_ACCESS_SHADER_WRITE_BIT, 0u,
	VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
	image, fullImageSubresourceRange);

	// Compute -> compute, image barrier only.
	vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier);
271 void commandBufferWriteBarrierBeforeHostRead (Context& context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer, const VkDeviceSize bufferSizeBytes)
273 const DeviceInterface& vk = context.getDeviceInterface();
275 const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
276 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
277 buffer, 0ull, bufferSizeBytes);
279 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
282 //! Copy all layers of an image to a buffer.
283 void commandCopyImageToBuffer (Context& context,
284 const VkCommandBuffer cmdBuffer,
286 const VkBuffer buffer,
287 const VkDeviceSize bufferSizeBytes,
288 const Texture& texture)
290 const DeviceInterface& vk = context.getDeviceInterface();
292 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
293 const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
294 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
295 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
296 image, fullImageSubresourceRange);
298 const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);
300 const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
301 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
302 buffer, 0ull, bufferSizeBytes);
304 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
305 vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, ©Region);
306 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, ©Barrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
//! Test case: a compute shader imageStore()s an XOR pattern into an image.
//  NOTE(review): access specifiers and the enum header are not visible in this
//  extract; comments only added here.
class StoreTest : public TestCase
		FLAG_SINGLE_LAYER_BIND = 0x1, //!< Run the shader multiple times, each time binding a different layer.

	StoreTest (tcu::TestContext& testCtx,
	const std::string& name,
	const std::string& description,
	const Texture& texture,
	const VkFormat format,
	const TestFlags flags = static_cast<TestFlags>(0));

	// Generates the "comp" compute shader that writes the XOR pattern.
	void initPrograms (SourceCollections& programCollection) const;

	// Creates an Image- or Buffer-store instance depending on the texture type.
	TestInstance* createInstance (Context& context) const;

	const Texture m_texture;			// texture parameters (type/size/layers)
	const VkFormat m_format;			// format the shader stores in
	const bool m_singleLayerBind;	// bind one layer per dispatch when set
334 StoreTest::StoreTest (tcu::TestContext& testCtx,
335 const std::string& name,
336 const std::string& description,
337 const Texture& texture,
338 const VkFormat format,
339 const TestFlags flags)
340 : TestCase (testCtx, name, description)
341 , m_texture (texture)
343 , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
345 if (m_singleLayerBind)
346 DE_ASSERT(m_texture.numLayers() > 1);
//! Emit the GLSL compute shader that stores the XOR reference pattern.
//  NOTE(review): the first vec4 component fragment and some stream-insertion
//  glue lines are not visible in this extract; comments only added here.
void StoreTest::initPrograms (SourceCollections& programCollection) const
	// Scale/bias map the integer XOR pattern into the format's storable range.
	const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
	const float storeColorBias = computeStoreColorBias(m_format);
	DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));

	const std::string xMax = de::toString(m_texture.size().x() - 1);
	const std::string yMax = de::toString(m_texture.size().y() - 1);
	// uvec4 / ivec4 / vec4 depending on format signedness.
	const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
	// Per-component XOR of (mirrored) invocation coords — must match
	// generateReferenceImage() for the comparison to succeed.
	const std::string colorBaseExpr = signednessPrefix + "vec4("
	+ "(" + xMax + "-gx)^gy^gz, "
	+ "gx^(" + yMax + "-gy)^gz, "
	+ "(" + xMax + "-gx)^(" + yMax + "-gy)^gz)";

	// Apply scale/bias only when they are non-trivial, keeping the GLSL short.
	const std::string colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
	+ (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");

	// Coordinate type depends on the dimensionality of the image actually bound.
	const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
	const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");

	const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
	const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
	const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);

	std::ostringstream src;
	src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
	<< "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
	<< "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr << " u_image;\n";

	// In single-layer mode the layer index arrives via a uniform block.
	if (m_singleLayerBind)
	src << "layout (binding = 1) readonly uniform Constants {\n"
	<< " int u_layerNdx;\n"

	<< "void main (void)\n"
	<< " int gx = int(gl_GlobalInvocationID.x);\n"
	<< " int gy = int(gl_GlobalInvocationID.y);\n"
	<< " int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
	<< " imageStore(u_image, " << texelCoordStr << ", " << colorExpr << ");\n"

	programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
//! Generic test iteration algorithm for image tests
//  Subclasses supply descriptors, per-phase command-buffer hooks, and verification.
class BaseTestInstance : public TestInstance
	BaseTestInstance (Context& context,
	const Texture& texture,
	const VkFormat format,
	const bool singleLayerBind);

	// Records commands, submits once, waits, then calls verifyResult().
	tcu::TestStatus iterate (void);

	virtual ~BaseTestInstance (void) {}

	// Build descriptor layout; returned handle is used for the pipeline layout.
	virtual VkDescriptorSetLayout prepareDescriptors (void) = 0;
	// Compare shader output against the reference; returns pass/fail status.
	virtual tcu::TestStatus verifyResult (void) = 0;

	// Hooks recorded around the dispatch loop (barriers, layout transitions, copies).
	virtual void commandBeforeCompute (const VkCommandBuffer cmdBuffer) = 0;
	virtual void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer) = 0;
	virtual void commandAfterCompute (const VkCommandBuffer cmdBuffer) = 0;

	// Bind the descriptor set appropriate for the given layer before a dispatch.
	virtual void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
	const VkPipelineLayout pipelineLayout,
	const int layerNdx) = 0;

	const Texture m_texture;			// texture parameters under test
	const VkFormat m_format;			// shader-visible format
	const bool m_singleLayerBind;	// one dispatch per layer when true
427 BaseTestInstance::BaseTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
428 : TestInstance (context)
429 , m_texture (texture)
431 , m_singleLayerBind (singleLayerBind)
//! One-shot iteration: build pipeline, record all dispatches, submit, verify.
tcu::TestStatus BaseTestInstance::iterate (void)
	const DeviceInterface& vk = m_context.getDeviceInterface();
	const VkDevice device = m_context.getDevice();
	const VkQueue queue = m_context.getUniversalQueue();
	const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();

	// Compute shader produced by the test case's initPrograms() under the name "comp".
	const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get("comp"), 0));

	// Descriptor layout comes from the concrete subclass; it keeps ownership.
	const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
	const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
	const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));

	const Unique<VkCommandPool> cmdPool(makeCommandPool(vk, device, queueFamilyIndex));
	const Unique<VkCommandBuffer> cmdBuffer(makeCommandBuffer(vk, device, *cmdPool));

	beginCommandBuffer(vk, *cmdBuffer);

	vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
	commandBeforeCompute(*cmdBuffer);

	// Single-layer mode: one dispatch per layer over the layer size;
	// otherwise a single dispatch covers the whole texture extent.
	const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
	const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
	for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
		commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);

		// NOTE(review): the original presumably guards this to iterations after
		// the first (guard line not visible in this extract) — confirm.
		commandBetweenShaderInvocations(*cmdBuffer);

		vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());

	commandAfterCompute(*cmdBuffer);

	endCommandBuffer(vk, *cmdBuffer);

	// Blocks until the queue is idle; results are host-readable afterwards.
	submitCommandsAndWait(vk, device, queue, *cmdBuffer);

	return verifyResult();
//! Base store test implementation
//  Owns a host-visible buffer the result image ends up in; subclasses decide
//  whether the shader writes an image (then copied here) or the buffer directly.
class StoreTestInstance : public BaseTestInstance
	StoreTestInstance (Context& context,
	const Texture& texture,
	const VkFormat format,
	const bool singleLayerBind);

	// Compares m_imageBuffer contents against the generated reference image.
	tcu::TestStatus verifyResult (void);

	// Add empty implementations for functions that might be not needed
	void commandBeforeCompute (const VkCommandBuffer) {}
	void commandBetweenShaderInvocations (const VkCommandBuffer) {}
	void commandAfterCompute (const VkCommandBuffer) {}

	de::MovePtr<Buffer> m_imageBuffer;		// host-visible buffer holding the result image
	const VkDeviceSize m_imageSizeBytes;	// total byte size of the whole image
498 StoreTestInstance::StoreTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool singleLayerBind)
499 : BaseTestInstance (context, texture, format, singleLayerBind)
500 , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
502 const DeviceInterface& vk = m_context.getDeviceInterface();
503 const VkDevice device = m_context.getDevice();
504 Allocator& allocator = m_context.getDefaultAllocator();
506 // A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.
508 m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
509 vk, device, allocator,
510 makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
511 MemoryRequirement::HostVisible));
514 tcu::TestStatus StoreTestInstance::verifyResult (void)
516 const DeviceInterface& vk = m_context.getDeviceInterface();
517 const VkDevice device = m_context.getDevice();
519 const tcu::IVec3 imageSize = m_texture.size();
520 const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format);
522 const Allocation& alloc = m_imageBuffer->getAllocation();
523 invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
524 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize, alloc.getHostPtr());
526 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
527 return tcu::TestStatus::pass("Passed");
529 return tcu::TestStatus::fail("Image comparison failed");
//! Store test for images
//  The shader writes into a storage image, which is then copied into the
//  inherited host-visible buffer for verification.
class ImageStoreTestInstance : public StoreTestInstance
	ImageStoreTestInstance (Context& context,
	const Texture& texture,
	const VkFormat format,
	const bool singleLayerBind);

	VkDescriptorSetLayout prepareDescriptors (void);
	// Layout transition + host-write visibility for constants before dispatching.
	void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
	// Barrier between per-layer dispatches writing the same image.
	void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
	// Copies the image into m_imageBuffer for host verification.
	void commandAfterCompute (const VkCommandBuffer cmdBuffer);

	// NOTE(review): the trailing layerNdx parameter line is not visible in
	// this extract (overrides the pure virtual in BaseTestInstance).
	void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
	const VkPipelineLayout pipelineLayout,

	de::MovePtr<Image> m_image;							// storage image written by the shader
	de::MovePtr<Buffer> m_constantsBuffer;				// per-layer uniform chunks (layer index)
	const VkDeviceSize m_constantsBufferChunkSizeBytes;	// alignment-respecting chunk stride
	Move<VkDescriptorSetLayout> m_descriptorSetLayout;
	Move<VkDescriptorPool> m_descriptorPool;
	std::vector<SharedVkDescriptorSet> m_allDescriptorSets;	// one per layer in single-layer mode, else [0] only
	std::vector<SharedVkImageView> m_allImageViews;			// matching views (per layer or whole image)
560 ImageStoreTestInstance::ImageStoreTestInstance (Context& context,
561 const Texture& texture,
562 const VkFormat format,
563 const bool singleLayerBind)
564 : StoreTestInstance (context, texture, format, singleLayerBind)
565 , m_constantsBufferChunkSizeBytes (getOptimalUniformBufferChunkSize(context.getInstanceInterface(), context.getPhysicalDevice(), sizeof(deUint32)))
566 , m_allDescriptorSets (texture.numLayers())
567 , m_allImageViews (texture.numLayers())
569 const DeviceInterface& vk = m_context.getDeviceInterface();
570 const VkDevice device = m_context.getDevice();
571 Allocator& allocator = m_context.getDefaultAllocator();
573 m_image = de::MovePtr<Image>(new Image(
574 vk, device, allocator,
575 makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
576 MemoryRequirement::Any));
578 // This buffer will be used to pass constants to the shader
580 const int numLayers = m_texture.numLayers();
581 const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
582 m_constantsBuffer = de::MovePtr<Buffer>(new Buffer(
583 vk, device, allocator,
584 makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
585 MemoryRequirement::HostVisible));
588 const Allocation& alloc = m_constantsBuffer->getAllocation();
589 deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());
591 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));
593 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
595 deUint32* valuePtr = reinterpret_cast<deUint32*>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
596 *valuePtr = static_cast<deUint32>(layerNdx);
599 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), constantsBufferSizeBytes);
603 VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors (void)
605 const DeviceInterface& vk = m_context.getDeviceInterface();
606 const VkDevice device = m_context.getDevice();
608 const int numLayers = m_texture.numLayers();
609 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
610 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
611 .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
614 m_descriptorPool = DescriptorPoolBuilder()
615 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
616 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
617 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
619 if (m_singleLayerBind)
621 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
623 m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
624 m_allImageViews[layerNdx] = makeVkSharedPtr(makeImageView(
625 vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
626 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
629 else // bind all layers at once
631 m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
632 m_allImageViews[0] = makeVkSharedPtr(makeImageView(
633 vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
634 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers)));
637 return *m_descriptorSetLayout; // not passing the ownership
640 void ImageStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
642 const DeviceInterface& vk = m_context.getDeviceInterface();
643 const VkDevice device = m_context.getDevice();
645 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
646 const VkImageView imageView = **m_allImageViews[layerNdx];
648 const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
650 // Set the next chunk of the constants buffer. Each chunk begins with layer index that we've set before.
651 const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
652 m_constantsBuffer->get(), layerNdx*m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);
654 DescriptorSetUpdateBuilder()
655 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
656 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
658 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
661 void ImageStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
663 const DeviceInterface& vk = m_context.getDeviceInterface();
665 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
666 const VkImageMemoryBarrier setImageLayoutBarrier = makeImageMemoryBarrier(
668 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
669 m_image->get(), fullImageSubresourceRange);
671 const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
672 const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
673 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
674 m_constantsBuffer->get(), 0ull, constantsBufferSize);
676 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &writeConstantsBarrier, 1, &setImageLayoutBarrier);
679 void ImageStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
681 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
684 void ImageStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
686 commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
//! Store test for buffers
//  The shader writes straight into the inherited host-visible buffer through
//  a storage texel buffer view — no intermediate image or copy needed.
class BufferStoreTestInstance : public StoreTestInstance
	BufferStoreTestInstance (Context& context,
	const Texture& texture,
	const VkFormat format);

	VkDescriptorSetLayout prepareDescriptors (void);
	// Shader-write -> host-read barrier on the result buffer.
	void commandAfterCompute (const VkCommandBuffer cmdBuffer);

	// NOTE(review): the trailing layerNdx parameter line is not visible in
	// this extract (overrides the pure virtual in BaseTestInstance).
	void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
	const VkPipelineLayout pipelineLayout,

	Move<VkDescriptorSetLayout> m_descriptorSetLayout;
	Move<VkDescriptorPool> m_descriptorPool;
	Move<VkDescriptorSet> m_descriptorSet;	// single set — buffers have no layers
	Move<VkBufferView> m_bufferView;		// texel-buffer view over m_imageBuffer
711 BufferStoreTestInstance::BufferStoreTestInstance (Context& context,
712 const Texture& texture,
713 const VkFormat format)
714 : StoreTestInstance(context, texture, format, false)
718 VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors (void)
720 const DeviceInterface& vk = m_context.getDeviceInterface();
721 const VkDevice device = m_context.getDevice();
723 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
724 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
727 m_descriptorPool = DescriptorPoolBuilder()
728 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
729 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
731 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
732 m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
734 return *m_descriptorSetLayout; // not passing the ownership
737 void BufferStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
739 DE_ASSERT(layerNdx == 0);
742 const VkDevice device = m_context.getDevice();
743 const DeviceInterface& vk = m_context.getDeviceInterface();
745 DescriptorSetUpdateBuilder()
746 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
748 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
751 void BufferStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
753 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(), m_imageSizeBytes);
//! Test case: imageLoad() from one image and imageStore() a horizontally
//  flipped copy into another, optionally reinterpreting the storage format.
//  NOTE(review): access specifiers and the enum header are not visible here.
class LoadStoreTest : public TestCase
		FLAG_SINGLE_LAYER_BIND = 1 << 0, //!< Run the shader multiple times, each time binding a different layer.
		FLAG_RESTRICT_IMAGES = 1 << 1, //!< If given, images in the shader will be qualified with "restrict".

	LoadStoreTest (tcu::TestContext& testCtx,
	const std::string& name,
	const std::string& description,
	const Texture& texture,
	const VkFormat format,
	const VkFormat imageFormat,
	const TestFlags flags = static_cast<TestFlags>(0));

	// Generates the "comp" shader that loads, mirrors in X, and stores.
	void initPrograms (SourceCollections& programCollection) const;
	TestInstance* createInstance (Context& context) const;

	const Texture m_texture;
	const VkFormat m_format; //!< Format as accessed in the shader
	const VkFormat m_imageFormat; //!< Storage format
	const bool m_singleLayerBind;	// bind one layer per dispatch when set
	const bool m_restrictImages;	// add "restrict" qualifiers in the GLSL
784 LoadStoreTest::LoadStoreTest (tcu::TestContext& testCtx,
785 const std::string& name,
786 const std::string& description,
787 const Texture& texture,
788 const VkFormat format,
789 const VkFormat imageFormat,
790 const TestFlags flags)
791 : TestCase (testCtx, name, description)
792 , m_texture (texture)
794 , m_imageFormat (imageFormat)
795 , m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
796 , m_restrictImages ((flags & FLAG_RESTRICT_IMAGES) != 0)
798 if (m_singleLayerBind)
799 DE_ASSERT(m_texture.numLayers() > 1);
801 DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
//! Emit the GLSL compute shader that loads from u_image0 and stores the
//  X-mirrored texel into u_image1.
//  NOTE(review): the dimension-selecting ternary glue around the three GLSL
//  body variants is not visible in this extract; comments only added here.
void LoadStoreTest::initPrograms (SourceCollections& programCollection) const
	// Dimensionality as seen by the shader (binding one layer drops a dimension).
	const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
	const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
	const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
	const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);
	// Optional "restrict" — load and store images never alias in this test.
	const std::string maybeRestrictStr = (m_restrictImages ? "restrict " : "");
	const std::string xMax = de::toString(m_texture.size().x() - 1);

	std::ostringstream src;
	src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
	<< "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
	<< "layout (binding = 0, " << formatQualifierStr << ") " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n"
	<< "layout (binding = 1, " << formatQualifierStr << ") " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n"
	<< "void main (void)\n"
	// 1D variant: mirror the X coordinate only.
	" int pos = int(gl_GlobalInvocationID.x);\n"
	" imageStore(u_image1, pos, imageLoad(u_image0, " + xMax + "-pos));\n"
	// 2D variant.
	" ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
	" imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" + xMax + "-pos.x, pos.y)));\n"
	// 3D variant.
	" ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
	" imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" + xMax + "-pos.x, pos.y, pos.z)));\n"

	programCollection.glslSources.add("comp") << glu::ComputeSource(src.str());
//! Load/store test base implementation
//  Verifies against a pre-generated reference image that also seeds the input.
class LoadStoreTestInstance : public BaseTestInstance
	LoadStoreTestInstance (Context& context,
	const Texture& texture,
	const VkFormat format,
	const VkFormat imageFormat,
	const bool singleLayerBind);

	virtual Buffer* getResultBuffer (void) const = 0; //!< Get the buffer that contains the result image

	// Compares getResultBuffer() contents against the flipped reference.
	tcu::TestStatus verifyResult (void);

	// Add empty implementations for functions that might be not needed
	void commandBeforeCompute (const VkCommandBuffer) {}
	void commandBetweenShaderInvocations (const VkCommandBuffer) {}
	void commandAfterCompute (const VkCommandBuffer) {}

	de::MovePtr<Buffer> m_imageBuffer; //!< Source data and helper buffer
	const VkDeviceSize m_imageSizeBytes;
	const VkFormat m_imageFormat; //!< Image format (for storage, may be different than texture format)
	tcu::TextureLevel m_referenceImage; //!< Used as input data and later to verify result image
//! Creates the shared staging buffer and uploads the generated reference image
//! into it, ready for a subsequent copy to the source image/texel buffer.
LoadStoreTestInstance::LoadStoreTestInstance (Context&			context,
											  const Texture&	texture,
											  const VkFormat	format,
											  const VkFormat	imageFormat,
											  const bool		singleLayerBind)
	: BaseTestInstance	(context, texture, format, singleLayerBind)
	, m_imageSizeBytes	(getImageSizeBytes(texture.size(), format))
	, m_imageFormat		(imageFormat)
	, m_referenceImage	(generateReferenceImage(texture.size(), imageFormat, format))
{
	const DeviceInterface&	vk			= m_context.getDeviceInterface();
	const VkDevice			device		= m_context.getDevice();
	Allocator&				allocator	= m_context.getDefaultAllocator();

	// A helper buffer with enough space to hold the whole image.
	// Usage covers both paths: texel-buffer view (buffer variant) and transfer
	// src/dst (image variant upload/readback).
	m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
		MemoryRequirement::HostVisible));

	// Copy reference data to buffer for subsequent upload to image.
	{
		const Allocation& alloc = m_imageBuffer->getAllocation();
		deMemcpy(alloc.getHostPtr(), m_referenceImage.getAccess().getDataPtr(), static_cast<size_t>(m_imageSizeBytes));
		// Make the host write visible to the device before it is read.
		flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
	}
}
891 tcu::TestStatus LoadStoreTestInstance::verifyResult (void)
893 const DeviceInterface& vk = m_context.getDeviceInterface();
894 const VkDevice device = m_context.getDevice();
896 // Apply the same transformation as done in the shader
897 const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
898 flipHorizontally(reference);
900 const Allocation& alloc = getResultBuffer()->getAllocation();
901 invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), m_imageSizeBytes);
902 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(), alloc.getHostPtr());
904 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
905 return tcu::TestStatus::pass("Passed");
907 return tcu::TestStatus::fail("Image comparison failed");
//! Load/store test for images
//! Uses two storage images (src read-only / dst write-only); when
//! m_singleLayerBind is set, one descriptor set and view pair is created per
//! layer, otherwise a single set covers all layers.
class ImageLoadStoreTestInstance : public LoadStoreTestInstance
{
public:
									ImageLoadStoreTestInstance		(Context&			context,
																	 const Texture&		texture,
																	 const VkFormat		format,
																	 const VkFormat		imageFormat,
																	 const bool			singleLayerBind);

protected:
	VkDescriptorSetLayout			prepareDescriptors				(void);
	void							commandBeforeCompute			(const VkCommandBuffer	cmdBuffer);
	void							commandBetweenShaderInvocations	(const VkCommandBuffer	cmdBuffer);
	void							commandAfterCompute				(const VkCommandBuffer	cmdBuffer);

	void							commandBindDescriptorsForLayer	(const VkCommandBuffer	cmdBuffer,
																	 const VkPipelineLayout	pipelineLayout,
																	 const int				layerNdx);

	//! Result lands in the shared staging buffer after commandAfterCompute().
	Buffer*							getResultBuffer					(void) const { return m_imageBuffer.get(); }

	de::MovePtr<Image>				m_imageSrc;
	de::MovePtr<Image>				m_imageDst;
	Move<VkDescriptorSetLayout>		m_descriptorSetLayout;
	Move<VkDescriptorPool>			m_descriptorPool;
	std::vector<SharedVkDescriptorSet>	m_allDescriptorSets;	//!< One per layer in single-layer-bind mode; else only [0] is used
	std::vector<SharedVkImageView>		m_allSrcImageViews;
	std::vector<SharedVkImageView>		m_allDstImageViews;
};
//! Creates the source and destination storage images. The mutable-format flag
//! is only required when the view format (m_format) differs from the image
//! format (m_imageFormat), i.e. in the format-reinterpret cases.
ImageLoadStoreTestInstance::ImageLoadStoreTestInstance (Context&		context,
														const Texture&	texture,
														const VkFormat	format,
														const VkFormat	imageFormat,
														const bool		singleLayerBind)
	: LoadStoreTestInstance	(context, texture, format, imageFormat, singleLayerBind)
	, m_allDescriptorSets	(texture.numLayers())
	, m_allSrcImageViews	(texture.numLayers())
	, m_allDstImageViews	(texture.numLayers())
{
	const DeviceInterface&		vk			= m_context.getDeviceInterface();
	const VkDevice				device		= m_context.getDevice();
	Allocator&					allocator	= m_context.getDefaultAllocator();
	const VkImageCreateFlags	imageFlags	= (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);

	// Source image: written by a buffer-to-image copy, read in the shader.
	m_imageSrc = de::MovePtr<Image>(new Image(
		vk, device, allocator,
		makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, imageFlags),
		MemoryRequirement::Any));

	// Destination image: written in the shader, copied back for verification.
	m_imageDst = de::MovePtr<Image>(new Image(
		vk, device, allocator,
		makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, imageFlags),
		MemoryRequirement::Any));
}
//! Builds the descriptor set layout/pool and creates the image views and
//! descriptor sets — one per layer when single-layer bind is used, otherwise a
//! single set whose views span every layer. Returns the layout without
//! transferring ownership.
VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors (void)
{
	const VkDevice			device	= m_context.getDevice();
	const DeviceInterface&	vk		= m_context.getDeviceInterface();

	const int numLayers = m_texture.numLayers();
	// Binding 0 = source image, binding 1 = destination image (matches shader).
	m_descriptorSetLayout = DescriptorSetLayoutBuilder()
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
		.build(vk, device);

	// Pool sized for the worst case: one set (two storage images) per layer.
	m_descriptorPool = DescriptorPoolBuilder()
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);

	if (m_singleLayerBind)
	{
		for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
		{
			// Each layer gets a non-arrayed view type and a one-layer subresource range.
			const VkImageViewType			viewType			= mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
			const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);

			m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
			m_allSrcImageViews[layerNdx]  = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
			m_allDstImageViews[layerNdx]  = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
		}
	}
	else // bind all layers at once
	{
		const VkImageViewType			viewType			= mapImageViewType(m_texture.type());
		const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);

		m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
		m_allSrcImageViews[0]  = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
		m_allDstImageViews[0]  = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
	}

	return *m_descriptorSetLayout; // not passing the ownership
}
1008 void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1010 const VkDevice device = m_context.getDevice();
1011 const DeviceInterface& vk = m_context.getDeviceInterface();
1013 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1014 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
1015 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
1017 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1018 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1020 DescriptorSetUpdateBuilder()
1021 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1022 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1023 .update(vk, device);
1024 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1027 void ImageLoadStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1029 const DeviceInterface& vk = m_context.getDeviceInterface();
1031 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
1033 const VkImageMemoryBarrier preCopyImageBarriers[] =
1035 makeImageMemoryBarrier(
1036 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
1037 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1038 m_imageSrc->get(), fullImageSubresourceRange),
1039 makeImageMemoryBarrier(
1040 0u, VK_ACCESS_SHADER_WRITE_BIT,
1041 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1042 m_imageDst->get(), fullImageSubresourceRange)
1045 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1046 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1047 m_imageBuffer->get(), 0ull, m_imageSizeBytes);
1049 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1050 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1053 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1054 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1055 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1056 m_imageSrc->get(), fullImageSubresourceRange);
1058 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
1060 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, ©Region);
1061 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
//! Inserts a write barrier on the destination image between per-layer
//! dispatches (delegates to the shared helper).
void ImageLoadStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
{
	commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
}
//! Copies the destination image back into the staging buffer so the host can
//! verify it (delegates to the shared helper).
void ImageLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
}
//! Load/store test for buffers
//! Reads from a texel-buffer view of the shared staging buffer and writes to a
//! separate destination buffer; single-layer bind does not apply here.
class BufferLoadStoreTestInstance : public LoadStoreTestInstance
{
public:
									BufferLoadStoreTestInstance		(Context&			context,
																	 const Texture&		texture,
																	 const VkFormat		format,
																	 const VkFormat		imageFormat);

protected:
	VkDescriptorSetLayout			prepareDescriptors				(void);
	void							commandAfterCompute				(const VkCommandBuffer	cmdBuffer);

	void							commandBindDescriptorsForLayer	(const VkCommandBuffer	cmdBuffer,
																	 const VkPipelineLayout	pipelineLayout,
																	 const int				layerNdx);

	//! The shader writes directly into the destination buffer.
	Buffer*							getResultBuffer					(void) const { return m_imageBufferDst.get(); }

	de::MovePtr<Buffer>				m_imageBufferDst;
	Move<VkDescriptorSetLayout>		m_descriptorSetLayout;
	Move<VkDescriptorPool>			m_descriptorPool;
	Move<VkDescriptorSet>			m_descriptorSet;
	Move<VkBufferView>				m_bufferViewSrc;
	Move<VkBufferView>				m_bufferViewDst;
};
//! Creates the destination texel buffer (host-visible so verifyResult() can
//! map and read it directly — no transfer usage needed).
BufferLoadStoreTestInstance::BufferLoadStoreTestInstance (Context&			context,
														  const Texture&	texture,
														  const VkFormat	format,
														  const VkFormat	imageFormat)
	: LoadStoreTestInstance(context, texture, format, imageFormat, false)
{
	const DeviceInterface&	vk			= m_context.getDeviceInterface();
	const VkDevice			device		= m_context.getDevice();
	Allocator&				allocator	= m_context.getDefaultAllocator();

	// Create a destination buffer.
	m_imageBufferDst = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
		MemoryRequirement::HostVisible));
}
//! Builds the layout/pool, allocates the single descriptor set, and creates
//! texel-buffer views over the source (staging) and destination buffers.
//! Returns the layout without transferring ownership.
VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors (void)
{
	const DeviceInterface&	vk		= m_context.getDeviceInterface();
	const VkDevice			device	= m_context.getDevice();

	// Binding 0 = source texel buffer, binding 1 = destination (matches shader).
	m_descriptorSetLayout = DescriptorSetLayoutBuilder()
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
		.build(vk, device);

	m_descriptorPool = DescriptorPoolBuilder()
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);

	m_descriptorSet	= makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
	// Views use m_format (the shader-visible format), not m_imageFormat.
	m_bufferViewSrc	= makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
	m_bufferViewDst	= makeBufferView(vk, device, m_imageBufferDst->get(), m_format, 0ull, m_imageSizeBytes);

	return *m_descriptorSetLayout; // not passing the ownership
}
//! Writes the src/dst texel-buffer views into the descriptor set and binds it.
//! Buffers have no layers, so only layerNdx == 0 is valid.
void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
{
	DE_ASSERT(layerNdx == 0);

	const VkDevice			device	= m_context.getDevice();
	const DeviceInterface&	vk		= m_context.getDeviceInterface();

	DescriptorSetUpdateBuilder()
		.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewSrc.get())
		.writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
		.update(vk, device);
	vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
}
//! Makes the shader writes to the destination buffer visible to the host
//! before verifyResult() maps and reads it (delegates to the shared helper).
void BufferLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(), m_imageSizeBytes);
}
1162 TestInstance* StoreTest::createInstance (Context& context) const
1164 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1165 return new BufferStoreTestInstance(context, m_texture, m_format);
1167 return new ImageStoreTestInstance(context, m_texture, m_format, m_singleLayerBind);
1170 TestInstance* LoadStoreTest::createInstance (Context& context) const
1172 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1173 return new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat);
1175 return new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_singleLayerBind);
//! One representative texture per image type: (type, size, numLayers).
//! Cube textures carry 6 layers per face set; cube array has 2 cubes.
static const Texture s_textures[] =
{
	Texture(IMAGE_TYPE_1D,			tcu::IVec3(64,  1, 1),	1),
	Texture(IMAGE_TYPE_1D_ARRAY,	tcu::IVec3(64,  1, 1),	8),
	Texture(IMAGE_TYPE_2D,			tcu::IVec3(64, 64, 1),	1),
	Texture(IMAGE_TYPE_2D_ARRAY,	tcu::IVec3(64, 64, 1),	8),
	Texture(IMAGE_TYPE_3D,			tcu::IVec3(64, 64, 8),	1),
	Texture(IMAGE_TYPE_CUBE,		tcu::IVec3(64, 64, 1),	6),
	Texture(IMAGE_TYPE_CUBE_ARRAY,	tcu::IVec3(64, 64, 1),	2*6),
	Texture(IMAGE_TYPE_BUFFER,		tcu::IVec3(64,  1, 1),	1),
};
1190 const Texture& getTestTexture (const ImageType imageType)
1192 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1193 if (s_textures[textureNdx].type() == imageType)
1194 return s_textures[textureNdx];
1196 DE_FATAL("Internal error");
1197 return s_textures[0];
//! Formats exercised by the store / load_store / format_reinterpret groups.
static const VkFormat s_formats[] =
{
	// Floating-point formats
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R16G16B16A16_SFLOAT,
	VK_FORMAT_R32_SFLOAT,

	// Unsigned integer formats
	VK_FORMAT_R32G32B32A32_UINT,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R8G8B8A8_UINT,

	// Signed integer formats
	VK_FORMAT_R32G32B32A32_SINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R8G8B8A8_SINT,

	// Normalized formats
	VK_FORMAT_R8G8B8A8_UNORM,

	VK_FORMAT_R8G8B8A8_SNORM,
};
1223 tcu::TestCaseGroup* createImageStoreTests (tcu::TestContext& testCtx)
1225 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store", "Plain imageStore() cases"));
1227 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1229 const Texture& texture = s_textures[textureNdx];
1230 de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1231 const bool isLayered = (texture.numLayers() > 1);
1233 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1235 groupByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx]));
1238 groupByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
1239 texture, s_formats[formatNdx], StoreTest::FLAG_SINGLE_LAYER_BIND));
1241 testGroup->addChild(groupByImageViewType.release());
1244 return testGroup.release();
1247 tcu::TestCaseGroup* createImageLoadStoreTests (tcu::TestContext& testCtx)
1249 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store", "Cases with imageLoad() followed by imageStore()"));
1251 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1253 const Texture& texture = s_textures[textureNdx];
1254 de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1255 const bool isLayered = (texture.numLayers() > 1);
1257 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1259 groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "",
1260 texture, s_formats[formatNdx], s_formats[formatNdx]));
1263 groupByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
1264 texture, s_formats[formatNdx], s_formats[formatNdx], LoadStoreTest::FLAG_SINGLE_LAYER_BIND));
1266 testGroup->addChild(groupByImageViewType.release());
1269 return testGroup.release();
1272 tcu::TestCaseGroup* createImageFormatReinterpretTests (tcu::TestContext& testCtx)
1274 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret", "Cases with differing texture and image formats"));
1276 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1278 const Texture& texture = s_textures[textureNdx];
1279 de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1281 for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
1282 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1284 const std::string caseName = getFormatShortString(s_formats[imageFormatNdx]) + "_" + getFormatShortString(s_formats[formatNdx]);
1285 if (imageFormatNdx != formatNdx && formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
1286 groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, "", texture, s_formats[formatNdx], s_formats[imageFormatNdx]));
1288 testGroup->addChild(groupByImageViewType.release());
1291 return testGroup.release();
1294 de::MovePtr<TestCase> createImageQualifierRestrictCase (tcu::TestContext& testCtx, const ImageType imageType, const std::string& name)
1296 const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
1297 const Texture& texture = getTestTexture(imageType);
1298 return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, "", texture, format, format, LoadStoreTest::FLAG_RESTRICT_IMAGES));