1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 The Android Open Source Project
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Multisampled image load/store Tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktImageMultisampleLoadStoreTests.hpp"
26 #include "vktTestCaseUtil.hpp"
27 #include "vktImageTestsUtil.hpp"
28 #include "vktImageLoadStoreUtil.hpp"
29 #include "vktImageTexture.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkPrograms.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkBuilderUtil.hpp"
38 #include "vkQueryUtil.hpp"
39 #include "vkImageUtil.hpp"
41 #include "deUniquePtr.hpp"
43 #include "tcuTextureUtil.hpp"
44 #include "tcuTestLog.hpp"
// Format of the per-pixel checksum image written in pass 2 and read back on the
// host: one signed 32-bit integer per pixel, expected to equal the sample count.
60 static const VkFormat CHECKSUM_IMAGE_FORMAT = VK_FORMAT_R32_SINT;
// NOTE(review): member of the CaseDef parameter struct; the rest of the struct
// definition is not visible in this chunk -- confirm against the full file.
66 VkSampleCountFlagBits numSamples;
70 // Multisampled storage image test.
72 // Pass 1: Write a slightly different color pattern per-sample to the whole image.
73 // Pass 2: Read samples of the same image and check if color values are in the expected range.
74 // Write back results as a checksum image and verify them on the host.
75 // Each checksum image pixel should contain an integer equal to the number of samples.
// Builds the two compute shaders used by this test:
//  - "comp_store": one invocation per texel; writes a distinct color pattern to
//    every sample of the multisampled storage image (pass 1).
//  - "comp_load": reads every sample back, counts how many match the expected
//    pattern, and writes that count to the checksum image (pass 2).
// NOTE(review): several interior lines (braces, parts of the GLSL text) are
// missing from this view of the file; comments describe only what is visible.
77 void initPrograms (SourceCollections& programCollection, const CaseDef caseDef)
79 const int dimension = (caseDef.singleLayerBind ? caseDef.texture.layerDimension() : caseDef.texture.dimension());
80 const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");
82 const ImageType usedImageType = (caseDef.singleLayerBind ? getImageTypeForSingleLayer(caseDef.texture.type()) : caseDef.texture.type());
83 const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(caseDef.format));
84 const std::string msImageTypeStr = getShaderImageType(mapVkFormat(caseDef.format), usedImageType, (caseDef.texture.numSamples() > 1));
// Largest x/y texel coordinates, baked into the color expression below.
86 const std::string xMax = de::toString(caseDef.texture.size().x() - 1);
87 const std::string yMax = de::toString(caseDef.texture.size().y() - 1);
// "u"/"i" prefix selects uvec4/ivec4 for integer formats, plain vec4 otherwise.
88 const std::string signednessPrefix = isUintFormat(caseDef.format) ? "u" : isIntFormat(caseDef.format) ? "i" : "";
89 const std::string gvec4Expr = signednessPrefix + "vec4";
90 const int numColorComponents = tcu::getNumUsedChannels(mapVkFormat(caseDef.format).order);
// Scale/bias keep the stored values inside the representable range of the format
// (e.g. normalized formats); validated by the assert below.
92 const float storeColorScale = computeStoreColorScale(caseDef.format, caseDef.texture.size());
93 const float storeColorBias = computeStoreColorBias(caseDef.format);
94 DE_ASSERT(colorScaleAndBiasAreValid(caseDef.format, storeColorScale, storeColorBias));
96 const std::string colorScaleExpr = (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
97 + (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");
// GLSL expression producing a per-texel, per-sample color; XOR of coordinates
// and sample index makes every (texel, sample) pair distinct. Unused channels
// get constants so the comparison in pass 2 still holds.
98 const std::string colorExpr =
100 + "gx^gy^gz^(sampleNdx >> 5)^(sampleNdx & 31), " // we "split" sampleNdx to keep this value in [0, 31] range for numSamples = 64 case
101 + (numColorComponents > 1 ? "(" + xMax + "-gx)^gy^gz, " : "0, ")
102 + (numColorComponents > 2 ? "gx^(" + yMax + "-gy)^gz, " : "0, ")
103 + (numColorComponents > 3 ? "(" + xMax + "-gx)^(" + yMax + "-gy)^gz" : "1")
104 + ")" + colorScaleExpr;
// --- Shader 1: store pass ---------------------------------------------------
108 std::ostringstream src;
109 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
111 << "layout(local_size_x = 1) in;\n"
112 << "layout(set = 0, binding = 1, " << formatQualifierStr << ") writeonly uniform " << msImageTypeStr << " u_msImage;\n";
// When binding one layer at a time, the layer index arrives via a UBO at binding 0.
114 if (caseDef.singleLayerBind)
115 src << "layout(set = 0, binding = 0) readonly uniform Constants {\n"
116 << " int u_layerNdx;\n"
120 << "void main (void)\n"
122 << " int gx = int(gl_GlobalInvocationID.x);\n"
123 << " int gy = int(gl_GlobalInvocationID.y);\n"
124 << " int gz = " << (caseDef.singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
126 << " for (int sampleNdx = 0; sampleNdx < " << caseDef.texture.numSamples() <<"; ++sampleNdx) {\n"
127 << " imageStore(u_msImage, " << texelCoordStr << ", sampleNdx, " << colorExpr << ");\n"
131 programCollection.glslSources.add("comp_store") << glu::ComputeSource(src.str());
// --- Shader 2: load/verify pass ---------------------------------------------
136 const tcu::TextureFormat checksumFormat = mapVkFormat(CHECKSUM_IMAGE_FORMAT);
137 const std::string checksumImageTypeStr = getShaderImageType(checksumFormat, usedImageType);
// Integer formats compare exactly; float formats use an epsilon (0.02) below.
138 const bool useExactCompare = isIntegerFormat(caseDef.format);
140 std::ostringstream src;
141 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
143 << "layout(local_size_x = 1) in;\n"
144 << "layout(set = 0, binding = 1, " << formatQualifierStr << ") readonly uniform " << msImageTypeStr << " u_msImage;\n"
145 << "layout(set = 0, binding = 2, " << getShaderImageFormatQualifier(checksumFormat) << ") writeonly uniform " << checksumImageTypeStr << " u_checksumImage;\n";
147 if (caseDef.singleLayerBind)
148 src << "layout(set = 0, binding = 0) readonly uniform Constants {\n"
149 << " int u_layerNdx;\n"
153 << "void main (void)\n"
155 << " int gx = int(gl_GlobalInvocationID.x);\n"
156 << " int gy = int(gl_GlobalInvocationID.y);\n"
157 << " int gz = " << (caseDef.singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
// Count how many samples hold the expected color; the host later checks that
// this equals numSamples for every pixel.
159 << " int checksum = 0;\n"
160 << " for (int sampleNdx = 0; sampleNdx < " << caseDef.texture.numSamples() <<"; ++sampleNdx) {\n"
161 << " " << gvec4Expr << " color = imageLoad(u_msImage, " << texelCoordStr << ", sampleNdx);\n";
164 src << " if (color == " << colorExpr << ")\n"
167 src << " " << gvec4Expr << " diff = abs(abs(color) - abs(" << colorExpr << "));\n"
168 << " if (all(lessThan(diff, " << gvec4Expr << "(0.02))))\n"
173 << " imageStore(u_checksumImage, " << texelCoordStr << ", ivec4(checksum));\n"
176 programCollection.glslSources.add("comp_load") << glu::ComputeSource(src.str());
// Throws NotSupportedError unless the device supports multisampled storage
// images, the requested format with STORAGE usage, and the requested sample
// count. NOTE(review): brace lines are missing from this view of the file.
180 void checkRequirements (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const CaseDef& caseDef)
182 VkPhysicalDeviceFeatures features;
183 vki.getPhysicalDeviceFeatures(physDevice, &features);
// Core feature gate for imageLoad/imageStore on multisampled images.
185 if (!features.shaderStorageImageMultisample)
186 TCU_THROW(NotSupportedError, "Multisampled storage images are not supported");
188 VkImageFormatProperties imageFormatProperties;
189 const VkResult imageFormatResult = vki.getPhysicalDeviceImageFormatProperties(
190 physDevice, caseDef.format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_STORAGE_BIT, (VkImageCreateFlags)0, &imageFormatProperties);
192 if (imageFormatResult == VK_ERROR_FORMAT_NOT_SUPPORTED)
193 TCU_THROW(NotSupportedError, "Format is not supported");
// sampleCounts is a bitmask; require the exact requested bit to be present.
195 if ((imageFormatProperties.sampleCounts & caseDef.numSamples) != caseDef.numSamples)
196 TCU_THROW(NotSupportedError, "Requested sample count is not supported");
199 //! Helper function to deal with per-layer resources.
// Populates *pOutImageViews: one view per layer when singleLayerBind is set
// (each covering a single array layer), otherwise a single view covering all
// layers. NOTE(review): brace lines are missing from this view of the file.
200 void insertImageViews (const DeviceInterface& vk, const VkDevice device, const CaseDef& caseDef, const VkFormat format, const VkImage image, std::vector<SharedVkImageView>* const pOutImageViews)
202 if (caseDef.singleLayerBind)
204 pOutImageViews->clear();
205 pOutImageViews->resize(caseDef.texture.numLayers());
206 for (int layerNdx = 0; layerNdx < caseDef.texture.numLayers(); ++layerNdx)
// Per-layer view: subresource range selects exactly one layer (layerNdx, count 1).
208 (*pOutImageViews)[layerNdx] = makeVkSharedPtr(makeImageView(
209 vk, device, image, mapImageViewType(getImageTypeForSingleLayer(caseDef.texture.type())), format,
210 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
213 else // bind all layers at once
215 pOutImageViews->clear();
216 pOutImageViews->resize(1);
217 (*pOutImageViews)[0] = makeVkSharedPtr(makeImageView(
218 vk, device, image, mapImageViewType(caseDef.texture.type()), format,
219 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, caseDef.texture.numLayers())));
223 //! Helper function to deal with per-layer resources.
// Populates *pOutDescriptorSets: one set per layer when singleLayerBind is set,
// otherwise a single set. Mirrors insertImageViews so set index == view index.
// NOTE(review): brace lines are missing from this view of the file.
224 void insertDescriptorSets (const DeviceInterface& vk, const VkDevice device, const CaseDef& caseDef, const VkDescriptorPool descriptorPool, const VkDescriptorSetLayout descriptorSetLayout, std::vector<SharedVkDescriptorSet>* const pOutDescriptorSets)
226 if (caseDef.singleLayerBind)
228 pOutDescriptorSets->clear();
229 pOutDescriptorSets->resize(caseDef.texture.numLayers());
230 for (int layerNdx = 0; layerNdx < caseDef.texture.numLayers(); ++layerNdx)
231 (*pOutDescriptorSets)[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
233 else // bind all layers at once
235 pOutDescriptorSets->clear();
236 pOutDescriptorSets->resize(1);
237 (*pOutDescriptorSets)[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
241 tcu::TestStatus test (Context& context, const CaseDef caseDef)
243 const InstanceInterface& vki = context.getInstanceInterface();
244 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
245 const DeviceInterface& vk = context.getDeviceInterface();
246 const VkDevice device = context.getDevice();
247 const VkQueue queue = context.getUniversalQueue();
248 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
249 Allocator& allocator = context.getDefaultAllocator();
251 checkRequirements(vki, physDevice, caseDef);
255 const UniquePtr<Image> msImage(new Image(
256 vk, device, allocator, makeImageCreateInfo(caseDef.texture, caseDef.format, VK_IMAGE_USAGE_STORAGE_BIT, 0u), MemoryRequirement::Any));
258 const UniquePtr<Image> checksumImage(new Image(
259 vk, device, allocator,
260 makeImageCreateInfo(Texture(caseDef.texture, 1), CHECKSUM_IMAGE_FORMAT, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
261 MemoryRequirement::Any));
263 // Buffer used to pass constants to the shader.
265 const int numLayers = caseDef.texture.numLayers();
266 const VkDeviceSize bufferChunkSize = getOptimalUniformBufferChunkSize(vki, physDevice, sizeof(deInt32));
267 const VkDeviceSize constantsBufferSizeBytes = numLayers * bufferChunkSize;
268 UniquePtr<Buffer> constantsBuffer (new Buffer(vk, device, allocator, makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
269 MemoryRequirement::HostVisible));
272 const Allocation& alloc = constantsBuffer->getAllocation();
273 deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());
275 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));
277 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
279 deInt32* const valuePtr = reinterpret_cast<deInt32*>(basePtr + layerNdx * bufferChunkSize);
280 *valuePtr = layerNdx;
283 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), constantsBufferSizeBytes);
286 const VkDeviceSize resultBufferSizeBytes = getImageSizeBytes(caseDef.texture.size(), CHECKSUM_IMAGE_FORMAT);
287 UniquePtr<Buffer> resultBuffer (new Buffer(vk, device, allocator, makeBufferCreateInfo(resultBufferSizeBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT),
288 MemoryRequirement::HostVisible));
291 const Allocation& alloc = resultBuffer->getAllocation();
292 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(resultBufferSizeBytes));
293 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), resultBufferSizeBytes);
298 Unique<VkDescriptorSetLayout> descriptorSetLayout(DescriptorSetLayoutBuilder()
299 .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
300 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
301 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
304 Unique<VkDescriptorPool> descriptorPool(DescriptorPoolBuilder()
305 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
306 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
307 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
308 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers));
310 std::vector<SharedVkDescriptorSet> allDescriptorSets;
311 std::vector<SharedVkImageView> allMultisampledImageViews;
312 std::vector<SharedVkImageView> allChecksumImageViews;
314 insertDescriptorSets(vk, device, caseDef, *descriptorPool, *descriptorSetLayout, &allDescriptorSets);
315 insertImageViews (vk, device, caseDef, caseDef.format, **msImage, &allMultisampledImageViews);
316 insertImageViews (vk, device, caseDef, CHECKSUM_IMAGE_FORMAT, **checksumImage, &allChecksumImageViews);
320 const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
321 const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
322 const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
324 const tcu::IVec3 workSize = (caseDef.singleLayerBind ? caseDef.texture.layerSize() : caseDef.texture.size());
325 const int loopNumLayers = (caseDef.singleLayerBind ? numLayers : 1);
326 const VkImageSubresourceRange subresourceAllLayers = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, caseDef.texture.numLayers());
328 // Pass 1: Write MS image
330 const Unique<VkShaderModule> shaderModule (createShaderModule (vk, device, context.getBinaryCollection().get("comp_store"), 0));
331 const Unique<VkPipeline> pipeline (makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
333 beginCommandBuffer(vk, *cmdBuffer);
334 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
337 const VkImageMemoryBarrier barriers[] =
339 makeImageMemoryBarrier((VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, **msImage, subresourceAllLayers),
340 makeImageMemoryBarrier((VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, **checksumImage, subresourceAllLayers),
343 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0,
344 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
347 for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
349 const VkDescriptorSet descriptorSet = **allDescriptorSets[layerNdx];
350 const VkDescriptorImageInfo descriptorMultiImageInfo = makeDescriptorImageInfo(DE_NULL, **allMultisampledImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
351 const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(constantsBuffer->get(), layerNdx*bufferChunkSize, bufferChunkSize);
353 DescriptorSetUpdateBuilder()
354 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
355 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorMultiImageInfo)
358 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
359 vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
362 endCommandBuffer(vk, *cmdBuffer);
363 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
366 // Pass 2: "Resolve" MS image in compute shader
368 const Unique<VkShaderModule> shaderModule (createShaderModule (vk, device, context.getBinaryCollection().get("comp_load"), 0));
369 const Unique<VkPipeline> pipeline (makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
371 beginCommandBuffer(vk, *cmdBuffer);
372 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
375 const VkImageMemoryBarrier barriers[] =
377 makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL, **msImage, subresourceAllLayers),
380 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0,
381 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
384 for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
386 const VkDescriptorSet descriptorSet = **allDescriptorSets[layerNdx];
387 const VkDescriptorImageInfo descriptorMultiImageInfo = makeDescriptorImageInfo(DE_NULL, **allMultisampledImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
388 const VkDescriptorImageInfo descriptorChecksumImageInfo = makeDescriptorImageInfo(DE_NULL, **allChecksumImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
389 const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(constantsBuffer->get(), layerNdx*bufferChunkSize, bufferChunkSize);
391 DescriptorSetUpdateBuilder()
392 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
393 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorMultiImageInfo)
394 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorChecksumImageInfo)
397 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
398 vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
401 endCommandBuffer(vk, *cmdBuffer);
402 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
407 beginCommandBuffer(vk, *cmdBuffer);
410 const VkImageMemoryBarrier barriers[] =
412 makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **checksumImage, subresourceAllLayers),
414 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
415 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
418 const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(caseDef.texture.layerSize()), caseDef.texture.numLayers());
419 vk.cmdCopyImageToBuffer(*cmdBuffer, **checksumImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **resultBuffer, 1u, ©Region);
422 const VkBufferMemoryBarrier barriers[] =
424 makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **resultBuffer, 0ull, resultBufferSizeBytes),
426 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
427 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers, 0u, DE_NULL);
430 endCommandBuffer(vk, *cmdBuffer);
431 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
436 const Allocation& alloc = resultBuffer->getAllocation();
437 invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), resultBufferSizeBytes);
439 const IVec3 imageSize = caseDef.texture.size();
440 const deInt32* pDataPtr = static_cast<deInt32*>(alloc.getHostPtr());
441 const deInt32 expectedChecksum = caseDef.texture.numSamples();
443 for (int layer = 0; layer < imageSize.z(); ++layer)
444 for (int y = 0; y < imageSize.y(); ++y)
445 for (int x = 0; x < imageSize.x(); ++x)
447 if (*pDataPtr != expectedChecksum)
449 context.getTestContext().getLog()
450 << tcu::TestLog::Message << "Some sample colors were incorrect at (x, y, layer) = (" << x << ", " << y << ", " << layer << ")" << tcu::TestLog::EndMessage
451 << tcu::TestLog::Message << "Checksum value is " << *pDataPtr << " but expected " << expectedChecksum << tcu::TestLog::EndMessage;
453 return tcu::TestStatus::fail("Some sample colors were incorrect");
458 return tcu::TestStatus::pass("OK");
// Builds the "load_store_multisample" test-case hierarchy:
// image type -> format (optionally "_single_layer" variant) -> sample count.
// NOTE(review): brace lines and part of the CaseDef initializer are missing
// from this view of the file; comments describe only what is visible.
464 tcu::TestCaseGroup* createImageMultisampleLoadStoreTests (tcu::TestContext& testCtx)
466 const Texture textures[] =
468 // \note Shader code is tweaked to work with image size of 32, take a look if this needs to be modified.
469 Texture(IMAGE_TYPE_2D, tcu::IVec3(32, 32, 1), 1),
470 Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(32, 32, 1), 4),
473 static const VkFormat formats[] =
475 VK_FORMAT_R32G32B32A32_SFLOAT,
476 VK_FORMAT_R16G16B16A16_SFLOAT,
477 VK_FORMAT_R32_SFLOAT,
479 VK_FORMAT_R32G32B32A32_UINT,
480 VK_FORMAT_R16G16B16A16_UINT,
481 VK_FORMAT_R8G8B8A8_UINT,
484 VK_FORMAT_R32G32B32A32_SINT,
485 VK_FORMAT_R16G16B16A16_SINT,
486 VK_FORMAT_R8G8B8A8_SINT,
489 VK_FORMAT_R8G8B8A8_UNORM,
491 VK_FORMAT_R8G8B8A8_SNORM,
494 static const VkSampleCountFlagBits samples[] =
496 VK_SAMPLE_COUNT_2_BIT,
497 VK_SAMPLE_COUNT_4_BIT,
498 VK_SAMPLE_COUNT_8_BIT,
499 VK_SAMPLE_COUNT_16_BIT,
500 VK_SAMPLE_COUNT_32_BIT,
501 VK_SAMPLE_COUNT_64_BIT,
504 MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store_multisample", "Multisampled image store and load"));
506 for (int baseTextureNdx = 0; baseTextureNdx < DE_LENGTH_OF_ARRAY(textures); ++baseTextureNdx)
508 const Texture& baseTexture = textures[baseTextureNdx];
509 MovePtr<tcu::TestCaseGroup> imageViewGroup (new tcu::TestCaseGroup(testCtx, getImageTypeName(baseTexture.type()).c_str(), ""));
// Single-layer textures get only the bind-all mode; arrays also get per-layer bind.
510 const int numLayerBindModes = (baseTexture.numLayers() == 1 ? 1 : 2);
512 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); ++formatNdx)
513 for (int layerBindMode = 0; layerBindMode < numLayerBindModes; ++layerBindMode)
515 const bool singleLayerBind = (layerBindMode != 0);
516 const std::string formatGroupName = getFormatShortString(formats[formatNdx]) + (singleLayerBind ? "_single_layer" : "");
517 MovePtr<tcu::TestCaseGroup> formatGroup (new tcu::TestCaseGroup(testCtx, formatGroupName.c_str(), ""));
519 for (int samplesNdx = 0; samplesNdx < DE_LENGTH_OF_ARRAY(samples); ++samplesNdx)
521 const std::string samplesCaseName = "samples_" + de::toString(samples[samplesNdx]);
// CaseDef aggregates the texture (with sample count applied), format and bind
// mode; some initializer fields are on lines not visible in this chunk.
523 const CaseDef caseDef =
525 Texture(baseTexture, samples[samplesNdx]),
531 addFunctionCaseWithPrograms(formatGroup.get(), samplesCaseName, "", initPrograms, test, caseDef);
533 imageViewGroup->addChild(formatGroup.release());
535 testGroup->addChild(imageViewGroup.release());
538 return testGroup.release();