1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 The Android Open Source Project
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
12 * http://www.apache.org/licenses/LICENSE-2.0
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
22 * \brief Multisampled image load/store Tests
23 *//*--------------------------------------------------------------------*/
25 #include "vktImageMultisampleLoadStoreTests.hpp"
26 #include "vktTestCaseUtil.hpp"
27 #include "vktImageTestsUtil.hpp"
28 #include "vktImageLoadStoreUtil.hpp"
29 #include "vktImageTexture.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkPrograms.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkBuilderUtil.hpp"
38 #include "vkQueryUtil.hpp"
39 #include "vkImageUtil.hpp"
41 #include "deUniquePtr.hpp"
43 #include "tcuTextureUtil.hpp"
59 static const VkFormat CHECKSUM_IMAGE_FORMAT = VK_FORMAT_R32_SINT;
65 VkSampleCountFlagBits numSamples;
69 // Multisampled storage image test.
71 // Pass 1: Write a slightly different color pattern per-sample to the whole image.
72 // Pass 2: Read samples of the same image and check if color values are in the expected range.
73 // Write back results as a checksum image and verify them on the host.
74 // Each checksum image pixel should contain an integer equal to the number of samples.
// Generate the two compute shaders used by the test:
//  - "comp_store": writes a per-sample XOR-based color pattern to every texel/sample of the MS image.
//  - "comp_load":  reads every sample back, compares against the same expected expression, and writes
//                  the per-texel match count (the "checksum") to a single-sampled R32_SINT image.
// NOTE(review): this chunk is an elided listing — several original lines (opening braces, the head of
// colorExpr, shader-source closing braces) are not visible here; comments below describe only what is shown.
76 void initPrograms (SourceCollections& programCollection, const CaseDef caseDef)
// When binding one layer at a time, coordinates are generated for the per-layer image dimensionality.
78 const int dimension = (caseDef.singleLayerBind ? caseDef.texture.layerDimension() : caseDef.texture.dimension());
79 const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");
// For single-layer binds a 2D array is viewed as a plain 2D image, so the GLSL image type must match.
81 const ImageType usedImageType = (caseDef.singleLayerBind ? getImageTypeForSingleLayer(caseDef.texture.type()) : caseDef.texture.type());
82 const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(caseDef.format));
83 const std::string msImageTypeStr = getShaderImageType(mapVkFormat(caseDef.format), usedImageType, (caseDef.texture.numSamples() > 1));
// Max coordinate strings baked into the shader source for the mirrored XOR pattern below.
85 const std::string xMax = de::toString(caseDef.texture.size().x() - 1);
86 const std::string yMax = de::toString(caseDef.texture.size().y() - 1);
// Integer formats need u/i-prefixed vec types (uvec4/ivec4); float formats use plain vec4.
87 const std::string signednessPrefix = isUintFormat(caseDef.format) ? "u" : isIntFormat(caseDef.format) ? "i" : "";
88 const std::string gvec4Expr = signednessPrefix + "vec4";
89 const int numColorComponents = tcu::getNumUsedChannels(mapVkFormat(caseDef.format).order);
// Normalized/float formats need the integer XOR pattern scaled/biased into a representable range.
91 const float storeColorScale = computeStoreColorScale(caseDef.format, caseDef.texture.size());
92 const float storeColorBias = computeStoreColorBias(caseDef.format);
93 DE_ASSERT(colorScaleAndBiasAreValid(caseDef.format, storeColorScale, storeColorBias));
// Emit "*scale" / "+ bias" only when they are not identity, keeping the shader source minimal.
95 const std::string colorScaleExpr = (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
96 + (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");
// Expected color expression, unique per (texel, sample); shared verbatim by both the store and
// load shaders so pass 2 compares against exactly what pass 1 wrote.
97 const std::string colorExpr =
99 + "gx^gy^gz^(sampleNdx >> 5)^(sampleNdx & 31), " // we "split" sampleNdx to keep this value in [0, 31] range for numSamples = 64 case
100 + (numColorComponents > 1 ? "(" + xMax + "-gx)^gy^gz, " : "0, ")
101 + (numColorComponents > 2 ? "gx^(" + yMax + "-gy)^gz, " : "0, ")
102 + (numColorComponents > 3 ? "(" + xMax + "-gx)^(" + yMax + "-gy)^gz" : "1")
103 + ")" + colorScaleExpr;
// --- Pass 1 shader ("comp_store"): one invocation per texel, loops over all samples and stores. ---
107 std::ostringstream src;
108 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
110 << "layout(local_size_x = 1) in;\n"
111 << "layout(set = 0, binding = 1, " << formatQualifierStr << ") writeonly uniform " << msImageTypeStr << " u_msImage;\n";
// The bound layer index arrives via a UBO only in single-layer-bind mode; otherwise gz comes
// from gl_GlobalInvocationID.z.
113 if (caseDef.singleLayerBind)
114 src << "layout(set = 0, binding = 0) readonly uniform Constants {\n"
115 << " int u_layerNdx;\n"
119 << "void main (void)\n"
121 << " int gx = int(gl_GlobalInvocationID.x);\n"
122 << " int gy = int(gl_GlobalInvocationID.y);\n"
123 << " int gz = " << (caseDef.singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
125 << " for (int sampleNdx = 0; sampleNdx < " << caseDef.texture.numSamples() <<"; ++sampleNdx) {\n"
126 << " imageStore(u_msImage, " << texelCoordStr << ", sampleNdx, " << colorExpr << ");\n"
130 programCollection.glslSources.add("comp_store") << glu::ComputeSource(src.str());
// --- Pass 2 shader ("comp_load"): reads each sample and accumulates a per-texel match count. ---
135 const tcu::TextureFormat checksumFormat = mapVkFormat(CHECKSUM_IMAGE_FORMAT);
136 const std::string checksumImageTypeStr = getShaderImageType(checksumFormat, usedImageType);
// Integer formats must round-trip exactly; float formats are compared with a tolerance below.
137 const bool useExactCompare = isIntegerFormat(caseDef.format);
139 std::ostringstream src;
140 src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n"
142 << "layout(local_size_x = 1) in;\n"
143 << "layout(set = 0, binding = 1, " << formatQualifierStr << ") readonly uniform " << msImageTypeStr << " u_msImage;\n"
144 << "layout(set = 0, binding = 2, " << getShaderImageFormatQualifier(checksumFormat) << ") writeonly uniform " << checksumImageTypeStr << " u_checksumImage;\n";
146 if (caseDef.singleLayerBind)
147 src << "layout(set = 0, binding = 0) readonly uniform Constants {\n"
148 << " int u_layerNdx;\n"
152 << "void main (void)\n"
154 << " int gx = int(gl_GlobalInvocationID.x);\n"
155 << " int gy = int(gl_GlobalInvocationID.y);\n"
156 << " int gz = " << (caseDef.singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
158 << " int checksum = 0;\n"
159 << " for (int sampleNdx = 0; sampleNdx < " << caseDef.texture.numSamples() <<"; ++sampleNdx) {\n"
160 << " " << gvec4Expr << " color = imageLoad(u_msImage, " << texelCoordStr << ", sampleNdx);\n";
// Exact equality for integer formats (selected by useExactCompare above; the branch header is elided here)...
163 src << " if (color == " << colorExpr << ")\n"
// ...and a component-wise |color| - |expected| < 0.02 tolerance for float/normalized formats.
166 src << " " << gvec4Expr << " diff = abs(abs(color) - abs(" << colorExpr << "));\n"
167 << " if (all(lessThan(diff, " << gvec4Expr << "(0.02))))\n"
// A fully correct texel therefore stores checksum == numSamples, which the host verifies.
172 << " imageStore(u_checksumImage, " << texelCoordStr << ", ivec4(checksum));\n"
175 programCollection.glslSources.add("comp_load") << glu::ComputeSource(src.str());
// Throw NotSupportedError unless the implementation supports multisampled storage images for
// this case's format and sample count. Called from test() before any resources are created.
179 void checkRequirements (const InstanceInterface& vki, const VkPhysicalDevice physDevice, const CaseDef& caseDef)
181 VkPhysicalDeviceFeatures features;
182 vki.getPhysicalDeviceFeatures(physDevice, &features);
// imageLoad/imageStore on MS images requires the shaderStorageImageMultisample device feature.
184 if (!features.shaderStorageImageMultisample)
185 TCU_THROW(NotSupportedError, "Multisampled storage images are not supported");
// Query supported properties for a 2D optimal-tiling storage image of the case format.
187 VkImageFormatProperties imageFormatProperties;
188 const VkResult imageFormatResult = vki.getPhysicalDeviceImageFormatProperties(
189 physDevice, caseDef.format, VK_IMAGE_TYPE_2D, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_STORAGE_BIT, (VkImageCreateFlags)0, &imageFormatProperties);
191 if (imageFormatResult == VK_ERROR_FORMAT_NOT_SUPPORTED)
192 TCU_THROW(NotSupportedError, "Format is not supported");
// sampleCounts is a bitmask; the requested single-bit count must be present in it.
194 if ((imageFormatProperties.sampleCounts & caseDef.numSamples) != caseDef.numSamples)
195 TCU_THROW(NotSupportedError, "Requested sample count is not supported");
198 //! Helper function to deal with per-layer resources.
// Populates *pOutImageViews with either one view per layer (singleLayerBind: each view covers
// exactly one array layer, using the single-layer image type) or a single view spanning all
// layers. Existing contents of the vector are discarded.
199 void insertImageViews (const DeviceInterface& vk, const VkDevice device, const CaseDef& caseDef, const VkFormat format, const VkImage image, std::vector<SharedVkImageView>* const pOutImageViews)
201 if (caseDef.singleLayerBind)
203 pOutImageViews->clear();
204 pOutImageViews->resize(caseDef.texture.numLayers());
205 for (int layerNdx = 0; layerNdx < caseDef.texture.numLayers(); ++layerNdx)
// Each view selects mip 0 and a single layer [layerNdx, layerNdx+1).
207 (*pOutImageViews)[layerNdx] = makeVkSharedPtr(makeImageView(
208 vk, device, image, mapImageViewType(getImageTypeForSingleLayer(caseDef.texture.type())), format,
209 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
212 else // bind all layers at once
214 pOutImageViews->clear();
215 pOutImageViews->resize(1);
// One view over mip 0 and the full layer range.
216 (*pOutImageViews)[0] = makeVkSharedPtr(makeImageView(
217 vk, device, image, mapImageViewType(caseDef.texture.type()), format,
218 makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, caseDef.texture.numLayers())));
222 //! Helper function to deal with per-layer resources.
// Allocates descriptor sets to match the image-view layout produced by insertImageViews():
// one set per layer in single-layer-bind mode, otherwise a single set. All sets share the
// same layout and come from the given pool; existing vector contents are discarded.
223 void insertDescriptorSets (const DeviceInterface& vk, const VkDevice device, const CaseDef& caseDef, const VkDescriptorPool descriptorPool, const VkDescriptorSetLayout descriptorSetLayout, std::vector<SharedVkDescriptorSet>* const pOutDescriptorSets)
225 if (caseDef.singleLayerBind)
227 pOutDescriptorSets->clear();
228 pOutDescriptorSets->resize(caseDef.texture.numLayers());
229 for (int layerNdx = 0; layerNdx < caseDef.texture.numLayers(); ++layerNdx)
230 (*pOutDescriptorSets)[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
232 else // bind all layers at once
234 pOutDescriptorSets->clear();
235 pOutDescriptorSets->resize(1);
236 (*pOutDescriptorSets)[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, descriptorPool, descriptorSetLayout));
// Test body: pass 1 fills the multisampled storage image with the per-sample pattern,
// pass 2 reads all samples back in a compute shader and writes a per-texel match count
// ("checksum") to a single-sampled image, which is copied to a host-visible buffer and
// verified on the CPU (every texel must equal numSamples). Returns pass/fail status.
240 tcu::TestStatus test (Context& context, const CaseDef caseDef)
242 const InstanceInterface& vki = context.getInstanceInterface();
243 const VkPhysicalDevice physDevice = context.getPhysicalDevice();
244 const DeviceInterface& vk = context.getDeviceInterface();
245 const VkDevice device = context.getDevice();
246 const VkQueue queue = context.getUniversalQueue();
247 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
248 Allocator& allocator = context.getDefaultAllocator();
// May throw NotSupportedError before anything is allocated.
250 checkRequirements(vki, physDevice, caseDef);
// The MS image under test, plus a single-sampled (numSamples forced to 1 via Texture(caseDef.texture, 1))
// checksum image that is also a transfer source for the host readback.
254 const UniquePtr<Image> msImage(new Image(
255 vk, device, allocator, makeImageCreateInfo(caseDef.texture, caseDef.format, VK_IMAGE_USAGE_STORAGE_BIT, 0u), MemoryRequirement::Any));
257 const UniquePtr<Image> checksumImage(new Image(
258 vk, device, allocator,
259 makeImageCreateInfo(Texture(caseDef.texture, 1), CHECKSUM_IMAGE_FORMAT, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
260 MemoryRequirement::Any));
262 // Buffer used to pass constants to the shader.
// One UBO chunk per layer, each chunk aligned to the device's optimal/minimum UBO offset,
// holding just that layer's index (read as u_layerNdx in single-layer-bind mode).
264 const int numLayers = caseDef.texture.numLayers();
265 const VkDeviceSize bufferChunkSize = getOptimalUniformBufferChunkSize(vki, physDevice, sizeof(deInt32));
266 const VkDeviceSize constantsBufferSizeBytes = numLayers * bufferChunkSize;
267 UniquePtr<Buffer> constantsBuffer (new Buffer(vk, device, allocator, makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
268 MemoryRequirement::HostVisible));
271 const Allocation& alloc = constantsBuffer->getAllocation();
272 deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());
274 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));
// Write layer index at the start of each chunk; padding stays zeroed.
276 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
278 deInt32* const valuePtr = reinterpret_cast<deInt32*>(basePtr + layerNdx * bufferChunkSize);
279 *valuePtr = layerNdx;
282 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), constantsBufferSizeBytes);
// Host-visible buffer that receives the checksum image contents for verification.
285 const VkDeviceSize resultBufferSizeBytes = getImageSizeBytes(caseDef.texture.size(), CHECKSUM_IMAGE_FORMAT);
286 UniquePtr<Buffer> resultBuffer (new Buffer(vk, device, allocator, makeBufferCreateInfo(resultBufferSizeBytes, VK_BUFFER_USAGE_TRANSFER_DST_BIT),
287 MemoryRequirement::HostVisible));
290 const Allocation& alloc = resultBuffer->getAllocation();
291 deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(resultBufferSizeBytes));
292 flushMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), resultBufferSizeBytes);
// Layout matches the shaders: binding 0 = constants UBO, 1 = MS image, 2 = checksum image.
297 Unique<VkDescriptorSetLayout> descriptorSetLayout(DescriptorSetLayoutBuilder()
298 .addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
299 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
300 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
// Pool is sized for the worst case (one set per layer in single-layer-bind mode).
303 Unique<VkDescriptorPool> descriptorPool(DescriptorPoolBuilder()
304 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
305 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
306 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
307 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers));
309 std::vector<SharedVkDescriptorSet> allDescriptorSets;
310 std::vector<SharedVkImageView> allMultisampledImageViews;
311 std::vector<SharedVkImageView> allChecksumImageViews;
313 insertDescriptorSets(vk, device, caseDef, *descriptorPool, *descriptorSetLayout, &allDescriptorSets);
314 insertImageViews (vk, device, caseDef, caseDef.format, **msImage, &allMultisampledImageViews);
315 insertImageViews (vk, device, caseDef, CHECKSUM_IMAGE_FORMAT, **checksumImage, &allChecksumImageViews);
319 const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout (vk, device, *descriptorSetLayout));
320 const Unique<VkCommandPool> cmdPool (makeCommandPool (vk, device, queueFamilyIndex));
321 const Unique<VkCommandBuffer> cmdBuffer (makeCommandBuffer (vk, device, *cmdPool));
// In single-layer-bind mode we dispatch per layer over a layer-sized work grid; otherwise
// one dispatch covers the full image (z dimension = layers for 2D arrays).
323 const tcu::IVec3 workSize = (caseDef.singleLayerBind ? caseDef.texture.layerSize() : caseDef.texture.size());
324 const int loopNumLayers = (caseDef.singleLayerBind ? numLayers : 1);
325 const VkImageSubresourceRange subresourceAllLayers = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, caseDef.texture.numLayers());
327 // Pass 1: Write MS image
329 const Unique<VkShaderModule> shaderModule (createShaderModule (vk, device, context.getBinaryCollection().get("comp_store"), 0));
330 const Unique<VkPipeline> pipeline (makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
332 beginCommandBuffer(vk, *cmdBuffer);
333 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
// Transition both images UNDEFINED -> GENERAL before the first shader writes.
336 const VkImageMemoryBarrier barriers[] =
338 makeImageMemoryBarrier((VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, **msImage, subresourceAllLayers),
339 makeImageMemoryBarrier((VkAccessFlags)0, VK_ACCESS_SHADER_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL, **checksumImage, subresourceAllLayers),
342 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0,
343 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
346 for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
348 const VkDescriptorSet descriptorSet = **allDescriptorSets[layerNdx];
349 const VkDescriptorImageInfo descriptorMultiImageInfo = makeDescriptorImageInfo(DE_NULL, **allMultisampledImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
// Each layer's set points at its own UBO chunk (offset layerNdx * bufferChunkSize).
350 const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(constantsBuffer->get(), layerNdx*bufferChunkSize, bufferChunkSize);
352 DescriptorSetUpdateBuilder()
353 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
354 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorMultiImageInfo)
357 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
358 vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
361 endCommandBuffer(vk, *cmdBuffer);
362 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
365 // Pass 2: "Resolve" MS image in compute shader
367 const Unique<VkShaderModule> shaderModule (createShaderModule (vk, device, context.getBinaryCollection().get("comp_load"), 0));
368 const Unique<VkPipeline> pipeline (makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
370 beginCommandBuffer(vk, *cmdBuffer);
371 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
// Make pass-1 writes visible to pass-2 reads (GENERAL -> GENERAL, write -> read).
374 const VkImageMemoryBarrier barriers[] =
376 makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL, **msImage, subresourceAllLayers),
379 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0,
380 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
383 for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
385 const VkDescriptorSet descriptorSet = **allDescriptorSets[layerNdx];
386 const VkDescriptorImageInfo descriptorMultiImageInfo = makeDescriptorImageInfo(DE_NULL, **allMultisampledImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
387 const VkDescriptorImageInfo descriptorChecksumImageInfo = makeDescriptorImageInfo(DE_NULL, **allChecksumImageViews[layerNdx], VK_IMAGE_LAYOUT_GENERAL);
388 const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(constantsBuffer->get(), layerNdx*bufferChunkSize, bufferChunkSize);
390 DescriptorSetUpdateBuilder()
391 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
392 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorMultiImageInfo)
393 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(2u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorChecksumImageInfo)
396 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
397 vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
400 endCommandBuffer(vk, *cmdBuffer);
401 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
// Readback: copy the checksum image to the host-visible result buffer.
406 beginCommandBuffer(vk, *cmdBuffer);
// Transition checksum image to TRANSFER_SRC_OPTIMAL and make shader writes visible to the copy.
409 const VkImageMemoryBarrier barriers[] =
411 makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **checksumImage, subresourceAllLayers),
413 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0,
414 0u, DE_NULL, 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers);
417 const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(caseDef.texture.layerSize()), caseDef.texture.numLayers());
418 vk.cmdCopyImageToBuffer(*cmdBuffer, **checksumImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, **resultBuffer, 1u, &copyRegion);
// Make the copy visible to host reads before the fence wait returns.
421 const VkBufferMemoryBarrier barriers[] =
423 makeBufferMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, **resultBuffer, 0ull, resultBufferSizeBytes),
425 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
426 0u, DE_NULL, DE_LENGTH_OF_ARRAY(barriers), barriers, 0u, DE_NULL);
429 endCommandBuffer(vk, *cmdBuffer);
430 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
// Host verification: every texel of every layer must report all samples matched.
435 const Allocation& alloc = resultBuffer->getAllocation();
436 invalidateMappedMemoryRange(vk, device, alloc.getMemory(), alloc.getOffset(), resultBufferSizeBytes);
438 const IVec3 imageSize = caseDef.texture.size();
439 const deInt32* pDataPtr = static_cast<deInt32*>(alloc.getHostPtr());
440 const deInt32 expectedChecksum = caseDef.texture.numSamples();
442 for (int layer = 0; layer < imageSize.z(); ++layer)
443 for (int y = 0; y < imageSize.y(); ++y)
444 for (int x = 0; x < imageSize.x(); ++x)
446 if (*pDataPtr != expectedChecksum)
448 context.getTestContext().getLog()
449 << tcu::TestLog::Message << "Some sample colors were incorrect at (x, y, layer) = (" << x << ", " << y << ", " << layer << ")" << tcu::TestLog::EndMessage
450 << tcu::TestLog::Message << "Checksum value is " << *pDataPtr << " but expected " << expectedChecksum << tcu::TestLog::EndMessage;
452 return tcu::TestStatus::fail("Some sample colors were incorrect");
457 return tcu::TestStatus::pass("OK");
// Build the test-case tree: texture type -> format (x optional "_single_layer" variant for
// arrays) -> sample count. Each leaf case registers initPrograms/test with its CaseDef.
463 tcu::TestCaseGroup* createImageMultisampleLoadStoreTests (tcu::TestContext& testCtx)
465 const Texture textures[] =
467 // \note Shader code is tweaked to work with image size of 32, take a look if this needs to be modified.
468 Texture(IMAGE_TYPE_2D, tcu::IVec3(32, 32, 1), 1),
469 Texture(IMAGE_TYPE_2D_ARRAY, tcu::IVec3(32, 32, 1), 4),
472 static const VkFormat formats[] =
474 VK_FORMAT_R32G32B32A32_SFLOAT,
475 VK_FORMAT_R16G16B16A16_SFLOAT,
476 VK_FORMAT_R32_SFLOAT,
478 VK_FORMAT_R32G32B32A32_UINT,
479 VK_FORMAT_R16G16B16A16_UINT,
480 VK_FORMAT_R8G8B8A8_UINT,
483 VK_FORMAT_R32G32B32A32_SINT,
484 VK_FORMAT_R16G16B16A16_SINT,
485 VK_FORMAT_R8G8B8A8_SINT,
488 VK_FORMAT_R8G8B8A8_UNORM,
490 VK_FORMAT_R8G8B8A8_SNORM,
493 static const VkSampleCountFlagBits samples[] =
495 VK_SAMPLE_COUNT_2_BIT,
496 VK_SAMPLE_COUNT_4_BIT,
497 VK_SAMPLE_COUNT_8_BIT,
498 VK_SAMPLE_COUNT_16_BIT,
499 VK_SAMPLE_COUNT_32_BIT,
500 VK_SAMPLE_COUNT_64_BIT,
503 MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store_multisample", "Multisampled image store and load"));
505 for (int baseTextureNdx = 0; baseTextureNdx < DE_LENGTH_OF_ARRAY(textures); ++baseTextureNdx)
507 const Texture& baseTexture = textures[baseTextureNdx];
508 MovePtr<tcu::TestCaseGroup> imageViewGroup (new tcu::TestCaseGroup(testCtx, getImageTypeName(baseTexture.type()).c_str(), ""));
// Single-layer bind mode only makes sense for layered (array) textures.
509 const int numLayerBindModes = (baseTexture.numLayers() == 1 ? 1 : 2);
511 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(formats); ++formatNdx)
512 for (int layerBindMode = 0; layerBindMode < numLayerBindModes; ++layerBindMode)
514 const bool singleLayerBind = (layerBindMode != 0);
515 const std::string formatGroupName = getFormatShortString(formats[formatNdx]) + (singleLayerBind ? "_single_layer" : "");
516 MovePtr<tcu::TestCaseGroup> formatGroup (new tcu::TestCaseGroup(testCtx, formatGroupName.c_str(), ""));
518 for (int samplesNdx = 0; samplesNdx < DE_LENGTH_OF_ARRAY(samples); ++samplesNdx)
520 const std::string samplesCaseName = "samples_" + de::toString(samples[samplesNdx]);
// Case texture = base texture with this iteration's sample count applied.
522 const CaseDef caseDef =
524 Texture(baseTexture, samples[samplesNdx]),
530 addFunctionCaseWithPrograms(formatGroup.get(), samplesCaseName, "", initPrograms, test, caseDef);
532 imageViewGroup->addChild(formatGroup.release());
534 testGroup->addChild(imageViewGroup.release());
537 return testGroup.release();