1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
13 * http://www.apache.org/licenses/LICENSE-2.0
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
23 * \brief Vulkan ShaderRenderCase
24 *//*--------------------------------------------------------------------*/
26 #include "vktShaderRender.hpp"
28 #include "tcuImageCompare.hpp"
29 #include "tcuImageIO.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuTextureUtil.hpp"
32 #include "tcuSurface.hpp"
33 #include "tcuVector.hpp"
35 #include "deFilePath.hpp"
37 #include "deUniquePtr.hpp"
39 #include "vkDeviceUtil.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkPlatform.hpp"
42 #include "vkQueryUtil.hpp"
44 #include "vkRefUtil.hpp"
45 #include "vkStrUtil.hpp"
46 #include "vkTypeUtil.hpp"
// Tessellation density of the rendered quad when evaluating in the vertex shader.
61 static const int GRID_SIZE = 64;
// Upper bound on the framebuffer dimensions; requested render sizes are clamped to these.
62 static const deUint32 MAX_RENDER_WIDTH = 128;
63 static const deUint32 MAX_RENDER_HEIGHT = 128;
// Clear color used unless a test instance overrides m_clearColor.
64 static const tcu::Vec4 DEFAULT_CLEAR_COLOR = tcu::Vec4(0.125f, 0.25f, 0.5f, 1.0f);
66 static VkImageViewType textureTypeToImageViewType (TextureBinding::Type type)
70 case TextureBinding::TYPE_1D: return VK_IMAGE_VIEW_TYPE_1D;
71 case TextureBinding::TYPE_2D: return VK_IMAGE_VIEW_TYPE_2D;
72 case TextureBinding::TYPE_3D: return VK_IMAGE_VIEW_TYPE_3D;
73 case TextureBinding::TYPE_CUBE_MAP: return VK_IMAGE_VIEW_TYPE_CUBE;
74 case TextureBinding::TYPE_1D_ARRAY: return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
75 case TextureBinding::TYPE_2D_ARRAY: return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
76 case TextureBinding::TYPE_CUBE_ARRAY: return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
79 DE_FATAL("Impossible");
80 return (VkImageViewType)0;
84 static VkImageType viewTypeToImageType (VkImageViewType type)
88 case VK_IMAGE_VIEW_TYPE_1D:
89 case VK_IMAGE_VIEW_TYPE_1D_ARRAY: return VK_IMAGE_TYPE_1D;
90 case VK_IMAGE_VIEW_TYPE_2D:
91 case VK_IMAGE_VIEW_TYPE_2D_ARRAY: return VK_IMAGE_TYPE_2D;
92 case VK_IMAGE_VIEW_TYPE_3D: return VK_IMAGE_TYPE_3D;
93 case VK_IMAGE_VIEW_TYPE_CUBE:
94 case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY: return VK_IMAGE_TYPE_2D;
97 DE_FATAL("Impossible");
98 return (VkImageType)0;
102 /*! Gets the next multiple of a given divisor */
103 static deUint32 getNextMultiple (deUint32 divisor, deUint32 value)
105 if (value % divisor == 0)
109 return value + divisor - (value % divisor);
112 /*! Gets the next value that is multiple of all given divisors */
// Starts at 'value' and searches upward; candidates are advanced by rounding up
// to the next multiple of divisors[0], then re-checked against every divisor.
113 static deUint32 getNextMultiple (const std::vector<deUint32>& divisors, deUint32 value)
115 deUint32 nextMultiple = value;
116 bool nextMultipleFound = false;
// Assume the candidate is valid, then veto it if any divisor does not divide it evenly.
120 nextMultipleFound = true;
122 for (size_t divNdx = 0; divNdx < divisors.size(); divNdx++)
123 nextMultipleFound = nextMultipleFound && (nextMultiple % divisors[divNdx] == 0);
125 if (nextMultipleFound)
// Guard against deUint32 wrap-around before advancing to the next candidate.
128 DE_ASSERT(nextMultiple < ~((deUint32)0u));
// Skip ahead using the single-divisor overload; remaining divisors are re-checked above.
129 nextMultiple = getNextMultiple(divisors[0], nextMultiple + 1);
// Builds a (gridSize+1)^2-vertex tessellated quad with per-vertex coordinates,
// user attributes (one per transform matrix) and triangle indices.
142 QuadGrid (int gridSize,
145 const tcu::Vec4& constCoords,
146 const std::vector<tcu::Mat4>& userAttribTransforms,
147 const std::vector<TextureBindingSp>& textures);
// Simple accessors over the precomputed grid data.
150 int getGridSize (void) const { return m_gridSize; }
151 int getNumVertices (void) const { return m_numVertices; }
152 int getNumTriangles (void) const { return m_numTriangles; }
153 const tcu::Vec4& getConstCoords (void) const { return m_constCoords; }
154 const std::vector<tcu::Mat4> getUserAttribTransforms (void) const { return m_userAttribTransforms; }
155 const std::vector<TextureBindingSp>& getTextures (void) const { return m_textures; }
// Raw per-vertex array accessors; array length is getNumVertices() entries.
157 const tcu::Vec4* getPositions (void) const { return &m_positions[0]; }
158 const float* getAttribOne (void) const { return &m_attribOne[0]; }
159 const tcu::Vec4* getCoords (void) const { return &m_coords[0]; }
160 const tcu::Vec4* getUnitCoords (void) const { return &m_unitCoords[0]; }
162 const tcu::Vec4* getUserAttrib (int attribNdx) const { return &m_userAttribs[attribNdx][0]; }
// Index buffer: 3 * getNumTriangles() 16-bit indices.
163 const deUint16* getIndices (void) const { return &m_indices[0]; }
// Analytic evaluation of the interpolated values at normalized grid position (sx, sy).
165 tcu::Vec4 getCoords (float sx, float sy) const;
166 tcu::Vec4 getUnitCoords (float sx, float sy) const;
168 int getNumUserAttribs (void) const { return (int)m_userAttribTransforms.size(); }
169 tcu::Vec4 getUserAttrib (int attribNdx, float sx, float sy) const;
172 const int m_gridSize;
173 const int m_numVertices;
174 const int m_numTriangles;
175 const tcu::Vec4 m_constCoords;
176 const std::vector<tcu::Mat4> m_userAttribTransforms;
// Reference member: textures are owned by the caller, not by QuadGrid.
178 const std::vector<TextureBindingSp>& m_textures;
180 std::vector<tcu::Vec4> m_screenPos;
181 std::vector<tcu::Vec4> m_positions;
182 std::vector<tcu::Vec4> m_coords; //!< Near-unit coordinates, roughly [-2.0 .. 2.0].
183 std::vector<tcu::Vec4> m_unitCoords; //!< Positive-only coordinates [0.0 .. 1.5].
184 std::vector<float> m_attribOne;
185 std::vector<tcu::Vec4> m_userAttribs[ShaderEvalContext::MAX_TEXTURES];
186 std::vector<deUint16> m_indices;
// Precomputes all per-vertex data and the index buffer for the tessellated quad.
189 QuadGrid::QuadGrid (int gridSize,
192 const tcu::Vec4& constCoords,
193 const std::vector<tcu::Mat4>& userAttribTransforms,
194 const std::vector<TextureBindingSp>& textures)
195 : m_gridSize (gridSize)
196 , m_numVertices ((gridSize + 1) * (gridSize + 1))
197 , m_numTriangles (gridSize * gridSize * 2)
198 , m_constCoords (constCoords)
199 , m_userAttribTransforms (userAttribTransforms)
200 , m_textures (textures)
// Scales normalized [0,1] grid positions to pixel-space screen positions.
202 const tcu::Vec4 viewportScale ((float)width, (float)height, 0.0f, 0.0f);
205 m_screenPos.resize(m_numVertices);
206 m_positions.resize(m_numVertices);
207 m_coords.resize(m_numVertices);
208 m_unitCoords.resize(m_numVertices);
209 m_attribOne.resize(m_numVertices);
212 for (int attrNdx = 0; attrNdx < DE_LENGTH_OF_ARRAY(m_userAttribs); attrNdx++)
213 m_userAttribs[attrNdx].resize(m_numVertices);
// Fill per-vertex data; (sx, sy) are normalized [0,1] grid coordinates,
// (fx, fy) the corresponding clip-space [-1,1] positions.
215 for (int y = 0; y < gridSize+1; y++)
216 for (int x = 0; x < gridSize+1; x++)
218 float sx = (float)x / (float)gridSize;
219 float sy = (float)y / (float)gridSize;
220 float fx = 2.0f * sx - 1.0f;
221 float fy = 2.0f * sy - 1.0f;
222 int vtxNdx = ((y * (gridSize+1)) + x);
224 m_positions[vtxNdx] = tcu::Vec4(fx, fy, 0.0f, 1.0f);
225 m_coords[vtxNdx] = getCoords(sx, sy);
226 m_unitCoords[vtxNdx] = getUnitCoords(sx, sy);
227 m_attribOne[vtxNdx] = 1.0f;
229 m_screenPos[vtxNdx] = tcu::Vec4(sx, sy, 0.0f, 1.0f) * viewportScale;
231 for (int attribNdx = 0; attribNdx < getNumUserAttribs(); attribNdx++)
232 m_userAttribs[attribNdx][vtxNdx] = getUserAttrib(attribNdx, sx, sy);
// Index buffer: each grid cell becomes two triangles (6 indices).
236 m_indices.resize(3 * m_numTriangles);
237 for (int y = 0; y < gridSize; y++)
238 for (int x = 0; x < gridSize; x++)
240 int stride = gridSize + 1;
241 int v00 = (y * stride) + x;
242 int v01 = (y * stride) + x + 1;
243 int v10 = ((y+1) * stride) + x;
244 int v11 = ((y+1) * stride) + x + 1;
246 int baseNdx = ((y * gridSize) + x) * 6;
247 m_indices[baseNdx + 0] = (deUint16)v10;
248 m_indices[baseNdx + 1] = (deUint16)v00;
249 m_indices[baseNdx + 2] = (deUint16)v01;
251 m_indices[baseNdx + 3] = (deUint16)v10;
252 m_indices[baseNdx + 4] = (deUint16)v01;
253 m_indices[baseNdx + 5] = (deUint16)v11;
// Trivial destructor; m_textures is a reference so no texture is freed here.
257 QuadGrid::~QuadGrid (void)
261 inline tcu::Vec4 QuadGrid::getCoords (float sx, float sy) const
263 const float fx = 2.0f * sx - 1.0f;
264 const float fy = 2.0f * sy - 1.0f;
265 return tcu::Vec4(fx, fy, -fx + 0.33f*fy, -0.275f*fx - fy);
268 inline tcu::Vec4 QuadGrid::getUnitCoords (float sx, float sy) const
270 return tcu::Vec4(sx, sy, 0.33f*sx + 0.5f*sy, 0.5f*sx + 0.25f*sy);
// Evaluates user attribute attribNdx at grid position (sx, sy) by applying the
// corresponding transform matrix to the homogeneous position.
273 inline tcu::Vec4 QuadGrid::getUserAttrib (int attribNdx, float sx, float sy) const
275 // homogeneous normalized screen-space coordinates
276 return m_userAttribTransforms[attribNdx] * tcu::Vec4(sx, sy, 0.0f, 1.0f);
// Constructs a binding by loading the texture image from a test archive.
// Only the 2D texture path is implemented; other types hit DE_FATAL.
281 TextureBinding::TextureBinding (const tcu::Archive& archive,
282 const char* filename,
284 const tcu::Sampler& sampler)
286 , m_sampler (sampler)
// Ownership of the loaded texture is transferred into the binding union.
290 case TYPE_2D: m_binding.tex2D = loadTexture2D(archive, filename).release(); break;
292 DE_FATAL("Unsupported texture type");
// One constructor per texture dimensionality. Each stores the raw pointer in the
// m_binding union; the destructor deletes it, so the binding takes ownership.
296 TextureBinding::TextureBinding (const tcu::Texture1D* tex1D, const tcu::Sampler& sampler)
298 , m_sampler (sampler)
300 m_binding.tex1D = tex1D;
303 TextureBinding::TextureBinding (const tcu::Texture2D* tex2D, const tcu::Sampler& sampler)
305 , m_sampler (sampler)
307 m_binding.tex2D = tex2D;
310 TextureBinding::TextureBinding (const tcu::Texture3D* tex3D, const tcu::Sampler& sampler)
312 , m_sampler (sampler)
314 m_binding.tex3D = tex3D;
317 TextureBinding::TextureBinding (const tcu::TextureCube* texCube, const tcu::Sampler& sampler)
318 : m_type (TYPE_CUBE_MAP)
319 , m_sampler (sampler)
321 m_binding.texCube = texCube;
324 TextureBinding::TextureBinding (const tcu::Texture1DArray* tex1DArray, const tcu::Sampler& sampler)
325 : m_type (TYPE_1D_ARRAY)
326 , m_sampler (sampler)
328 m_binding.tex1DArray = tex1DArray;
331 TextureBinding::TextureBinding (const tcu::Texture2DArray* tex2DArray, const tcu::Sampler& sampler)
332 : m_type (TYPE_2D_ARRAY)
333 , m_sampler (sampler)
335 m_binding.tex2DArray = tex2DArray;
338 TextureBinding::TextureBinding (const tcu::TextureCubeArray* texCubeArray, const tcu::Sampler& sampler)
339 : m_type (TYPE_CUBE_ARRAY)
340 , m_sampler (sampler)
342 m_binding.texCubeArray = texCubeArray;
// Frees the owned texture through the union member matching m_type
// (deleting through the wrong member would be undefined behaviour).
345 TextureBinding::~TextureBinding (void)
349 case TYPE_1D: delete m_binding.tex1D; break;
350 case TYPE_2D: delete m_binding.tex2D; break;
351 case TYPE_3D: delete m_binding.tex3D; break;
352 case TYPE_CUBE_MAP: delete m_binding.texCube; break;
353 case TYPE_1D_ARRAY: delete m_binding.tex1DArray; break;
354 case TYPE_2D_ARRAY: delete m_binding.tex2DArray; break;
355 case TYPE_CUBE_ARRAY: delete m_binding.texCubeArray; break;
// Loads a 2D image from the archive and converts it into a single-level RGBA8
// tcu::Texture2D. Only UNORM8 RGB/RGBA source images are accepted.
360 de::MovePtr<tcu::Texture2D> TextureBinding::loadTexture2D (const tcu::Archive& archive, const char* filename)
362 tcu::TextureLevel level;
363 tcu::ImageIO::loadImage(level, archive, filename);
365 TCU_CHECK_INTERNAL(level.getFormat() == tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8) ||
366 level.getFormat() == tcu::TextureFormat(tcu::TextureFormat::RGB, tcu::TextureFormat::UNORM_INT8));
368 // \todo [2015-10-08 elecro] for some reason we get better when using RGBA texture even in RGB case, this needs to be investigated
369 de::MovePtr<tcu::Texture2D> texture(new tcu::Texture2D(tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8), level.getWidth(), level.getHeight()));
// Copy (and if needed expand RGB->RGBA) the loaded pixels into mip level 0.
372 texture->allocLevel(0);
373 tcu::copy(texture->getLevel(0), level.getAccess());
378 // ShaderEvalContext.
// Caches non-owning texture pointers from the quad grid's bindings so the
// evaluation functions can sample them without indirection through TextureBinding.
380 ShaderEvalContext::ShaderEvalContext (const QuadGrid& quadGrid)
381 : constCoords (quadGrid.getConstCoords())
382 , isDiscarded (false)
383 , m_quadGrid (quadGrid)
385 const std::vector<TextureBindingSp>& bindings = m_quadGrid.getTextures();
386 DE_ASSERT((int)bindings.size() <= MAX_TEXTURES);
388 // Fill in texture array.
389 for (int ndx = 0; ndx < (int)bindings.size(); ndx++)
391 const TextureBinding& binding = *bindings[ndx];
// Unbound slots are skipped; their pointers stay in the default state.
393 if (binding.getType() == TextureBinding::TYPE_NONE)
396 textures[ndx].sampler = binding.getSampler();
398 switch (binding.getType())
400 case TextureBinding::TYPE_1D: textures[ndx].tex1D = &binding.get1D(); break;
401 case TextureBinding::TYPE_2D: textures[ndx].tex2D = &binding.get2D(); break;
402 case TextureBinding::TYPE_3D: textures[ndx].tex3D = &binding.get3D(); break;
403 case TextureBinding::TYPE_CUBE_MAP: textures[ndx].texCube = &binding.getCube(); break;
404 case TextureBinding::TYPE_1D_ARRAY: textures[ndx].tex1DArray = &binding.get1DArray(); break;
405 case TextureBinding::TYPE_2D_ARRAY: textures[ndx].tex2DArray = &binding.get2DArray(); break;
406 case TextureBinding::TYPE_CUBE_ARRAY: textures[ndx].texCubeArray = &binding.getCubeArray(); break;
408 TCU_THROW(InternalError, "Handling of texture binding type not implemented");
// Trivial destructor; cached texture pointers are non-owning (owned by TextureBinding).
413 ShaderEvalContext::~ShaderEvalContext (void)
// Re-initializes per-fragment evaluation state for grid position (sx, sy)
// before an evaluation function is invoked.
417 void ShaderEvalContext::reset (float sx, float sy)
// Default output color (opaque black) until the eval function overwrites it.
420 color = tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
424 coords = m_quadGrid.getCoords(sx, sy);
425 unitCoords = m_quadGrid.getUnitCoords(sx, sy);
427 // Compute user attributes.
428 const int numAttribs = m_quadGrid.getNumUserAttribs();
429 DE_ASSERT(numAttribs <= MAX_USER_ATTRIBS);
430 for (int attribNdx = 0; attribNdx < numAttribs; attribNdx++)
431 in[attribNdx] = m_quadGrid.getUserAttrib(attribNdx, sx, sy);
434 tcu::Vec4 ShaderEvalContext::texture2D (int unitNdx, const tcu::Vec2& texCoords)
436 if (textures[unitNdx].tex2D)
437 return textures[unitNdx].tex2D->sample(textures[unitNdx].sampler, texCoords.x(), texCoords.y(), 0.0f);
439 return tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f);
// Wraps an optional C-style evaluation callback; the default-constructed
// evaluator has no function and must not be evaluated (asserted below).
444 ShaderEvaluator::ShaderEvaluator (void)
445 : m_evalFunc(DE_NULL)
449 ShaderEvaluator::ShaderEvaluator (ShaderEvalFunc evalFunc)
450 : m_evalFunc(evalFunc)
454 ShaderEvaluator::~ShaderEvaluator (void)
// Invokes the wrapped callback; callers must have supplied one.
458 void ShaderEvaluator::evaluate (ShaderEvalContext& ctx) const
460 DE_ASSERT(m_evalFunc);
// Wraps an optional C-style uniform-setup callback; a default-constructed
// setup simply performs no uniform registration.
466 UniformSetup::UniformSetup (void)
467 : m_setupFunc(DE_NULL)
471 UniformSetup::UniformSetup (UniformSetupFunc setupFunc)
472 : m_setupFunc(setupFunc)
476 UniformSetup::~UniformSetup (void)
// Forwards to the callback when one was provided.
480 void UniformSetup::setup (ShaderRenderCaseInstance& instance, const tcu::Vec4& constCoords) const
483 m_setupFunc(instance, constCoords);
// Test-case wrapper: owns the evaluator/uniform setup used by instances.
// This overload wraps a bare eval function in a heap-allocated ShaderEvaluator.
488 ShaderRenderCase::ShaderRenderCase (tcu::TestContext& testCtx,
489 const std::string& name,
490 const std::string& description,
491 const bool isVertexCase,
492 const ShaderEvalFunc evalFunc,
493 const UniformSetup* uniformSetup,
494 const AttributeSetupFunc attribFunc)
495 : vkt::TestCase (testCtx, name, description)
496 , m_isVertexCase (isVertexCase)
497 , m_evaluator (new ShaderEvaluator(evalFunc))
// A null uniformSetup is replaced by a no-op default so setup() is always callable.
498 , m_uniformSetup (uniformSetup ? uniformSetup : new UniformSetup())
499 , m_attribFunc (attribFunc)
// Overload taking an already-constructed evaluator (ownership transferred).
502 ShaderRenderCase::ShaderRenderCase (tcu::TestContext& testCtx,
503 const std::string& name,
504 const std::string& description,
505 const bool isVertexCase,
506 const ShaderEvaluator* evaluator,
507 const UniformSetup* uniformSetup,
508 const AttributeSetupFunc attribFunc)
509 : vkt::TestCase (testCtx, name, description)
510 , m_isVertexCase (isVertexCase)
511 , m_evaluator (evaluator)
512 , m_uniformSetup (uniformSetup ? uniformSetup : new UniformSetup())
513 , m_attribFunc (attribFunc)
516 ShaderRenderCase::~ShaderRenderCase (void)
// Registers the case's GLSL sources under the fixed names "vert"/"frag",
// matching the shader names the instance looks up at render time.
520 void ShaderRenderCase::initPrograms (vk::SourceCollections& programCollection) const
522 programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
523 programCollection.glslSources.add("frag") << glu::FragmentSource(m_fragShaderSource);
// Factory for the per-execution instance; evaluator and uniform setup must
// have been provided (both constructors guarantee non-null values).
526 TestInstance* ShaderRenderCase::createInstance (Context& context) const
528 DE_ASSERT(m_evaluator != DE_NULL);
529 DE_ASSERT(m_uniformSetup != DE_NULL);
530 return new ShaderRenderCaseInstance(context, m_isVertexCase, *m_evaluator, *m_uniformSetup, m_attribFunc);
533 // ShaderRenderCaseInstance.
// Default instance: regular (non-sparse) image backing, no evaluator/setup.
// NOTE: m_sparseContext is initialized before m_memAlloc because getAllocator()
// dereferences m_sparseContext when sparse backing is selected.
535 ShaderRenderCaseInstance::ShaderRenderCaseInstance (Context& context)
536 : vkt::TestInstance (context)
537 , m_imageBackingMode (IMAGE_BACKING_MODE_REGULAR)
538 , m_sparseContext (createSparseContext())
539 , m_memAlloc (getAllocator())
540 , m_clearColor (DEFAULT_CLEAR_COLOR)
541 , m_isVertexCase (false)
542 , m_vertexShaderName ("vert")
543 , m_fragmentShaderName ("frag")
544 , m_renderSize (128, 128)
545 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
546 , m_evaluator (DE_NULL)
547 , m_uniformSetup (DE_NULL)
548 , m_attribFunc (DE_NULL)
549 , m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
// Reference-taking overload: evaluator/uniformSetup are borrowed, not owned.
554 ShaderRenderCaseInstance::ShaderRenderCaseInstance (Context& context,
555 const bool isVertexCase,
556 const ShaderEvaluator& evaluator,
557 const UniformSetup& uniformSetup,
558 const AttributeSetupFunc attribFunc,
559 const ImageBackingMode imageBackingMode)
560 : vkt::TestInstance (context)
561 , m_imageBackingMode (imageBackingMode)
562 , m_sparseContext (createSparseContext())
563 , m_memAlloc (getAllocator())
564 , m_clearColor (DEFAULT_CLEAR_COLOR)
565 , m_isVertexCase (isVertexCase)
566 , m_vertexShaderName ("vert")
567 , m_fragmentShaderName ("frag")
568 , m_renderSize (128, 128)
569 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
570 , m_evaluator (&evaluator)
571 , m_uniformSetup (&uniformSetup)
572 , m_attribFunc (attribFunc)
573 , m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
// Pointer-taking overload: evaluator/uniformSetup may be DE_NULL.
577 ShaderRenderCaseInstance::ShaderRenderCaseInstance (Context& context,
578 const bool isVertexCase,
579 const ShaderEvaluator* evaluator,
580 const UniformSetup* uniformSetup,
581 const AttributeSetupFunc attribFunc,
582 const ImageBackingMode imageBackingMode)
583 : vkt::TestInstance (context)
584 , m_imageBackingMode (imageBackingMode)
585 , m_sparseContext (createSparseContext())
586 , m_memAlloc (getAllocator())
587 , m_clearColor (DEFAULT_CLEAR_COLOR)
588 , m_isVertexCase (isVertexCase)
589 , m_vertexShaderName ("vert")
590 , m_fragmentShaderName ("frag")
591 , m_renderSize (128, 128)
592 , m_colorFormat (VK_FORMAT_R8G8B8A8_UNORM)
593 , m_evaluator (evaluator)
594 , m_uniformSetup (uniformSetup)
595 , m_attribFunc (attribFunc)
596 , m_sampleCount (VK_SAMPLE_COUNT_1_BIT)
600 static deUint32 findQueueFamilyIndexWithCaps (const InstanceInterface& vkInstance, VkPhysicalDevice physicalDevice, VkQueueFlags requiredCaps)
602 const std::vector<VkQueueFamilyProperties> queueProps = getPhysicalDeviceQueueFamilyProperties(vkInstance, physicalDevice);
604 for (size_t queueNdx = 0; queueNdx < queueProps.size(); queueNdx++)
606 if ((queueProps[queueNdx].queueFlags & requiredCaps) == requiredCaps)
607 return (deUint32)queueNdx;
610 TCU_THROW(NotSupportedError, "No matching queue found");
// Builds an independent device/queue/allocator for sparse-resource tests: the
// queue family must support both graphics and sparse binding.
// NOTE: member order matters — m_device must precede m_deviceInterface,
// m_queue and m_allocator, all of which use *m_device during initialization.
614 ShaderRenderCaseInstance::SparseContext::SparseContext (vkt::Context& context)
615 : m_context (context)
616 , m_queueFamilyIndex (findQueueFamilyIndexWithCaps(context.getInstanceInterface(), context.getPhysicalDevice(), VK_QUEUE_GRAPHICS_BIT|VK_QUEUE_SPARSE_BINDING_BIT))
617 , m_device (createDevice())
618 , m_deviceInterface (context.getInstanceInterface(), *m_device)
619 , m_queue (getDeviceQueue(m_deviceInterface, *m_device, m_queueFamilyIndex, 0))
620 , m_allocator (createAllocator())
// Creates a logical device with one queue from the sparse-capable family and
// all features the physical device supports enabled (so sparse features are on).
624 Move<VkDevice> ShaderRenderCaseInstance::SparseContext::createDevice () const
626 const InstanceInterface& vk = m_context.getInstanceInterface();
627 const VkPhysicalDevice physicalDevice = m_context.getPhysicalDevice();
628 const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(vk, physicalDevice);
630 VkDeviceQueueCreateInfo queueInfo;
631 VkDeviceCreateInfo deviceInfo;
632 const float queuePriority = 1.0f;
// Zero-fill first so any fields not set explicitly below are well-defined.
634 deMemset(&queueInfo, 0, sizeof(queueInfo));
635 deMemset(&deviceInfo, 0, sizeof(deviceInfo));
637 queueInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
638 queueInfo.pNext = DE_NULL;
639 queueInfo.flags = (VkDeviceQueueCreateFlags)0u;
640 queueInfo.queueFamilyIndex = m_queueFamilyIndex;
641 queueInfo.queueCount = 1u;
642 queueInfo.pQueuePriorities = &queuePriority;
644 deviceInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
645 deviceInfo.pNext = DE_NULL;
646 deviceInfo.queueCreateInfoCount = 1u;
647 deviceInfo.pQueueCreateInfos = &queueInfo;
648 deviceInfo.enabledExtensionCount = 0u;
649 deviceInfo.ppEnabledExtensionNames = DE_NULL;
650 deviceInfo.enabledLayerCount = 0u;
651 deviceInfo.ppEnabledLayerNames = DE_NULL;
652 deviceInfo.pEnabledFeatures = &deviceFeatures;
654 return vk::createDevice(vk, physicalDevice, &deviceInfo);
// Returns a heap-allocated allocator for the sparse device; ownership is taken
// by the m_allocator member initialized from this call in the constructor.
657 vk::Allocator* ShaderRenderCaseInstance::SparseContext::createAllocator () const
659 const VkPhysicalDeviceMemoryProperties memoryProperties = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
660 return new SimpleAllocator(m_deviceInterface, *m_device, memoryProperties);
// Creates a SparseContext only when the instance uses sparse image backing;
// in regular mode no context is created (sparse accessors are never reached).
663 ShaderRenderCaseInstance::SparseContext* ShaderRenderCaseInstance::createSparseContext (void) const
665 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
667 return new SparseContext(m_context);
// Allocator selection: the sparse context's allocator in sparse backing mode,
// otherwise the default context allocator. Requires m_sparseContext to be
// initialized first when sparse mode is active (see constructor init order).
673 vk::Allocator& ShaderRenderCaseInstance::getAllocator (void) const
675 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
677 return *m_sparseContext->m_allocator;
680 return m_context.getDefaultAllocator();
// Out-of-line destructor; members (including the optional sparse context) clean up via RAII wrappers.
683 ShaderRenderCaseInstance::~ShaderRenderCaseInstance (void)
687 VkDevice ShaderRenderCaseInstance::getDevice (void) const
689 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
690 return *m_sparseContext->m_device;
692 return m_context.getDevice();
695 deUint32 ShaderRenderCaseInstance::getUniversalQueueFamilyIndex (void) const
697 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
698 return m_sparseContext->m_queueFamilyIndex;
700 return m_context.getUniversalQueueFamilyIndex();
703 const DeviceInterface& ShaderRenderCaseInstance::getDeviceInterface (void) const
705 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
706 return m_sparseContext->m_deviceInterface;
708 return m_context.getDeviceInterface();
711 VkQueue ShaderRenderCaseInstance::getUniversalQueue (void) const
713 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
714 return m_sparseContext->m_queue;
716 return m_context.getUniversalQueue();
// The physical device is shared regardless of backing mode; only the logical
// device/queue differ for the sparse path.
719 VkPhysicalDevice ShaderRenderCaseInstance::getPhysicalDevice (void) const
721 // Same in sparse and regular case
722 return m_context.getPhysicalDevice();
// The instance-level interface is shared regardless of backing mode.
725 const InstanceInterface& ShaderRenderCaseInstance::getInstanceInterface (void) const
727 // Same in sparse and regular case
728 return m_context.getInstanceInterface();
// Main test body: builds the quad grid, renders it with the case's shaders,
// computes a CPU reference image and compares the two with a fuzzy threshold.
731 tcu::TestStatus ShaderRenderCaseInstance::iterate (void)
736 const tcu::UVec2 viewportSize = getViewportSize();
737 const int width = viewportSize.x();
738 const int height = viewportSize.y();
// Vertex-shader cases need the fine GRID_SIZE tessellation so the evaluated
// values are interpolated across the surface; fragment cases only need 4x4.
740 m_quadGrid = de::MovePtr<QuadGrid>(new QuadGrid(m_isVertexCase ? GRID_SIZE : 4, width, height, getDefaultConstCoords(), m_userAttribTransforms, m_textures));
743 tcu::Surface resImage (width, height);
745 render(m_quadGrid->getNumVertices(), m_quadGrid->getNumTriangles(), m_quadGrid->getIndices(), m_quadGrid->getConstCoords());
746 tcu::copy(resImage.getAccess(), m_resultImage.getAccess());
748 // Compute reference.
749 tcu::Surface refImage (width, height);
751 computeVertexReference(refImage, *m_quadGrid);
753 computeFragmentReference(refImage, *m_quadGrid);
// Fuzzy comparison with 0.1 threshold tolerates rasterization differences.
756 const bool compareOk = compareImages(resImage, refImage, 0.1f);
759 return tcu::TestStatus::pass("Result image matches reference");
761 return tcu::TestStatus::fail("Image mismatch");
// Resets all per-render state so a render pass can be set up from scratch
// (builders are re-created, buffers and descriptor bookkeeping cleared).
764 void ShaderRenderCaseInstance::setup (void)
766 m_resultImage = tcu::TextureLevel();
767 m_descriptorSetLayoutBuilder = de::MovePtr<DescriptorSetLayoutBuilder> (new DescriptorSetLayoutBuilder());
768 m_descriptorPoolBuilder = de::MovePtr<DescriptorPoolBuilder> (new DescriptorPoolBuilder());
769 m_descriptorSetUpdateBuilder = de::MovePtr<DescriptorSetUpdateBuilder> (new DescriptorSetUpdateBuilder());
771 m_uniformInfos.clear();
772 m_vertexBindingDescription.clear();
773 m_vertexAttributeDescription.clear();
774 m_vertexBuffers.clear();
775 m_vertexBufferAllocs.clear();
776 m_pushConstantRanges.clear();
// Creates a host-visible uniform buffer, copies 'size' bytes from dataPtr into
// it and records the binding for later descriptor-set updates.
779 void ShaderRenderCaseInstance::setupUniformData (deUint32 bindingLocation, size_t size, const void* dataPtr)
781 const VkDevice vkDevice = getDevice();
782 const DeviceInterface& vk = getDeviceInterface();
783 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
785 const VkBufferCreateInfo uniformBufferParams =
787 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
788 DE_NULL, // const void* pNext;
789 0u, // VkBufferCreateFlags flags;
790 size, // VkDeviceSize size;
791 VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, // VkBufferUsageFlags usage;
792 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
793 1u, // deUint32 queueFamilyCount;
794 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
797 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &uniformBufferParams);
798 de::MovePtr<Allocation> alloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
799 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
// Flush makes host writes visible to the device (memory may be non-coherent).
801 deMemcpy(alloc->getHostPtr(), dataPtr, size);
802 flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), size);
// Record ownership of the buffer and allocation alongside descriptor info.
804 de::MovePtr<BufferUniform> uniformInfo(new BufferUniform());
805 uniformInfo->type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
806 uniformInfo->descriptor = makeDescriptorBufferInfo(*buffer, 0u, size);
807 uniformInfo->location = bindingLocation;
808 uniformInfo->buffer = VkBufferSp(new vk::Unique<VkBuffer>(buffer));
809 uniformInfo->alloc = AllocationSp(alloc.release());
811 m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(uniformInfo)));
// Registers a uniform at the given binding: declares it in the descriptor-set
// layout and pool builders and uploads the backing data.
814 void ShaderRenderCaseInstance::addUniform (deUint32 bindingLocation, vk::VkDescriptorType descriptorType, size_t dataSize, const void* data)
816 m_descriptorSetLayoutBuilder->addSingleBinding(descriptorType, vk::VK_SHADER_STAGE_ALL);
817 m_descriptorPoolBuilder->addType(descriptorType);
819 setupUniformData(bindingLocation, dataSize, data);
// Declares a per-vertex attribute (binding + attribute descriptions) and
// uploads its data into a dedicated host-visible vertex buffer.
822 void ShaderRenderCaseInstance::addAttribute (deUint32 bindingLocation,
824 deUint32 sizePerElement,
828 // Add binding specification
// Each attribute gets its own vertex buffer binding, numbered sequentially.
829 const deUint32 binding = (deUint32)m_vertexBindingDescription.size();
830 const VkVertexInputBindingDescription bindingDescription =
832 binding, // deUint32 binding;
833 sizePerElement, // deUint32 stride;
834 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputRate stepRate;
837 m_vertexBindingDescription.push_back(bindingDescription);
839 // Add location and format specification
840 const VkVertexInputAttributeDescription attributeDescription =
842 bindingLocation, // deUint32 location;
843 binding, // deUint32 binding;
844 format, // VkFormat format;
845 0u, // deUint32 offset;
848 m_vertexAttributeDescription.push_back(attributeDescription);
850 // Upload data to buffer
851 const VkDevice vkDevice = getDevice();
852 const DeviceInterface& vk = getDeviceInterface();
853 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
855 const VkDeviceSize inputSize = sizePerElement * count;
856 const VkBufferCreateInfo vertexBufferParams =
858 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
859 DE_NULL, // const void* pNext;
860 0u, // VkBufferCreateFlags flags;
861 inputSize, // VkDeviceSize size;
862 VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // VkBufferUsageFlags usage;
863 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
864 1u, // deUint32 queueFamilyCount;
865 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
868 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &vertexBufferParams);
869 de::MovePtr<vk::Allocation> alloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
870 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, alloc->getMemory(), alloc->getOffset()));
// Flush makes host writes visible to the device (memory may be non-coherent).
872 deMemcpy(alloc->getHostPtr(), dataPtr, (size_t)inputSize);
873 flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), inputSize);
// Keep buffer and allocation alive for the duration of the render.
875 m_vertexBuffers.push_back(VkBufferSp(new vk::Unique<VkBuffer>(buffer)));
876 m_vertexBufferAllocs.push_back(AllocationSp(alloc.release()));
// Marks one of the built-in (base) attributes as enabled at the given location;
// actual data upload happens later when the render is set up.
879 void ShaderRenderCaseInstance::useAttribute (deUint32 bindingLocation, BaseAttributeType type)
881 const EnabledBaseAttribute attribute =
883 bindingLocation, // deUint32 location;
884 type // BaseAttributeType type;
886 m_enabledBaseAttributes.push_back(attribute);
// Delegates uniform registration to the case-provided UniformSetup.
// NOTE(review): m_uniformSetup may be DE_NULL for the default-constructed
// instance — presumably guarded before this call; confirm against callers.
889 void ShaderRenderCaseInstance::setupUniforms (const tcu::Vec4& constCoords)
892 m_uniformSetup->setup(*this, constCoords);
// Registers one of the predefined uniform values (bool/int/float and their
// vector variants) as a uniform buffer at the given binding location.
895 void ShaderRenderCaseInstance::useUniform (deUint32 bindingLocation, BaseUniformType type)
// Each case expands to addUniform(bindingLocation, UNIFORM_BUFFER, sizeof(value), &value).
897 #define UNIFORM_CASE(type, value) case type: addUniform(bindingLocation, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, value); break
902 UNIFORM_CASE(UB_FALSE, 0);
903 UNIFORM_CASE(UB_TRUE, 1);
906 UNIFORM_CASE(UB4_FALSE, tcu::Vec4(0));
907 UNIFORM_CASE(UB4_TRUE, tcu::Vec4(1));
910 UNIFORM_CASE(UI_ZERO, 0);
911 UNIFORM_CASE(UI_ONE, 1);
912 UNIFORM_CASE(UI_TWO, 2);
913 UNIFORM_CASE(UI_THREE, 3);
914 UNIFORM_CASE(UI_FOUR, 4);
915 UNIFORM_CASE(UI_FIVE, 5);
916 UNIFORM_CASE(UI_SIX, 6);
917 UNIFORM_CASE(UI_SEVEN, 7);
918 UNIFORM_CASE(UI_EIGHT, 8);
919 UNIFORM_CASE(UI_ONEHUNDREDONE, 101);
922 UNIFORM_CASE(UI2_MINUS_ONE, tcu::IVec2(-1));
923 UNIFORM_CASE(UI2_ZERO, tcu::IVec2(0));
924 UNIFORM_CASE(UI2_ONE, tcu::IVec2(1));
925 UNIFORM_CASE(UI2_TWO, tcu::IVec2(2));
926 UNIFORM_CASE(UI2_THREE, tcu::IVec2(3));
927 UNIFORM_CASE(UI2_FOUR, tcu::IVec2(4));
928 UNIFORM_CASE(UI2_FIVE, tcu::IVec2(5));
931 UNIFORM_CASE(UI3_MINUS_ONE, tcu::IVec3(-1));
932 UNIFORM_CASE(UI3_ZERO, tcu::IVec3(0));
933 UNIFORM_CASE(UI3_ONE, tcu::IVec3(1));
934 UNIFORM_CASE(UI3_TWO, tcu::IVec3(2));
935 UNIFORM_CASE(UI3_THREE, tcu::IVec3(3));
936 UNIFORM_CASE(UI3_FOUR, tcu::IVec3(4));
937 UNIFORM_CASE(UI3_FIVE, tcu::IVec3(5));
940 UNIFORM_CASE(UI4_MINUS_ONE, tcu::IVec4(-1));
941 UNIFORM_CASE(UI4_ZERO, tcu::IVec4(0));
942 UNIFORM_CASE(UI4_ONE, tcu::IVec4(1));
943 UNIFORM_CASE(UI4_TWO, tcu::IVec4(2));
944 UNIFORM_CASE(UI4_THREE, tcu::IVec4(3));
945 UNIFORM_CASE(UI4_FOUR, tcu::IVec4(4));
946 UNIFORM_CASE(UI4_FIVE, tcu::IVec4(5));
949 UNIFORM_CASE(UF_ZERO, 0.0f);
950 UNIFORM_CASE(UF_ONE, 1.0f);
951 UNIFORM_CASE(UF_TWO, 2.0f);
952 UNIFORM_CASE(UF_THREE, 3.0f);
953 UNIFORM_CASE(UF_FOUR, 4.0f);
954 UNIFORM_CASE(UF_FIVE, 5.0f);
955 UNIFORM_CASE(UF_SIX, 6.0f);
956 UNIFORM_CASE(UF_SEVEN, 7.0f);
957 UNIFORM_CASE(UF_EIGHT, 8.0f);
959 UNIFORM_CASE(UF_HALF, 1.0f / 2.0f);
960 UNIFORM_CASE(UF_THIRD, 1.0f / 3.0f);
961 UNIFORM_CASE(UF_FOURTH, 1.0f / 4.0f);
962 UNIFORM_CASE(UF_FIFTH, 1.0f / 5.0f);
963 UNIFORM_CASE(UF_SIXTH, 1.0f / 6.0f);
964 UNIFORM_CASE(UF_SEVENTH, 1.0f / 7.0f);
965 UNIFORM_CASE(UF_EIGHTH, 1.0f / 8.0f);
968 UNIFORM_CASE(UV2_MINUS_ONE, tcu::Vec2(-1.0f));
969 UNIFORM_CASE(UV2_ZERO, tcu::Vec2(0.0f));
970 UNIFORM_CASE(UV2_ONE, tcu::Vec2(1.0f));
971 UNIFORM_CASE(UV2_TWO, tcu::Vec2(2.0f));
972 UNIFORM_CASE(UV2_THREE, tcu::Vec2(3.0f));
974 UNIFORM_CASE(UV2_HALF, tcu::Vec2(1.0f / 2.0f));
977 UNIFORM_CASE(UV3_MINUS_ONE, tcu::Vec3(-1.0f));
978 UNIFORM_CASE(UV3_ZERO, tcu::Vec3(0.0f));
979 UNIFORM_CASE(UV3_ONE, tcu::Vec3(1.0f));
980 UNIFORM_CASE(UV3_TWO, tcu::Vec3(2.0f));
981 UNIFORM_CASE(UV3_THREE, tcu::Vec3(3.0f));
983 UNIFORM_CASE(UV3_HALF, tcu::Vec3(1.0f / 2.0f));
986 UNIFORM_CASE(UV4_MINUS_ONE, tcu::Vec4(-1.0f));
987 UNIFORM_CASE(UV4_ZERO, tcu::Vec4(0.0f));
988 UNIFORM_CASE(UV4_ONE, tcu::Vec4(1.0f));
989 UNIFORM_CASE(UV4_TWO, tcu::Vec4(2.0f));
990 UNIFORM_CASE(UV4_THREE, tcu::Vec4(3.0f));
992 UNIFORM_CASE(UV4_HALF, tcu::Vec4(1.0f / 2.0f));
994 UNIFORM_CASE(UV4_BLACK, tcu::Vec4(0.0f, 0.0f, 0.0f, 1.0f));
995 UNIFORM_CASE(UV4_GRAY, tcu::Vec4(0.5f, 0.5f, 0.5f, 1.0f));
996 UNIFORM_CASE(UV4_WHITE, tcu::Vec4(1.0f, 1.0f, 1.0f, 1.0f));
// Unknown enumerants are logged rather than failing the test outright.
999 m_context.getTestContext().getLog() << tcu::TestLog::Message << "Unknown Uniform type: " << type << tcu::TestLog::EndMessage;
// Requested render size clamped to the MAX_RENDER_* limits defined at the top of the file.
1006 const tcu::UVec2 ShaderRenderCaseInstance::getViewportSize (void) const
1008 return tcu::UVec2(de::min(m_renderSize.x(), MAX_RENDER_WIDTH),
1009 de::min(m_renderSize.y(), MAX_RENDER_HEIGHT));
// Selects the multisample count used when rendering (default VK_SAMPLE_COUNT_1_BIT).
1012 void ShaderRenderCaseInstance::setSampleCount (VkSampleCountFlagBits sampleCount)
1014 m_sampleCount = sampleCount;
// True when any sample count other than the single-sample default is selected.
1017 bool ShaderRenderCaseInstance::isMultiSampling (void) const
1019 return m_sampleCount != VK_SAMPLE_COUNT_1_BIT;
// Uploads the given texture levels/layers into a device image:
//  1) packs every mip level and array layer into one host-visible staging
//     buffer (each slice aligned to both 4 bytes and the texel size),
//  2) records a host->transfer buffer barrier plus an UNDEFINED ->
//     TRANSFER_DST_OPTIMAL image barrier,
//  3) copies all regions with vkCmdCopyBufferToImage and transitions the
//     image to SHADER_READ_ONLY_OPTIMAL,
//  4) submits once and blocks on a fence until the upload completes.
// The image aspect is chosen from the reference sampler: a shadow sampler
// implies a depth image, otherwise color.
// NOTE(review): the parameter lines declaring the mip level count and the
// destination image are not visible in this chunk; the body uses them as
// 'mipLevels' and 'destImage' — confirm against the full file.
1022 void ShaderRenderCaseInstance::uploadImage (const tcu::TextureFormat& texFormat,
1023 const TextureData& textureData,
1024 const tcu::Sampler& refSampler,
1026 deUint32 arrayLayers,
1029 const VkDevice vkDevice = getDevice();
1030 const DeviceInterface& vk = getDeviceInterface();
1031 const VkQueue queue = getUniversalQueue();
1032 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
1034 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1035 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1036 deUint32 bufferSize = 0u;
1037 Move<VkBuffer> buffer;
1038 de::MovePtr<Allocation> bufferAlloc;
1039 Move<VkCommandPool> cmdPool;
1040 Move<VkCommandBuffer> cmdBuffer;
1041 Move<VkFence> fence;
1042 std::vector<VkBufferImageCopy> copyRegions;
1043 std::vector<deUint32> offsetMultiples;
// Every slice written to the staging buffer must start at an offset that is
// both 4-byte aligned and a multiple of the texel size.
1045 offsetMultiples.push_back(4u);
1046 offsetMultiples.push_back(texFormat.getPixelSize());
1048 // Calculate buffer size
1049 for (TextureData::const_iterator mit = textureData.begin(); mit != textureData.end(); ++mit)
1051 for (TextureLayerData::const_iterator lit = mit->begin(); lit != mit->end(); ++lit)
1053 const tcu::ConstPixelBufferAccess& access = *lit;
1055 bufferSize = getNextMultiple(offsetMultiples, bufferSize);
1056 bufferSize += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1060 // Create source buffer
1062 const VkBufferCreateInfo bufferParams =
1064 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1065 DE_NULL, // const void* pNext;
1066 0u, // VkBufferCreateFlags flags;
1067 bufferSize, // VkDeviceSize size;
1068 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
1069 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1070 0u, // deUint32 queueFamilyIndexCount;
1071 DE_NULL, // const deUint32* pQueueFamilyIndices;
1074 buffer = createBuffer(vk, vkDevice, &bufferParams);
1075 bufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
1076 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
1079 // Create command pool and buffer
1080 cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
1081 cmdBuffer = allocateCommandBuffer(vk, vkDevice, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
// Fence used to wait for the transfer submission below.
1084 fence = createFence(vk, vkDevice);
1086 // Barriers for copying buffer to image
1087 const VkBufferMemoryBarrier preBufferBarrier =
1089 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1090 DE_NULL, // const void* pNext;
1091 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1092 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1093 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1094 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1095 *buffer, // VkBuffer buffer;
1096 0u, // VkDeviceSize offset;
1097 bufferSize // VkDeviceSize size;
// Whole image (all mip levels, all array layers) UNDEFINED -> TRANSFER_DST.
1100 const VkImageMemoryBarrier preImageBarrier =
1102 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1103 DE_NULL, // const void* pNext;
1104 0u, // VkAccessFlags srcAccessMask;
1105 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1106 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1107 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1108 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1109 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1110 destImage, // VkImage image;
1111 { // VkImageSubresourceRange subresourceRange;
1112 aspectMask, // VkImageAspect aspect;
1113 0u, // deUint32 baseMipLevel;
1114 mipLevels, // deUint32 mipLevels;
1115 0u, // deUint32 baseArraySlice;
1116 arrayLayers // deUint32 arraySize;
// After the copy: TRANSFER_DST -> SHADER_READ_ONLY so shaders can sample it.
1120 const VkImageMemoryBarrier postImageBarrier =
1122 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1123 DE_NULL, // const void* pNext;
1124 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1125 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1126 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1127 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
1128 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1129 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1130 destImage, // VkImage image;
1131 { // VkImageSubresourceRange subresourceRange;
1132 aspectMask, // VkImageAspect aspect;
1133 0u, // deUint32 baseMipLevel;
1134 mipLevels, // deUint32 mipLevels;
1135 0u, // deUint32 baseArraySlice;
1136 arrayLayers // deUint32 arraySize;
1140 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
1142 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
1143 DE_NULL, // const void* pNext;
1144 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
1145 (const VkCommandBufferInheritanceInfo*)DE_NULL,
1148 // Get copy regions and write buffer data
1150 deUint32 layerDataOffset = 0;
1151 deUint8* destPtr = (deUint8*)bufferAlloc->getHostPtr();
1153 for (size_t levelNdx = 0; levelNdx < textureData.size(); levelNdx++)
1155 const TextureLayerData& layerData = textureData[levelNdx];
1157 for (size_t layerNdx = 0; layerNdx < layerData.size(); layerNdx++)
// Align the slice start (same alignment rule as the size calculation above).
1159 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1161 const tcu::ConstPixelBufferAccess& access = layerData[layerNdx];
1162 const tcu::PixelBufferAccess destAccess (access.getFormat(), access.getSize(), destPtr + layerDataOffset);
1164 const VkBufferImageCopy layerRegion =
1166 layerDataOffset, // VkDeviceSize bufferOffset;
1167 (deUint32)access.getWidth(), // deUint32 bufferRowLength;
1168 (deUint32)access.getHeight(), // deUint32 bufferImageHeight;
1169 { // VkImageSubresourceLayers imageSubresource;
1170 aspectMask, // VkImageAspectFlags aspectMask;
1171 (deUint32)levelNdx, // uint32_t mipLevel;
1172 (deUint32)layerNdx, // uint32_t baseArrayLayer;
1173 1u // uint32_t layerCount;
1175 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1176 { // VkExtent3D imageExtent;
1177 (deUint32)access.getWidth(),
1178 (deUint32)access.getHeight(),
1179 (deUint32)access.getDepth()
1183 copyRegions.push_back(layerRegion);
1184 tcu::copy(destAccess, access);
1186 layerDataOffset += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
// Make the host writes visible to the device before submission.
1191 flushMappedMemoryRange(vk, vkDevice, bufferAlloc->getMemory(), bufferAlloc->getOffset(), bufferSize);
1193 // Copy buffer to image
1194 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
1195 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
1196 vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
1197 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
1198 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1200 const VkSubmitInfo submitInfo =
1202 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
1203 DE_NULL, // const void* pNext;
1204 0u, // deUint32 waitSemaphoreCount;
1205 DE_NULL, // const VkSemaphore* pWaitSemaphores;
1206 DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
1207 1u, // deUint32 commandBufferCount;
1208 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
1209 0u, // deUint32 signalSemaphoreCount;
1210 DE_NULL // const VkSemaphore* pSignalSemaphores;
// Synchronous upload: block until the transfer has fully completed.
1213 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
1214 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity */));
// Clears every mip level and array layer of destImage to zero. The aspect
// (color vs. depth) is chosen from the reference sampler: a shadow sampler
// implies a depth image. The image is transitioned
// UNDEFINED -> TRANSFER_DST_OPTIMAL for the clear and then to
// SHADER_READ_ONLY_OPTIMAL; the function blocks on a fence until done.
// NOTE(review): several parameter lines are not visible in this chunk; the
// body also uses 'mipLevels' and 'destImage' — confirm against the full file.
1217 void ShaderRenderCaseInstance::clearImage (const tcu::Sampler& refSampler,
1219 deUint32 arrayLayers,
1222 const VkDevice vkDevice = m_context.getDevice();
1223 const DeviceInterface& vk = m_context.getDeviceInterface();
1224 const VkQueue queue = m_context.getUniversalQueue();
1225 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1227 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1228 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
1229 Move<VkCommandPool> cmdPool;
1230 Move<VkCommandBuffer> cmdBuffer;
1231 Move<VkFence> fence;
// Zero-initialized clear value (black / zero depth and stencil).
1233 VkClearValue clearValue;
1234 deMemset(&clearValue, 0, sizeof(clearValue));
1237 // Create command pool and buffer
1238 cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
1239 cmdBuffer = allocateCommandBuffer(vk, vkDevice, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
// Fence used to wait for the clear submission below.
1242 fence = createFence(vk, vkDevice);
// Whole image (all mip levels, all array layers) UNDEFINED -> TRANSFER_DST.
1244 const VkImageMemoryBarrier preImageBarrier =
1246 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1247 DE_NULL, // const void* pNext;
1248 0u, // VkAccessFlags srcAccessMask;
1249 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1250 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1251 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1252 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1253 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1254 destImage, // VkImage image;
1255 { // VkImageSubresourceRange subresourceRange;
1256 aspectMask, // VkImageAspect aspect;
1257 0u, // deUint32 baseMipLevel;
1258 mipLevels, // deUint32 mipLevels;
1259 0u, // deUint32 baseArraySlice;
1260 arrayLayers // deUint32 arraySize;
// After the clear: TRANSFER_DST -> SHADER_READ_ONLY so shaders can sample it.
1264 const VkImageMemoryBarrier postImageBarrier =
1266 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1267 DE_NULL, // const void* pNext;
1268 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1269 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1270 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1271 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
1272 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1273 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1274 destImage, // VkImage image;
1275 { // VkImageSubresourceRange subresourceRange;
1276 aspectMask, // VkImageAspect aspect;
1277 0u, // deUint32 baseMipLevel;
1278 mipLevels, // deUint32 mipLevels;
1279 0u, // deUint32 baseArraySlice;
1280 arrayLayers // deUint32 arraySize;
1284 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
1286 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
1287 DE_NULL, // const void* pNext;
1288 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
1289 (const VkCommandBufferInheritanceInfo*)DE_NULL,
1293 const VkImageSubresourceRange clearRange =
1295 aspectMask, // VkImageAspectFlags aspectMask;
1296 0u, // deUint32 baseMipLevel;
1297 mipLevels, // deUint32 levelCount;
1298 0u, // deUint32 baseArrayLayer;
1299 arrayLayers // deUint32 layerCount;
1302 // Record the clear (color or depth/stencil depending on the aspect)
1303 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
1304 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
1305 if (aspectMask == VK_IMAGE_ASPECT_COLOR_BIT)
1307 vk.cmdClearColorImage(*cmdBuffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue.color, 1, &clearRange);
1311 vk.cmdClearDepthStencilImage(*cmdBuffer, destImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue.depthStencil, 1, &clearRange);
1313 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
1314 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1316 const VkSubmitInfo submitInfo =
1318 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
1319 DE_NULL, // const void* pNext;
1320 0u, // deUint32 waitSemaphoreCount;
1321 DE_NULL, // const VkSemaphore* pWaitSemaphores;
1322 DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
1323 1u, // deUint32 commandBufferCount;
1324 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
1325 0u, // deUint32 signalSemaphoreCount;
1326 DE_NULL // const VkSemaphore* pSignalSemaphores;
// Synchronous clear: block until the submission has fully completed.
1329 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
1330 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity */));
1333 VkExtent3D mipLevelExtents (const VkExtent3D& baseExtents, const deUint32 mipLevel)
1337 result.width = std::max(baseExtents.width >> mipLevel, 1u);
1338 result.height = std::max(baseExtents.height >> mipLevel, 1u);
1339 result.depth = std::max(baseExtents.depth >> mipLevel, 1u);
1344 tcu::UVec3 alignedDivide (const VkExtent3D& extent, const VkExtent3D& divisor)
1348 result.x() = extent.width / divisor.width + ((extent.width % divisor.width != 0) ? 1u : 0u);
1349 result.y() = extent.height / divisor.height + ((extent.height % divisor.height != 0) ? 1u : 0u);
1350 result.z() = extent.depth / divisor.depth + ((extent.depth % divisor.depth != 0) ? 1u : 0u);
1355 bool isImageSizeSupported (const VkImageType imageType, const tcu::UVec3& imageSize, const vk::VkPhysicalDeviceLimits& limits)
1359 case VK_IMAGE_TYPE_1D:
1360 return (imageSize.x() <= limits.maxImageDimension1D
1361 && imageSize.y() == 1
1362 && imageSize.z() == 1);
1363 case VK_IMAGE_TYPE_2D:
1364 return (imageSize.x() <= limits.maxImageDimension2D
1365 && imageSize.y() <= limits.maxImageDimension2D
1366 && imageSize.z() == 1);
1367 case VK_IMAGE_TYPE_3D:
1368 return (imageSize.x() <= limits.maxImageDimension3D
1369 && imageSize.y() <= limits.maxImageDimension3D
1370 && imageSize.z() <= limits.maxImageDimension3D);
1372 DE_FATAL("Unknown image type");
1377 void ShaderRenderCaseInstance::checkSparseSupport (const VkImageType imageType) const
1379 const InstanceInterface& instance = getInstanceInterface();
1380 const VkPhysicalDevice physicalDevice = getPhysicalDevice();
1381 const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instance, physicalDevice);
1383 if (!deviceFeatures.shaderResourceResidency)
1384 TCU_THROW(NotSupportedError, "Required feature: shaderResourceResidency.");
1386 if (!deviceFeatures.sparseBinding)
1387 TCU_THROW(NotSupportedError, "Required feature: sparseBinding.");
1389 if (imageType == VK_IMAGE_TYPE_2D && !deviceFeatures.sparseResidencyImage2D)
1390 TCU_THROW(NotSupportedError, "Required feature: sparseResidencyImage2D.");
1392 if (imageType == VK_IMAGE_TYPE_3D && !deviceFeatures.sparseResidencyImage3D)
1393 TCU_THROW(NotSupportedError, "Required feature: sparseResidencyImage3D.");
// Sparse-image variant of uploadImage. First binds device memory to the
// sparse image: one block per imageGranularity-sized tile for every mip
// level below the mip tail, plus opaque binds for the mip tail itself
// (one tail per layer unless VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT is
// set). vkQueueBindSparse signals a semaphore that orders the subsequent
// upload after the binds. The texel data is then staged in a host-visible
// buffer and copied into the image with the same barrier/submit/fence
// pattern as uploadImage. Throws NotSupportedError when the device cannot
// host the image (size, memory type, address space, sparse format support,
// or a non-color aspect).
1396 void ShaderRenderCaseInstance::uploadSparseImage (const tcu::TextureFormat& texFormat,
1397 const TextureData& textureData,
1398 const tcu::Sampler& refSampler,
1399 const deUint32 mipLevels,
1400 const deUint32 arrayLayers,
1401 const VkImage sparseImage,
1402 const VkImageCreateInfo& imageCreateInfo,
1403 const tcu::UVec3 texSize)
1405 const VkDevice vkDevice = getDevice();
1406 const DeviceInterface& vk = getDeviceInterface();
1407 const VkPhysicalDevice physicalDevice = getPhysicalDevice();
1408 const VkQueue queue = getUniversalQueue();
1409 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
1410 const InstanceInterface& instance = getInstanceInterface();
1411 const VkPhysicalDeviceProperties deviceProperties = getPhysicalDeviceProperties(instance, physicalDevice);
1412 const VkPhysicalDeviceMemoryProperties deviceMemoryProperties = getPhysicalDeviceMemoryProperties(instance, physicalDevice);
1413 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
1414 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
// Semaphore signaled by the sparse bind and waited on by the upload submit.
1416 const Unique<VkSemaphore> imageMemoryBindSemaphore(createSemaphore(vk, vkDevice));
1417 deUint32 bufferSize = 0u;
// Staging offsets must be 4-byte aligned and a multiple of the texel size.
1418 std::vector<deUint32> offsetMultiples;
1419 offsetMultiples.push_back(4u);
1420 offsetMultiples.push_back(texFormat.getPixelSize());
1422 if (isImageSizeSupported(imageCreateInfo.imageType, texSize, deviceProperties.limits) == false)
1423 TCU_THROW(NotSupportedError, "Image size not supported for device.");
1425 // Calculate buffer size
1426 for (TextureData::const_iterator mit = textureData.begin(); mit != textureData.end(); ++mit)
1428 for (TextureLayerData::const_iterator lit = mit->begin(); lit != mit->end(); ++lit)
1430 const tcu::ConstPixelBufferAccess& access = *lit;
1432 bufferSize = getNextMultiple(offsetMultiples, bufferSize);
1433 bufferSize += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
// Query the image's sparse memory requirements (one record per aspect).
1438 deUint32 sparseMemoryReqCount = 0;
1440 vk.getImageSparseMemoryRequirements(vkDevice, sparseImage, &sparseMemoryReqCount, DE_NULL);
1442 DE_ASSERT(sparseMemoryReqCount != 0);
1444 std::vector<VkSparseImageMemoryRequirements> sparseImageMemoryRequirements;
1445 sparseImageMemoryRequirements.resize(sparseMemoryReqCount);
1447 vk.getImageSparseMemoryRequirements(vkDevice, sparseImage, &sparseMemoryReqCount, &sparseImageMemoryRequirements[0]);
// Find the requirements record covering the color aspect; only color is
// supported by this path.
1449 const deUint32 noMatchFound = ~((deUint32)0);
1451 deUint32 colorAspectIndex = noMatchFound;
1452 for (deUint32 memoryReqNdx = 0; memoryReqNdx < sparseMemoryReqCount; ++memoryReqNdx)
1454 if (sparseImageMemoryRequirements[memoryReqNdx].formatProperties.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT)
1456 colorAspectIndex = memoryReqNdx;
1461 if (colorAspectIndex == noMatchFound)
1462 TCU_THROW(NotSupportedError, "Not supported image aspect - the test supports currently only VK_IMAGE_ASPECT_COLOR_BIT.");
1464 const VkMemoryRequirements memoryRequirements = getImageMemoryRequirements(vk, vkDevice, sparseImage);
// Pick any memory type compatible with the image's requirements.
1466 deUint32 memoryType = noMatchFound;
1467 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < deviceMemoryProperties.memoryTypeCount; ++memoryTypeNdx)
1469 if ((memoryRequirements.memoryTypeBits & (1u << memoryTypeNdx)) != 0 &&
1470 MemoryRequirement::Any.matchesHeap(deviceMemoryProperties.memoryTypes[memoryTypeNdx].propertyFlags))
1472 memoryType = memoryTypeNdx;
1477 if (memoryType == noMatchFound)
1478 TCU_THROW(NotSupportedError, "No matching memory type found.");
1480 if (memoryRequirements.size > deviceProperties.limits.sparseAddressSpaceSize)
1481 TCU_THROW(NotSupportedError, "Required memory size for sparse resource exceeds device limits.");
1483 // Check if the image format supports sparse operations
1484 const std::vector<VkSparseImageFormatProperties> sparseImageFormatPropVec =
1485 getPhysicalDeviceSparseImageFormatProperties(instance, physicalDevice, imageCreateInfo.format, imageCreateInfo.imageType, imageCreateInfo.samples, imageCreateInfo.usage, imageCreateInfo.tiling);
1487 if (sparseImageFormatPropVec.size() == 0)
1488 TCU_THROW(NotSupportedError, "The image format does not support sparse operations.");
1490 const VkSparseImageMemoryRequirements aspectRequirements = sparseImageMemoryRequirements[colorAspectIndex];
1491 const VkExtent3D imageGranularity = aspectRequirements.formatProperties.imageGranularity;
1493 std::vector<VkSparseImageMemoryBind> imageResidencyMemoryBinds;
1494 std::vector<VkSparseMemoryBind> imageMipTailMemoryBinds;
// Bind one memory block per granularity-sized tile for every mip level
// below the mip tail, for every array layer. Allocations are kept alive in
// m_allocations for the lifetime of the instance.
1496 for (deUint32 layerNdx = 0; layerNdx < arrayLayers; ++ layerNdx)
1498 for (deUint32 mipLevelNdx = 0; mipLevelNdx < aspectRequirements.imageMipTailFirstLod; ++mipLevelNdx)
1500 const VkExtent3D mipExtent = mipLevelExtents(imageCreateInfo.extent, mipLevelNdx);
1501 const tcu::UVec3 numSparseBinds = alignedDivide(mipExtent, imageGranularity);
1502 const tcu::UVec3 lastBlockExtent = tcu::UVec3(mipExtent.width % imageGranularity.width ? mipExtent.width % imageGranularity.width : imageGranularity.width,
1503 mipExtent.height % imageGranularity.height ? mipExtent.height % imageGranularity.height : imageGranularity.height,
1504 mipExtent.depth % imageGranularity.depth ? mipExtent.depth % imageGranularity.depth : imageGranularity.depth );
1506 for (deUint32 z = 0; z < numSparseBinds.z(); ++z)
1507 for (deUint32 y = 0; y < numSparseBinds.y(); ++y)
1508 for (deUint32 x = 0; x < numSparseBinds.x(); ++x)
1510 const VkMemoryRequirements allocRequirements =
1512 // 28.7.5 alignment shows the block size in bytes
1513 memoryRequirements.alignment, // VkDeviceSize size;
1514 memoryRequirements.alignment, // VkDeviceSize alignment;
1515 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
1518 de::SharedPtr<Allocation> allocation(m_memAlloc.allocate(allocRequirements, MemoryRequirement::Any).release());
1520 m_allocations.push_back(allocation);
1523 offset.x = x*imageGranularity.width;
1524 offset.y = y*imageGranularity.height;
1525 offset.z = z*imageGranularity.depth;
// The last tile in each direction may be a partial block.
1528 extent.width = (x == numSparseBinds.x() - 1) ? lastBlockExtent.x() : imageGranularity.width;
1529 extent.height = (y == numSparseBinds.y() - 1) ? lastBlockExtent.y() : imageGranularity.height;
1530 extent.depth = (z == numSparseBinds.z() - 1) ? lastBlockExtent.z() : imageGranularity.depth;
1532 const VkSparseImageMemoryBind imageMemoryBind =
1535 aspectMask, // VkImageAspectFlags aspectMask;
1536 mipLevelNdx,// uint32_t mipLevel;
1537 layerNdx, // uint32_t arrayLayer;
1538 }, // VkImageSubresource subresource;
1539 offset, // VkOffset3D offset;
1540 extent, // VkExtent3D extent;
1541 allocation->getMemory(), // VkDeviceMemory memory;
1542 allocation->getOffset(), // VkDeviceSize memoryOffset;
1543 0u, // VkSparseMemoryBindFlags flags;
1546 imageResidencyMemoryBinds.push_back(imageMemoryBind);
1550 // Handle MIP tail. There are two cases to consider here:
1552 // 1) VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT is requested by the driver: each layer needs a separate tail.
1553 // 2) otherwise: only one tail is needed.
1555 if ( imageMipTailMemoryBinds.size() == 0 ||
1556 (imageMipTailMemoryBinds.size() != 0 && (aspectRequirements.formatProperties.flags & VK_SPARSE_IMAGE_FORMAT_SINGLE_MIPTAIL_BIT) == 0))
1558 const VkMemoryRequirements allocRequirements =
1560 aspectRequirements.imageMipTailSize, // VkDeviceSize size;
1561 memoryRequirements.alignment, // VkDeviceSize alignment;
1562 memoryRequirements.memoryTypeBits, // uint32_t memoryTypeBits;
1565 const de::SharedPtr<Allocation> allocation(m_memAlloc.allocate(allocRequirements, MemoryRequirement::Any).release());
1567 const VkSparseMemoryBind imageMipTailMemoryBind =
1569 aspectRequirements.imageMipTailOffset + layerNdx * aspectRequirements.imageMipTailStride, // VkDeviceSize resourceOffset;
1570 aspectRequirements.imageMipTailSize, // VkDeviceSize size;
1571 allocation->getMemory(), // VkDeviceMemory memory;
1572 allocation->getOffset(), // VkDeviceSize memoryOffset;
1573 0u, // VkSparseMemoryBindFlags flags;
1576 m_allocations.push_back(allocation);
1577 imageMipTailMemoryBinds.push_back(imageMipTailMemoryBind);
// Bind all residency and mip-tail memory in one queue operation; the
// semaphore lets the upload below wait for the binds to complete.
1582 VkBindSparseInfo bindSparseInfo =
1584 VK_STRUCTURE_TYPE_BIND_SPARSE_INFO, //VkStructureType sType;
1585 DE_NULL, //const void* pNext;
1586 0u, //deUint32 waitSemaphoreCount;
1587 DE_NULL, //const VkSemaphore* pWaitSemaphores;
1588 0u, //deUint32 bufferBindCount;
1589 DE_NULL, //const VkSparseBufferMemoryBindInfo* pBufferBinds;
1590 0u, //deUint32 imageOpaqueBindCount;
1591 DE_NULL, //const VkSparseImageOpaqueMemoryBindInfo* pImageOpaqueBinds;
1592 0u, //deUint32 imageBindCount;
1593 DE_NULL, //const VkSparseImageMemoryBindInfo* pImageBinds;
1594 1u, //deUint32 signalSemaphoreCount;
1595 &imageMemoryBindSemaphore.get() //const VkSemaphore* pSignalSemaphores;
1598 VkSparseImageMemoryBindInfo imageResidencyBindInfo;
1599 VkSparseImageOpaqueMemoryBindInfo imageMipTailBindInfo;
1601 if (imageResidencyMemoryBinds.size() > 0)
1603 imageResidencyBindInfo.image = sparseImage;
1604 imageResidencyBindInfo.bindCount = static_cast<deUint32>(imageResidencyMemoryBinds.size());
1605 imageResidencyBindInfo.pBinds = &imageResidencyMemoryBinds[0];
1607 bindSparseInfo.imageBindCount = 1u;
1608 bindSparseInfo.pImageBinds = &imageResidencyBindInfo;
1611 if (imageMipTailMemoryBinds.size() > 0)
1613 imageMipTailBindInfo.image = sparseImage;
1614 imageMipTailBindInfo.bindCount = static_cast<deUint32>(imageMipTailMemoryBinds.size());
1615 imageMipTailBindInfo.pBinds = &imageMipTailMemoryBinds[0];
1617 bindSparseInfo.imageOpaqueBindCount = 1u;
1618 bindSparseInfo.pImageOpaqueBinds = &imageMipTailBindInfo;
1621 VK_CHECK(vk.queueBindSparse(queue, 1u, &bindSparseInfo, DE_NULL));
1624 Move<VkCommandPool> cmdPool;
1625 Move<VkCommandBuffer> cmdBuffer;
1627 // Create command pool
1628 cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
1630 // Create command buffer
1631 cmdBuffer = allocateCommandBuffer(vk, vkDevice, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1633 // Create source buffer
1634 const VkBufferCreateInfo bufferParams =
1636 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1637 DE_NULL, // const void* pNext;
1638 0u, // VkBufferCreateFlags flags;
1639 bufferSize, // VkDeviceSize size;
1640 VK_BUFFER_USAGE_TRANSFER_SRC_BIT, // VkBufferUsageFlags usage;
1641 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1642 0u, // deUint32 queueFamilyIndexCount;
1643 DE_NULL, // const deUint32* pQueueFamilyIndices;
1646 Move<VkBuffer> buffer = createBuffer(vk, vkDevice, &bufferParams);
1647 de::MovePtr<Allocation> bufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *buffer), MemoryRequirement::HostVisible);
1648 VK_CHECK(vk.bindBufferMemory(vkDevice, *buffer, bufferAlloc->getMemory(), bufferAlloc->getOffset()));
1650 // Barriers for copying buffer to image
1651 const VkBufferMemoryBarrier preBufferBarrier =
1653 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
1654 DE_NULL, // const void* pNext;
1655 VK_ACCESS_HOST_WRITE_BIT, // VkAccessFlags srcAccessMask;
1656 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1657 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1658 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1659 *buffer, // VkBuffer buffer;
1660 0u, // VkDeviceSize offset;
1661 bufferSize // VkDeviceSize size;
// Whole image (all mip levels, all array layers) UNDEFINED -> TRANSFER_DST.
1664 const VkImageMemoryBarrier preImageBarrier =
1666 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1667 DE_NULL, // const void* pNext;
1668 0u, // VkAccessFlags srcAccessMask;
1669 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1670 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1671 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1672 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1673 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1674 sparseImage, // VkImage image;
1675 { // VkImageSubresourceRange subresourceRange;
1676 aspectMask, // VkImageAspect aspect;
1677 0u, // deUint32 baseMipLevel;
1678 mipLevels, // deUint32 mipLevels;
1679 0u, // deUint32 baseArraySlice;
1680 arrayLayers // deUint32 arraySize;
// After the copy: TRANSFER_DST -> SHADER_READ_ONLY so shaders can sample it.
1684 const VkImageMemoryBarrier postImageBarrier =
1686 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1687 DE_NULL, // const void* pNext;
1688 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1689 VK_ACCESS_SHADER_READ_BIT, // VkAccessFlags dstAccessMask;
1690 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1691 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout newLayout;
1692 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1693 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1694 sparseImage, // VkImage image;
1695 { // VkImageSubresourceRange subresourceRange;
1696 aspectMask, // VkImageAspect aspect;
1697 0u, // deUint32 baseMipLevel;
1698 mipLevels, // deUint32 mipLevels;
1699 0u, // deUint32 baseArraySlice;
1700 arrayLayers // deUint32 arraySize;
1704 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
1706 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
1707 DE_NULL, // const void* pNext;
1708 VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
1709 (const VkCommandBufferInheritanceInfo*)DE_NULL,
1712 std::vector<VkBufferImageCopy> copyRegions;
1713 // Get copy regions and write buffer data
1715 deUint32 layerDataOffset = 0;
1716 deUint8* destPtr = (deUint8*)bufferAlloc->getHostPtr();
1718 for (size_t levelNdx = 0; levelNdx < textureData.size(); levelNdx++)
1720 const TextureLayerData& layerData = textureData[levelNdx];
1722 for (size_t layerNdx = 0; layerNdx < layerData.size(); layerNdx++)
// Align the slice start (same alignment rule as the size calculation above).
1724 layerDataOffset = getNextMultiple(offsetMultiples, layerDataOffset);
1726 const tcu::ConstPixelBufferAccess& access = layerData[layerNdx];
1727 const tcu::PixelBufferAccess destAccess (access.getFormat(), access.getSize(), destPtr + layerDataOffset);
1729 const VkBufferImageCopy layerRegion =
1731 layerDataOffset, // VkDeviceSize bufferOffset;
1732 (deUint32)access.getWidth(), // deUint32 bufferRowLength;
1733 (deUint32)access.getHeight(), // deUint32 bufferImageHeight;
1734 { // VkImageSubresourceLayers imageSubresource;
1735 aspectMask, // VkImageAspectFlags aspectMask;
1736 (deUint32)levelNdx, // uint32_t mipLevel;
1737 (deUint32)layerNdx, // uint32_t baseArrayLayer;
1738 1u // uint32_t layerCount;
1740 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
1741 { // VkExtent3D imageExtent;
1742 (deUint32)access.getWidth(),
1743 (deUint32)access.getHeight(),
1744 (deUint32)access.getDepth()
1748 copyRegions.push_back(layerRegion);
1749 tcu::copy(destAccess, access);
1751 layerDataOffset += access.getWidth() * access.getHeight() * access.getDepth() * access.getFormat().getPixelSize();
1756 // Copy buffer to image
1757 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
1758 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &preBufferBarrier, 1, &preImageBarrier);
1759 vk.cmdCopyBufferToImage(*cmdBuffer, *buffer, sparseImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, (deUint32)copyRegions.size(), copyRegions.data());
1760 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
1761 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
// The upload waits on the sparse-bind semaphore so memory is bound before
// the copy executes.
1763 const VkPipelineStageFlags pipelineStageFlags = VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
1765 const VkSubmitInfo submitInfo =
1767 VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
1768 DE_NULL, // const void* pNext;
1769 1u, // deUint32 waitSemaphoreCount;
1770 &imageMemoryBindSemaphore.get(), // const VkSemaphore* pWaitSemaphores;
1771 &pipelineStageFlags, // const VkPipelineStageFlags* pWaitDstStageMask;
1772 1u, // deUint32 commandBufferCount;
1773 &cmdBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
1774 0u, // deUint32 signalSemaphoreCount;
1775 DE_NULL // const VkSemaphore* pSignalSemaphores;
1778 Move<VkFence> fence = createFence(vk, vkDevice);
// Synchronous upload: block until the transfer (and the binds it waited on)
// has fully completed.
1782 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
1783 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity */));
1787 VK_CHECK(vk.deviceWaitIdle(vkDevice));
// Bind texture m_textures[textureId] as a combined image sampler at the given
// descriptor binding location. For each supported texture type the reference
// texture's mip levels (and cube faces / array layers) are flattened into a
// per-level list of ConstPixelBufferAccess objects, which is then passed to
// createSamplerUniform to build the actual Vulkan image/view/sampler.
1792 void ShaderRenderCaseInstance::useSampler (deUint32 bindingLocation, deUint32 textureId)
1794 DE_ASSERT(textureId < m_textures.size());
1796 const TextureBinding& textureBinding = *m_textures[textureId];
1797 const TextureBinding::Type textureType = textureBinding.getType();
1798 const tcu::Sampler& refSampler = textureBinding.getSampler();
1799 const TextureBinding::Parameters& textureParams = textureBinding.getParameters();
// Multisampled images carry a single mip level only; every branch below
// forces mipLevels to 1 when isMSTexture is set.
1800 const bool isMSTexture = textureParams.samples != vk::VK_SAMPLE_COUNT_1_BIT;
1801 deUint32 mipLevels = 1u;
1802 deUint32 arrayLayers = 1u;
1803 tcu::TextureFormat texFormat;
1805 TextureData textureData;
// Plain 2D texture: one pixel-buffer access per non-empty mip level.
1807 if (textureType == TextureBinding::TYPE_2D)
1809 const tcu::Texture2D& texture = textureBinding.get2D();
1811 texFormat = texture.getFormat();
1812 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), 1u);
1813 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1816 textureData.resize(mipLevels);
1818 for (deUint32 level = 0; level < mipLevels; ++level)
1820 if (texture.isLevelEmpty(level))
1823 textureData[level].push_back(texture.getLevel(level));
1826 else if (textureType == TextureBinding::TYPE_CUBE_MAP)
1828 const tcu::TextureCube& texture = textureBinding.getCube();
1830 texFormat = texture.getFormat();
1831 texSize = tcu::UVec3(texture.getSize(), texture.getSize(), 1u);
1832 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
// Faces are pushed per level in the order listed here (+X,-X,+Y,-Y,+Z,-Z);
// presumably this must match the cube layer order expected by the upload
// path in createSamplerUniform — TODO confirm.
1835 static const tcu::CubeFace cubeFaceMapping[tcu::CUBEFACE_LAST] =
1837 tcu::CUBEFACE_POSITIVE_X,
1838 tcu::CUBEFACE_NEGATIVE_X,
1839 tcu::CUBEFACE_POSITIVE_Y,
1840 tcu::CUBEFACE_NEGATIVE_Y,
1841 tcu::CUBEFACE_POSITIVE_Z,
1842 tcu::CUBEFACE_NEGATIVE_Z
1845 textureData.resize(mipLevels);
1847 for (deUint32 level = 0; level < mipLevels; ++level)
1849 for (int faceNdx = 0; faceNdx < tcu::CUBEFACE_LAST; ++faceNdx)
1851 tcu::CubeFace face = cubeFaceMapping[faceNdx];
1853 if (texture.isLevelEmpty(face, level))
1856 textureData[level].push_back(texture.getLevelFace(level, face));
1860 else if (textureType == TextureBinding::TYPE_2D_ARRAY)
1862 const tcu::Texture2DArray& texture = textureBinding.get2DArray();
1864 texFormat = texture.getFormat();
1865 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), 1u);
1866 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1867 arrayLayers = (deUint32)texture.getNumLayers();
1869 textureData.resize(mipLevels);
1871 for (deUint32 level = 0; level < mipLevels; ++level)
1873 if (texture.isLevelEmpty(level))
// All layers of a level share one allocation; slice out each layer by
// computing its byte offset manually (width * height * pixelSize per layer).
1876 const tcu::ConstPixelBufferAccess& levelLayers = texture.getLevel(level);
1877 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1879 for (deUint32 layer = 0; layer < arrayLayers; ++layer)
1881 const deUint32 layerOffset = layerSize * layer;
1882 tcu::ConstPixelBufferAccess layerData (levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1883 textureData[level].push_back(layerData);
// 3D texture: each level is a single access covering the full depth.
1887 else if (textureType == TextureBinding::TYPE_3D)
1889 const tcu::Texture3D& texture = textureBinding.get3D();
1891 texFormat = texture.getFormat();
1892 texSize = tcu::UVec3(texture.getWidth(), texture.getHeight(), texture.getDepth());
1893 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1896 textureData.resize(mipLevels);
1898 for (deUint32 level = 0; level < mipLevels; ++level)
1900 if (texture.isLevelEmpty(level))
1903 textureData[level].push_back(texture.getLevel(level));
1906 else if (textureType == TextureBinding::TYPE_1D)
1908 const tcu::Texture1D& texture = textureBinding.get1D();
1910 texFormat = texture.getFormat();
1911 texSize = tcu::UVec3(texture.getWidth(), 1, 1);
1912 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1915 textureData.resize(mipLevels);
1917 for (deUint32 level = 0; level < mipLevels; ++level)
1919 if (texture.isLevelEmpty(level))
1922 textureData[level].push_back(texture.getLevel(level));
1925 else if (textureType == TextureBinding::TYPE_1D_ARRAY)
1927 const tcu::Texture1DArray& texture = textureBinding.get1DArray();
1929 texFormat = texture.getFormat();
1930 texSize = tcu::UVec3(texture.getWidth(), 1, 1);
1931 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
1932 arrayLayers = (deUint32)texture.getNumLayers();
1934 textureData.resize(mipLevels);
1936 for (deUint32 level = 0; level < mipLevels; ++level)
1938 if (texture.isLevelEmpty(level))
// 1D array layers: per-layer stride is width * pixelSize (height is 1).
1941 const tcu::ConstPixelBufferAccess& levelLayers = texture.getLevel(level);
1942 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getFormat().getPixelSize();
1944 for (deUint32 layer = 0; layer < arrayLayers; ++layer)
1946 const deUint32 layerOffset = layerSize * layer;
1947 tcu::ConstPixelBufferAccess layerData (levelLayers.getFormat(), levelLayers.getWidth(), 1, 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1948 textureData[level].push_back(layerData);
1952 else if (textureType == TextureBinding::TYPE_CUBE_ARRAY)
1954 const tcu::TextureCubeArray& texture = textureBinding.getCubeArray();
1955 texFormat = texture.getFormat();
1956 texSize = tcu::UVec3(texture.getSize(), texture.getSize(), 1);
1957 mipLevels = isMSTexture ? 1u : (deUint32)texture.getNumLevels();
// For cube arrays the "depth" of the reference texture holds the total
// layer-face count — presumably a multiple of 6; TODO confirm.
1958 arrayLayers = texture.getDepth();
1960 textureData.resize(mipLevels);
1962 for (deUint32 level = 0; level < mipLevels; ++level)
1964 if (texture.isLevelEmpty(level))
1967 const tcu::ConstPixelBufferAccess& levelLayers = texture.getLevel(level);
1968 const deUint32 layerSize = levelLayers.getWidth() * levelLayers.getHeight() * levelLayers.getFormat().getPixelSize();
1970 for (deUint32 layer = 0; layer < arrayLayers; ++layer)
1972 const deUint32 layerOffset = layerSize * layer;
1973 tcu::ConstPixelBufferAccess layerData (levelLayers.getFormat(), levelLayers.getWidth(), levelLayers.getHeight(), 1, (deUint8*)levelLayers.getDataPtr() + layerOffset);
1974 textureData[level].push_back(layerData);
1980 TCU_THROW(InternalError, "Invalid texture type");
// Hand the flattened data off to the Vulkan-side setup.
1983 createSamplerUniform(bindingLocation, textureType, textureBinding.getParameters().initialization, texFormat, texSize, textureData, refSampler, mipLevels, arrayLayers, textureParams);
1986 void ShaderRenderCaseInstance::setPushConstantRanges (const deUint32 rangeCount, const vk::VkPushConstantRange* const pcRanges)
1988 m_pushConstantRanges.clear();
1989 for (deUint32 i = 0; i < rangeCount; ++i)
1991 m_pushConstantRanges.push_back(pcRanges[i]);
// Hook invoked during command buffer recording to push constant data.
// Parameters are unnamed here, so this base implementation is presumably an
// intentional no-op that subclasses using push constants override — TODO
// confirm against derived test instances.
1995 void ShaderRenderCaseInstance::updatePushConstants (vk::VkCommandBuffer, vk::VkPipelineLayout)
// Create the Vulkan objects backing one combined image sampler uniform:
// a VkImage of the requested type/format/size, its memory, initial contents
// (uploaded from textureData or cleared), a VkSampler mapped from the
// reference sampler, and a VkImageView. The resulting descriptor info is
// recorded in m_uniformInfos and the descriptor set layout/pool builders.
1999 void ShaderRenderCaseInstance::createSamplerUniform (deUint32 bindingLocation,
2000 TextureBinding::Type textureType,
2001 TextureBinding::Init textureInit,
2002 const tcu::TextureFormat& texFormat,
2003 const tcu::UVec3 texSize,
2004 const TextureData& textureData,
2005 const tcu::Sampler& refSampler,
2007 deUint32 arrayLayers,
2008 TextureBinding::Parameters textureParams)
2010 const VkDevice vkDevice = getDevice();
2011 const DeviceInterface& vk = getDeviceInterface();
2012 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
// Samplers with a compare mode enabled sample depth data, so the image is
// accessed through the depth aspect instead of the color aspect.
2014 const bool isShadowSampler = refSampler.compare != tcu::Sampler::COMPAREMODE_NONE;
2015 const VkImageAspectFlags aspectMask = isShadowSampler ? VK_IMAGE_ASPECT_DEPTH_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
2016 const VkImageViewType imageViewType = textureTypeToImageViewType(textureType);
2017 const VkImageType imageType = viewTypeToImageType(imageViewType);
2018 const VkFormat format = mapTextureFormat(texFormat);
// Cube and cube-array views require the image to be created cube-compatible.
2019 const bool isCube = imageViewType == VK_IMAGE_VIEW_TYPE_CUBE || imageViewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
2020 VkImageCreateFlags imageCreateFlags = isCube ? (VkImageCreateFlags)VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : (VkImageCreateFlags)0;
2021 VkImageUsageFlags imageUsageFlags = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
2022 Move<VkImage> vkTexture;
2023 de::MovePtr<Allocation> allocation;
// Sparse backing mode: verify device support first, then request sparse
// binding/residency on the image.
2025 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
2027 checkSparseSupport(imageType);
2028 imageCreateFlags |= VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT;
2032 const VkImageCreateInfo imageParams =
2034 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2035 DE_NULL, // const void* pNext;
2036 imageCreateFlags, // VkImageCreateFlags flags;
2037 imageType, // VkImageType imageType;
2038 format, // VkFormat format;
2039 { // VkExtent3D extent;
2044 mipLevels, // deUint32 mipLevels;
2045 arrayLayers, // deUint32 arrayLayers;
2046 textureParams.samples, // VkSampleCountFlagBits samples;
2047 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2048 imageUsageFlags, // VkImageUsageFlags usage;
2049 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2050 1u, // deUint32 queueFamilyIndexCount;
2051 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
2052 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2055 vkTexture = createImage(vk, vkDevice, &imageParams);
2056 allocation = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *vkTexture), MemoryRequirement::Any);
// Direct binding is skipped for sparse images; their memory is presumably
// bound via queue bind-sparse in the sparse upload path — TODO confirm.
2058 if (m_imageBackingMode != IMAGE_BACKING_MODE_SPARSE)
2060 VK_CHECK(vk.bindImageMemory(vkDevice, *vkTexture, allocation->getMemory(), allocation->getOffset()));
// Initialize image contents according to the requested initialization mode.
2063 switch (textureInit)
2065 case TextureBinding::INIT_UPLOAD_DATA:
2067 // upload*Image functions use cmdCopyBufferToImage, which is invalid for multisample images
2068 DE_ASSERT(textureParams.samples == VK_SAMPLE_COUNT_1_BIT);
2070 if (m_imageBackingMode == IMAGE_BACKING_MODE_SPARSE)
2072 uploadSparseImage(texFormat, textureData, refSampler, mipLevels, arrayLayers, *vkTexture, imageParams, texSize);
2076 // Upload texture data
2077 uploadImage(texFormat, textureData, refSampler, mipLevels, arrayLayers, *vkTexture);
2081 case TextureBinding::INIT_CLEAR:
2082 clearImage(refSampler, mipLevels, arrayLayers, *vkTexture);
2085 DE_FATAL("Impossible");
// Create the sampler and a view restricted to [baseMipLevel, mipLevels).
2089 const VkSamplerCreateInfo samplerParams = mapSampler(refSampler, texFormat);
2090 Move<VkSampler> sampler = createSampler(vk, vkDevice, &samplerParams);
2091 const deUint32 baseMipLevel = textureParams.baseMipLevel;
2092 const vk::VkComponentMapping components = textureParams.componentMapping;
2093 const VkImageViewCreateInfo viewParams =
2095 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2096 NULL, // const void* pNext;
2097 0u, // VkImageViewCreateFlags flags;
2098 *vkTexture, // VkImage image;
2099 imageViewType, // VkImageViewType viewType;
2100 format, // VkFormat format;
2101 components, // VkChannelMapping channels;
2103 aspectMask, // VkImageAspectFlags aspectMask;
2104 baseMipLevel, // deUint32 baseMipLevel;
2105 mipLevels - baseMipLevel, // deUint32 mipLevels;
2106 0, // deUint32 baseArraySlice;
2107 arrayLayers // deUint32 arraySize;
2108 }, // VkImageSubresourceRange subresourceRange;
2111 Move<VkImageView> imageView = createImageView(vk, vkDevice, &viewParams);
2113 const vk::VkDescriptorImageInfo descriptor =
2115 sampler.get(), // VkSampler sampler;
2116 imageView.get(), // VkImageView imageView;
2117 VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, // VkImageLayout imageLayout;
// Record ownership of all created objects in a SamplerUniform so they stay
// alive for the duration of the test, and register the binding with the
// descriptor set layout and pool builders.
2120 de::MovePtr<SamplerUniform> uniform(new SamplerUniform());
2121 uniform->type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
2122 uniform->descriptor = descriptor;
2123 uniform->location = bindingLocation;
2124 uniform->image = VkImageSp(new vk::Unique<VkImage>(vkTexture));
2125 uniform->imageView = VkImageViewSp(new vk::Unique<VkImageView>(imageView));
2126 uniform->sampler = VkSamplerSp(new vk::Unique<VkSampler>(sampler));
2127 uniform->alloc = AllocationSp(allocation.release());
2129 m_descriptorSetLayoutBuilder->addSingleSamplerBinding(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, vk::VK_SHADER_STAGE_ALL, DE_NULL);
2130 m_descriptorPoolBuilder->addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
2132 m_uniformInfos.push_back(UniformInfoSp(new de::UniquePtr<UniformInfo>(uniform)));
// Register the standard vertex attributes shared by all shader render cases
// (positions, coords, unit coords and a constant 1.0), then add whichever
// user attributes / matrix attributes were enabled for this case.
2135 void ShaderRenderCaseInstance::setupDefaultInputs (void)
2137 /* Configuration of the vertex input attributes:
2138 a_position is at location 0
2139 a_coords is at location 1
2140 a_unitCoords is at location 2
2141 a_one is at location 3
2143 User attributes starts from at the location 4.
2146 DE_ASSERT(m_quadGrid);
2147 const QuadGrid& quadGrid = *m_quadGrid;
2149 addAttribute(0u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getPositions());
2150 addAttribute(1u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getCoords());
2151 addAttribute(2u, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getUnitCoords());
2152 addAttribute(3u, VK_FORMAT_R32_SFLOAT, sizeof(float), quadGrid.getNumVertices(), quadGrid.getAttribOne());
// Lookup table mapping enabled base attribute types to user attribute data.
2156 BaseAttributeType type;
2158 } userAttributes[] =
// Lookup table for matrix attribute types; each matrix consumes one vec4
// attribute location per column.
2168 BaseAttributeType matrixType;
// For every enabled base attribute, find its entry in one of the two tables
// and register the corresponding vertex data.
2184 for (size_t attrNdx = 0; attrNdx < m_enabledBaseAttributes.size(); attrNdx++)
2186 for (int userNdx = 0; userNdx < DE_LENGTH_OF_ARRAY(userAttributes); userNdx++)
2188 if (userAttributes[userNdx].type != m_enabledBaseAttributes[attrNdx].type)
2191 addAttribute(m_enabledBaseAttributes[attrNdx].location, VK_FORMAT_R32G32B32A32_SFLOAT, sizeof(tcu::Vec4), quadGrid.getNumVertices(), quadGrid.getUserAttrib(userNdx));
2194 for (int matNdx = 0; matNdx < DE_LENGTH_OF_ARRAY(matrices); matNdx++)
2197 if (matrices[matNdx].matrixType != m_enabledBaseAttributes[attrNdx].type)
2200 const int numCols = matrices[matNdx].numCols;
2202 for (int colNdx = 0; colNdx < numCols; colNdx++)
// NOTE(review): the column loop passes colNdx to getUserAttrib, i.e. column
// index rather than a matrix/user-attribute index — verify this is the
// intended data source for matrix columns.
2204 addAttribute(m_enabledBaseAttributes[attrNdx].location + colNdx, VK_FORMAT_R32G32B32A32_SFLOAT, (deUint32)(4 * sizeof(float)), quadGrid.getNumVertices(), quadGrid.getUserAttrib(colNdx));
2210 void ShaderRenderCaseInstance::render (deUint32 numVertices,
2211 deUint32 numTriangles,
2212 const deUint16* indices,
2213 const tcu::Vec4& constCoords)
2215 render(numVertices, numTriangles * 3, indices, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, constCoords);
2218 void ShaderRenderCaseInstance::render (deUint32 numVertices,
2219 deUint32 numIndices,
2220 const deUint16* indices,
2221 VkPrimitiveTopology topology,
2222 const tcu::Vec4& constCoords)
2224 const VkDevice vkDevice = getDevice();
2225 const DeviceInterface& vk = getDeviceInterface();
2226 const VkQueue queue = getUniversalQueue();
2227 const deUint32 queueFamilyIndex = getUniversalQueueFamilyIndex();
2229 vk::Move<vk::VkImage> colorImage;
2230 de::MovePtr<vk::Allocation> colorImageAlloc;
2231 vk::Move<vk::VkImageView> colorImageView;
2232 vk::Move<vk::VkImage> resolvedImage;
2233 de::MovePtr<vk::Allocation> resolvedImageAlloc;
2234 vk::Move<vk::VkImageView> resolvedImageView;
2235 vk::Move<vk::VkRenderPass> renderPass;
2236 vk::Move<vk::VkFramebuffer> framebuffer;
2237 vk::Move<vk::VkPipelineLayout> pipelineLayout;
2238 vk::Move<vk::VkPipeline> graphicsPipeline;
2239 vk::Move<vk::VkShaderModule> vertexShaderModule;
2240 vk::Move<vk::VkShaderModule> fragmentShaderModule;
2241 vk::Move<vk::VkBuffer> indexBuffer;
2242 de::MovePtr<vk::Allocation> indexBufferAlloc;
2243 vk::Move<vk::VkDescriptorSetLayout> descriptorSetLayout;
2244 vk::Move<vk::VkDescriptorPool> descriptorPool;
2245 vk::Move<vk::VkDescriptorSet> descriptorSet;
2246 vk::Move<vk::VkCommandPool> cmdPool;
2247 vk::Move<vk::VkCommandBuffer> cmdBuffer;
2248 vk::Move<vk::VkFence> fence;
2250 // Create color image
2252 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
2253 VkImageFormatProperties properties;
2255 if ((getInstanceInterface().getPhysicalDeviceImageFormatProperties(getPhysicalDevice(),
2258 VK_IMAGE_TILING_OPTIMAL,
2261 &properties) == VK_ERROR_FORMAT_NOT_SUPPORTED))
2263 TCU_THROW(NotSupportedError, "Format not supported");
2266 if ((properties.sampleCounts & m_sampleCount) != m_sampleCount)
2268 TCU_THROW(NotSupportedError, "Format not supported");
2271 const VkImageCreateInfo colorImageParams =
2273 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2274 DE_NULL, // const void* pNext;
2275 0u, // VkImageCreateFlags flags;
2276 VK_IMAGE_TYPE_2D, // VkImageType imageType;
2277 m_colorFormat, // VkFormat format;
2278 { m_renderSize.x(), m_renderSize.y(), 1u }, // VkExtent3D extent;
2279 1u, // deUint32 mipLevels;
2280 1u, // deUint32 arraySize;
2281 m_sampleCount, // deUint32 samples;
2282 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2283 imageUsage, // VkImageUsageFlags usage;
2284 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2285 1u, // deUint32 queueFamilyCount;
2286 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
2287 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout initialLayout;
2290 colorImage = createImage(vk, vkDevice, &colorImageParams);
2292 // Allocate and bind color image memory
2293 colorImageAlloc = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *colorImage), MemoryRequirement::Any);
2294 VK_CHECK(vk.bindImageMemory(vkDevice, *colorImage, colorImageAlloc->getMemory(), colorImageAlloc->getOffset()));
2297 // Create color attachment view
2299 const VkImageViewCreateInfo colorImageViewParams =
2301 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2302 DE_NULL, // const void* pNext;
2303 0u, // VkImageViewCreateFlags flags;
2304 *colorImage, // VkImage image;
2305 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
2306 m_colorFormat, // VkFormat format;
2308 VK_COMPONENT_SWIZZLE_R, // VkChannelSwizzle r;
2309 VK_COMPONENT_SWIZZLE_G, // VkChannelSwizzle g;
2310 VK_COMPONENT_SWIZZLE_B, // VkChannelSwizzle b;
2311 VK_COMPONENT_SWIZZLE_A // VkChannelSwizzle a;
2312 }, // VkChannelMapping channels;
2314 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2315 0, // deUint32 baseMipLevel;
2316 1, // deUint32 mipLevels;
2317 0, // deUint32 baseArraySlice;
2318 1 // deUint32 arraySize;
2319 }, // VkImageSubresourceRange subresourceRange;
2322 colorImageView = createImageView(vk, vkDevice, &colorImageViewParams);
2325 if (isMultiSampling())
2329 const VkImageUsageFlags imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT;
2330 VkImageFormatProperties properties;
2332 if ((getInstanceInterface().getPhysicalDeviceImageFormatProperties(getPhysicalDevice(),
2335 VK_IMAGE_TILING_OPTIMAL,
2338 &properties) == VK_ERROR_FORMAT_NOT_SUPPORTED))
2340 TCU_THROW(NotSupportedError, "Format not supported");
2343 const VkImageCreateInfo imageCreateInfo =
2345 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // VkStructureType sType;
2346 DE_NULL, // const void* pNext;
2347 0u, // VkImageCreateFlags flags;
2348 VK_IMAGE_TYPE_2D, // VkImageType imageType;
2349 m_colorFormat, // VkFormat format;
2350 { m_renderSize.x(), m_renderSize.y(), 1u }, // VkExtent3D extent;
2351 1u, // deUint32 mipLevels;
2352 1u, // deUint32 arrayLayers;
2353 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2354 VK_IMAGE_TILING_OPTIMAL, // VkImageTiling tiling;
2355 imageUsage, // VkImageUsageFlags usage;
2356 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2357 1u, // deUint32 queueFamilyIndexCount;
2358 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
2359 VK_IMAGE_LAYOUT_UNDEFINED // VkImageLayout initialLayout;
2362 resolvedImage = vk::createImage(vk, vkDevice, &imageCreateInfo, DE_NULL);
2363 resolvedImageAlloc = m_memAlloc.allocate(getImageMemoryRequirements(vk, vkDevice, *resolvedImage), MemoryRequirement::Any);
2364 VK_CHECK(vk.bindImageMemory(vkDevice, *resolvedImage, resolvedImageAlloc->getMemory(), resolvedImageAlloc->getOffset()));
2367 // Resolved Image View
2369 const VkImageViewCreateInfo imageViewCreateInfo =
2371 VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // VkStructureType sType;
2372 DE_NULL, // const void* pNext;
2373 0u, // VkImageViewCreateFlags flags;
2374 *resolvedImage, // VkImage image;
2375 VK_IMAGE_VIEW_TYPE_2D, // VkImageViewType viewType;
2376 m_colorFormat, // VkFormat format;
2378 VK_COMPONENT_SWIZZLE_R, // VkChannelSwizzle r;
2379 VK_COMPONENT_SWIZZLE_G, // VkChannelSwizzle g;
2380 VK_COMPONENT_SWIZZLE_B, // VkChannelSwizzle b;
2381 VK_COMPONENT_SWIZZLE_A // VkChannelSwizzle a;
2384 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2385 0u, // deUint32 baseMipLevel;
2386 1u, // deUint32 mipLevels;
2387 0u, // deUint32 baseArrayLayer;
2388 1u, // deUint32 arraySize;
2389 }, // VkImageSubresourceRange subresourceRange;
2392 resolvedImageView = vk::createImageView(vk, vkDevice, &imageViewCreateInfo, DE_NULL);
2396 // Create render pass
2398 const VkAttachmentDescription attachmentDescription[] =
2401 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
2402 m_colorFormat, // VkFormat format;
2403 m_sampleCount, // deUint32 samples;
2404 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
2405 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
2406 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
2407 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
2408 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
2409 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
2412 (VkAttachmentDescriptionFlags)0, // VkAttachmentDescriptionFlags flags;
2413 m_colorFormat, // VkFormat format;
2414 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
2415 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp loadOp;
2416 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
2417 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
2418 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
2419 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
2420 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
2424 const VkAttachmentReference attachmentReference =
2426 0u, // deUint32 attachment;
2427 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
2430 const VkAttachmentReference resolveAttachmentRef =
2432 1u, // deUint32 attachment;
2433 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
2436 const VkSubpassDescription subpassDescription =
2438 0u, // VkSubpassDescriptionFlags flags;
2439 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
2440 0u, // deUint32 inputCount;
2441 DE_NULL, // constVkAttachmentReference* pInputAttachments;
2442 1u, // deUint32 colorCount;
2443 &attachmentReference, // constVkAttachmentReference* pColorAttachments;
2444 isMultiSampling() ? &resolveAttachmentRef : DE_NULL,// constVkAttachmentReference* pResolveAttachments;
2445 DE_NULL, // VkAttachmentReference depthStencilAttachment;
2446 0u, // deUint32 preserveCount;
2447 DE_NULL // constVkAttachmentReference* pPreserveAttachments;
2450 const VkRenderPassCreateInfo renderPassParams =
2452 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
2453 DE_NULL, // const void* pNext;
2454 0u, // VkRenderPassCreateFlags flags;
2455 isMultiSampling() ? 2u : 1u, // deUint32 attachmentCount;
2456 attachmentDescription, // const VkAttachmentDescription* pAttachments;
2457 1u, // deUint32 subpassCount;
2458 &subpassDescription, // const VkSubpassDescription* pSubpasses;
2459 0u, // deUint32 dependencyCount;
2460 DE_NULL // const VkSubpassDependency* pDependencies;
2463 renderPass = createRenderPass(vk, vkDevice, &renderPassParams);
2466 // Create framebuffer
2468 const VkImageView attachments[] =
2474 const VkFramebufferCreateInfo framebufferParams =
2476 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
2477 DE_NULL, // const void* pNext;
2478 (VkFramebufferCreateFlags)0,
2479 *renderPass, // VkRenderPass renderPass;
2480 isMultiSampling() ? 2u : 1u, // deUint32 attachmentCount;
2481 attachments, // const VkImageView* pAttachments;
2482 (deUint32)m_renderSize.x(), // deUint32 width;
2483 (deUint32)m_renderSize.y(), // deUint32 height;
2484 1u // deUint32 layers;
2487 framebuffer = createFramebuffer(vk, vkDevice, &framebufferParams);
2490 // Create descriptors
2492 setupUniforms(constCoords);
2494 descriptorSetLayout = m_descriptorSetLayoutBuilder->build(vk, vkDevice);
2495 if (!m_uniformInfos.empty())
2497 descriptorPool = m_descriptorPoolBuilder->build(vk, vkDevice, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
2498 const VkDescriptorSetAllocateInfo allocInfo =
2500 VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
2504 &descriptorSetLayout.get(),
2507 descriptorSet = allocateDescriptorSet(vk, vkDevice, &allocInfo);
2510 for (deUint32 i = 0; i < m_uniformInfos.size(); i++)
2512 const UniformInfo* uniformInfo = m_uniformInfos[i].get()->get();
2513 deUint32 location = uniformInfo->location;
2515 if (uniformInfo->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
2517 const BufferUniform* bufferInfo = dynamic_cast<const BufferUniform*>(uniformInfo);
2519 m_descriptorSetUpdateBuilder->writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(location), uniformInfo->type, &bufferInfo->descriptor);
2521 else if (uniformInfo->type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2523 const SamplerUniform* samplerInfo = dynamic_cast<const SamplerUniform*>(uniformInfo);
2525 m_descriptorSetUpdateBuilder->writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(location), uniformInfo->type, &samplerInfo->descriptor);
2528 DE_FATAL("Impossible");
2531 m_descriptorSetUpdateBuilder->update(vk, vkDevice);
2534 // Create pipeline layout
2536 const VkPushConstantRange* const pcRanges = m_pushConstantRanges.empty() ? DE_NULL : &m_pushConstantRanges[0];
2537 const VkPipelineLayoutCreateInfo pipelineLayoutParams =
2539 VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // VkStructureType sType;
2540 DE_NULL, // const void* pNext;
2541 (VkPipelineLayoutCreateFlags)0,
2542 1u, // deUint32 descriptorSetCount;
2543 &*descriptorSetLayout, // const VkDescriptorSetLayout* pSetLayouts;
2544 deUint32(m_pushConstantRanges.size()), // deUint32 pushConstantRangeCount;
2545 pcRanges // const VkPushConstantRange* pPushConstantRanges;
2548 pipelineLayout = createPipelineLayout(vk, vkDevice, &pipelineLayoutParams);
2553 vertexShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get(m_vertexShaderName), 0);
2554 fragmentShaderModule = createShaderModule(vk, vkDevice, m_context.getBinaryCollection().get(m_fragmentShaderName), 0);
2559 const VkPipelineShaderStageCreateInfo shaderStageParams[2] =
2562 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
2563 DE_NULL, // const void* pNext;
2564 (VkPipelineShaderStageCreateFlags)0,
2565 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStage stage;
2566 *vertexShaderModule, // VkShader shader;
2568 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
2571 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
2572 DE_NULL, // const void* pNext;
2573 (VkPipelineShaderStageCreateFlags)0,
2574 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStage stage;
2575 *fragmentShaderModule, // VkShader shader;
2577 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
2581 // Add test case specific attributes
2583 m_attribFunc(*this, numVertices);
2585 // Add base attributes
2586 setupDefaultInputs();
2588 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
2590 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
2591 DE_NULL, // const void* pNext;
2592 (VkPipelineVertexInputStateCreateFlags)0,
2593 (deUint32)m_vertexBindingDescription.size(), // deUint32 bindingCount;
2594 &m_vertexBindingDescription[0], // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
2595 (deUint32)m_vertexAttributeDescription.size(), // deUint32 attributeCount;
2596 &m_vertexAttributeDescription[0], // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
2599 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateParams =
2601 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType;
2602 DE_NULL, // const void* pNext;
2603 (VkPipelineInputAssemblyStateCreateFlags)0,
2604 topology, // VkPrimitiveTopology topology;
2605 false // VkBool32 primitiveRestartEnable;
2608 const VkViewport viewport =
2610 0.0f, // float originX;
2611 0.0f, // float originY;
2612 (float)m_renderSize.x(), // float width;
2613 (float)m_renderSize.y(), // float height;
2614 0.0f, // float minDepth;
2615 1.0f // float maxDepth;
2618 const VkRect2D scissor =
2623 }, // VkOffset2D offset;
2625 m_renderSize.x(), // deUint32 width;
2626 m_renderSize.y(), // deUint32 height;
2627 }, // VkExtent2D extent;
2630 const VkPipelineViewportStateCreateInfo viewportStateParams =
2632 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
2633 DE_NULL, // const void* pNext;
2634 0u, // VkPipelineViewportStateCreateFlags flags;
2635 1u, // deUint32 viewportCount;
2636 &viewport, // const VkViewport* pViewports;
2637 1u, // deUint32 scissorsCount;
2638 &scissor, // const VkRect2D* pScissors;
2641 const VkPipelineRasterizationStateCreateInfo rasterStateParams =
2643 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
2644 DE_NULL, // const void* pNext;
2645 (VkPipelineRasterizationStateCreateFlags)0,
2646 false, // VkBool32 depthClipEnable;
2647 false, // VkBool32 rasterizerDiscardEnable;
2648 VK_POLYGON_MODE_FILL, // VkFillMode fillMode;
2649 VK_CULL_MODE_NONE, // VkCullMode cullMode;
2650 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
2651 false, // VkBool32 depthBiasEnable;
2652 0.0f, // float depthBias;
2653 0.0f, // float depthBiasClamp;
2654 0.0f, // float slopeScaledDepthBias;
2655 1.0f, // float lineWidth;
2658 const VkPipelineMultisampleStateCreateInfo multisampleStateParams =
2660 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
2661 DE_NULL, // const void* pNext;
2662 0u, // VkPipelineMultisampleStateCreateFlags flags;
2663 m_sampleCount, // VkSampleCountFlagBits rasterizationSamples;
2664 VK_FALSE, // VkBool32 sampleShadingEnable;
2665 0.0f, // float minSampleShading;
2666 DE_NULL, // const VkSampleMask* pSampleMask;
2667 VK_FALSE, // VkBool32 alphaToCoverageEnable;
2668 VK_FALSE // VkBool32 alphaToOneEnable;
2671 const VkPipelineColorBlendAttachmentState colorBlendAttachmentState =
2673 false, // VkBool32 blendEnable;
2674 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendColor;
2675 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendColor;
2676 VK_BLEND_OP_ADD, // VkBlendOp blendOpColor;
2677 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendAlpha;
2678 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendAlpha;
2679 VK_BLEND_OP_ADD, // VkBlendOp blendOpAlpha;
2680 (VK_COLOR_COMPONENT_R_BIT |
2681 VK_COLOR_COMPONENT_G_BIT |
2682 VK_COLOR_COMPONENT_B_BIT |
2683 VK_COLOR_COMPONENT_A_BIT), // VkChannelFlags channelWriteMask;
2686 const VkPipelineColorBlendStateCreateInfo colorBlendStateParams =
2688 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
2689 DE_NULL, // const void* pNext;
2690 (VkPipelineColorBlendStateCreateFlags)0,
2691 false, // VkBool32 logicOpEnable;
2692 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
2693 1u, // deUint32 attachmentCount;
2694 &colorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
2695 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConst[4];
2698 const VkGraphicsPipelineCreateInfo graphicsPipelineParams =
2700 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
2701 DE_NULL, // const void* pNext;
2702 0u, // VkPipelineCreateFlags flags;
2703 2u, // deUint32 stageCount;
2704 shaderStageParams, // const VkPipelineShaderStageCreateInfo* pStages;
2705 &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
2706 &inputAssemblyStateParams, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
2707 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
2708 &viewportStateParams, // const VkPipelineViewportStateCreateInfo* pViewportState;
2709 &rasterStateParams, // const VkPipelineRasterStateCreateInfo* pRasterState;
2710 &multisampleStateParams, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
2711 DE_NULL, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
2712 &colorBlendStateParams, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
2713 (const VkPipelineDynamicStateCreateInfo*)DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
2714 *pipelineLayout, // VkPipelineLayout layout;
2715 *renderPass, // VkRenderPass renderPass;
2716 0u, // deUint32 subpass;
2717 0u, // VkPipeline basePipelineHandle;
2718 0u // deInt32 basePipelineIndex;
2721 graphicsPipeline = createGraphicsPipeline(vk, vkDevice, DE_NULL, &graphicsPipelineParams);
2724 // Create vertex indices buffer
2725 if (numIndices != 0)
2727 const VkDeviceSize indexBufferSize = numIndices * sizeof(deUint16);
2728 const VkBufferCreateInfo indexBufferParams =
2730 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
2731 DE_NULL, // const void* pNext;
2732 0u, // VkBufferCreateFlags flags;
2733 indexBufferSize, // VkDeviceSize size;
2734 VK_BUFFER_USAGE_INDEX_BUFFER_BIT, // VkBufferUsageFlags usage;
2735 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2736 1u, // deUint32 queueFamilyCount;
2737 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
2740 indexBuffer = createBuffer(vk, vkDevice, &indexBufferParams);
2741 indexBufferAlloc = m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *indexBuffer), MemoryRequirement::HostVisible);
2743 VK_CHECK(vk.bindBufferMemory(vkDevice, *indexBuffer, indexBufferAlloc->getMemory(), indexBufferAlloc->getOffset()));
2745 // Load vertice indices into buffer
2746 deMemcpy(indexBufferAlloc->getHostPtr(), indices, (size_t)indexBufferSize);
2747 flushMappedMemoryRange(vk, vkDevice, indexBufferAlloc->getMemory(), indexBufferAlloc->getOffset(), indexBufferSize);
2750 // Create command pool
2751 cmdPool = createCommandPool(vk, vkDevice, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex);
2753 // Create command buffer
2755 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
2757 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
2758 DE_NULL, // const void* pNext;
2759 0u, // VkCmdBufferOptimizeFlags flags;
2760 (const VkCommandBufferInheritanceInfo*)DE_NULL,
2763 const VkClearValue clearValues = makeClearValueColorF32(m_clearColor.x(),
2768 const VkRenderPassBeginInfo renderPassBeginInfo =
2770 VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
2771 DE_NULL, // const void* pNext;
2772 *renderPass, // VkRenderPass renderPass;
2773 *framebuffer, // VkFramebuffer framebuffer;
2774 { { 0, 0 }, {m_renderSize.x(), m_renderSize.y() } }, // VkRect2D renderArea;
2775 1, // deUint32 clearValueCount;
2776 &clearValues, // const VkClearValue* pClearValues;
2779 cmdBuffer = allocateCommandBuffer(vk, vkDevice, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2781 VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
2784 const VkImageMemoryBarrier imageBarrier =
2786 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2787 DE_NULL, // const void* pNext;
2788 0u, // VkAccessFlags srcAccessMask;
2789 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
2790 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
2791 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
2792 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
2793 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
2794 *colorImage, // VkImage image;
2795 { // VkImageSubresourceRange subresourceRange;
2796 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2797 0u, // deUint32 baseMipLevel;
2798 1u, // deUint32 mipLevels;
2799 0u, // deUint32 baseArrayLayer;
2800 1u, // deUint32 arraySize;
2804 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, DE_NULL, 1, &imageBarrier);
2806 if (isMultiSampling()) {
2807 // add multisample barrier
2808 const VkImageMemoryBarrier multiSampleImageBarrier =
2810 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2811 DE_NULL, // const void* pNext;
2812 0u, // VkAccessFlags srcAccessMask;
2813 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags dstAccessMask;
2814 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
2815 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout newLayout;
2816 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
2817 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
2818 *resolvedImage, // VkImage image;
2819 { // VkImageSubresourceRange subresourceRange;
2820 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2821 0u, // deUint32 baseMipLevel;
2822 1u, // deUint32 mipLevels;
2823 0u, // deUint32 baseArrayLayer;
2824 1u, // deUint32 arraySize;
2828 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, DE_NULL, 1, &multiSampleImageBarrier);
2832 vk.cmdBeginRenderPass(*cmdBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE);
2833 updatePushConstants(*cmdBuffer, *pipelineLayout);
2834 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *graphicsPipeline);
2835 if (!m_uniformInfos.empty())
2836 vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1, &*descriptorSet, 0u, DE_NULL);
2838 const deUint32 numberOfVertexAttributes = (deUint32)m_vertexBuffers.size();
2839 const std::vector<VkDeviceSize> offsets(numberOfVertexAttributes, 0);
2841 std::vector<VkBuffer> buffers(numberOfVertexAttributes);
2842 for (size_t i = 0; i < numberOfVertexAttributes; i++)
2844 buffers[i] = m_vertexBuffers[i].get()->get();
2847 vk.cmdBindVertexBuffers(*cmdBuffer, 0, numberOfVertexAttributes, &buffers[0], &offsets[0]);
2848 if (numIndices != 0)
2850 vk.cmdBindIndexBuffer(*cmdBuffer, *indexBuffer, 0, VK_INDEX_TYPE_UINT16);
2851 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1, 0, 0, 0);
2854 vk.cmdDraw(*cmdBuffer, numVertices, 1, 0, 1);
2856 vk.cmdEndRenderPass(*cmdBuffer);
2857 VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
2861 fence = createFence(vk, vkDevice);
2865 const VkSubmitInfo submitInfo =
2867 VK_STRUCTURE_TYPE_SUBMIT_INFO,
2870 (const VkSemaphore*)DE_NULL,
2871 (const VkPipelineStageFlags*)DE_NULL,
2875 (const VkSemaphore*)DE_NULL,
2878 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
2879 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity*/));
2882 // Read back the result
2884 const tcu::TextureFormat resultFormat = mapVkFormat(m_colorFormat);
2885 const VkDeviceSize imageSizeBytes = (VkDeviceSize)(resultFormat.getPixelSize() * m_renderSize.x() * m_renderSize.y());
2886 const VkBufferCreateInfo readImageBufferParams =
2888 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
2889 DE_NULL, // const void* pNext;
2890 0u, // VkBufferCreateFlags flags;
2891 imageSizeBytes, // VkDeviceSize size;
2892 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // VkBufferUsageFlags usage;
2893 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
2894 1u, // deUint32 queueFamilyCount;
2895 &queueFamilyIndex, // const deUint32* pQueueFamilyIndices;
2897 const Unique<VkBuffer> readImageBuffer (createBuffer(vk, vkDevice, &readImageBufferParams));
2898 const de::UniquePtr<Allocation> readImageBufferMemory (m_memAlloc.allocate(getBufferMemoryRequirements(vk, vkDevice, *readImageBuffer), MemoryRequirement::HostVisible));
2900 VK_CHECK(vk.bindBufferMemory(vkDevice, *readImageBuffer, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset()));
2902 // Copy image to buffer
2903 const VkCommandBufferBeginInfo cmdBufferBeginInfo =
2905 VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
2906 DE_NULL, // const void* pNext;
2907 0u, // VkCmdBufferOptimizeFlags flags;
2908 (const VkCommandBufferInheritanceInfo*)DE_NULL,
2911 const Move<VkCommandBuffer> resultCmdBuffer = allocateCommandBuffer(vk, vkDevice, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2913 const VkBufferImageCopy copyParams =
2915 0u, // VkDeviceSize bufferOffset;
2916 (deUint32)m_renderSize.x(), // deUint32 bufferRowLength;
2917 (deUint32)m_renderSize.y(), // deUint32 bufferImageHeight;
2919 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspect aspect;
2920 0u, // deUint32 mipLevel;
2921 0u, // deUint32 arraySlice;
2922 1u, // deUint32 arraySize;
2923 }, // VkImageSubresourceCopy imageSubresource;
2924 { 0u, 0u, 0u }, // VkOffset3D imageOffset;
2925 { m_renderSize.x(), m_renderSize.y(), 1u } // VkExtent3D imageExtent;
2927 const VkSubmitInfo submitInfo =
2929 VK_STRUCTURE_TYPE_SUBMIT_INFO,
2932 (const VkSemaphore*)DE_NULL,
2933 (const VkPipelineStageFlags*)DE_NULL,
2935 &resultCmdBuffer.get(),
2937 (const VkSemaphore*)DE_NULL,
2940 VK_CHECK(vk.beginCommandBuffer(*resultCmdBuffer, &cmdBufferBeginInfo));
2942 const VkImageMemoryBarrier imageBarrier =
2944 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
2945 DE_NULL, // const void* pNext;
2946 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
2947 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
2948 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
2949 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
2950 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
2951 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
2952 isMultiSampling() ? *resolvedImage : *colorImage, // VkImage image;
2953 { // VkImageSubresourceRange subresourceRange;
2954 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
2955 0u, // deUint32 baseMipLevel;
2956 1u, // deUint32 mipLevels;
2957 0u, // deUint32 baseArraySlice;
2958 1u // deUint32 arraySize;
2962 const VkBufferMemoryBarrier bufferBarrier =
2964 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
2965 DE_NULL, // const void* pNext;
2966 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
2967 VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
2968 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
2969 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
2970 *readImageBuffer, // VkBuffer buffer;
2971 0u, // VkDeviceSize offset;
2972 imageSizeBytes // VkDeviceSize size;
2975 vk.cmdPipelineBarrier(*resultCmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &imageBarrier);
2976 vk.cmdCopyImageToBuffer(*resultCmdBuffer, isMultiSampling() ? *resolvedImage : *colorImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, ©Params);
2977 vk.cmdPipelineBarrier(*resultCmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &bufferBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
2979 VK_CHECK(vk.endCommandBuffer(*resultCmdBuffer));
2981 VK_CHECK(vk.resetFences(vkDevice, 1, &fence.get()));
2982 VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, *fence));
2983 VK_CHECK(vk.waitForFences(vkDevice, 1, &fence.get(), true, ~(0ull) /* infinity */));
2985 invalidateMappedMemoryRange(vk, vkDevice, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset(), imageSizeBytes);
2987 const tcu::ConstPixelBufferAccess resultAccess (resultFormat, m_renderSize.x(), m_renderSize.y(), 1, readImageBufferMemory->getHostPtr());
2989 m_resultImage.setStorage(resultFormat, m_renderSize.x(), m_renderSize.y());
2990 tcu::copy(m_resultImage.getAccess(), resultAccess);
// Computes the reference image for the vertex-shader variant of this case:
// m_evaluator is run once per grid vertex, and the resulting vertex colors are
// then interpolated across each grid cell the same way the rasterizer would
// (each quad split into two triangles along the fx1 + fy1 == 1 diagonal).
// NOTE(review): this extracted text elides some original lines (e.g. 3019-3022
// after the color read, presumably the hasAlpha handling) — confirm against
// the full source before relying on this view.
2994 void ShaderRenderCaseInstance::computeVertexReference (tcu::Surface& result, const QuadGrid& quadGrid)
2996 DE_ASSERT(m_evaluator);
2999 const int width = result.getWidth();
3000 const int height = result.getHeight();
3001 const int gridSize = quadGrid.getGridSize();
3002 const int stride = gridSize + 1; // vertices per row of the (gridSize+1)^2 vertex grid
3003 const bool hasAlpha = true; // \todo [2015-09-07 elecro] add correct alpha check
3004 ShaderEvalContext evalCtx (quadGrid);
3006 // Evaluate color for each vertex.
3007 std::vector<tcu::Vec4> colors ((gridSize + 1) * (gridSize + 1));
3008 for (int y = 0; y < gridSize+1; y++)
3009 for (int x = 0; x < gridSize+1; x++)
// sx/sy are the vertex's normalized [0,1] grid coordinates.
3011 const float sx = (float)x / (float)gridSize;
3012 const float sy = (float)y / (float)gridSize;
3013 const int vtxNdx = ((y * (gridSize+1)) + x);
3015 evalCtx.reset(sx, sy);
3016 m_evaluator->evaluate(evalCtx);
3017 DE_ASSERT(!evalCtx.isDiscarded); // Discard is not available in vertex shader.
3018 tcu::Vec4 color = evalCtx.color;
3023 colors[vtxNdx] = color;
// Rasterize each grid cell: interpolate the four corner colors over the
// pixels the cell covers.
3027 for (int y = 0; y < gridSize; y++)
3028 for (int x = 0; x < gridSize; x++)
// Cell bounds in normalized [0,1] coordinates...
3030 const float x0 = (float)x / (float)gridSize;
3031 const float x1 = (float)(x + 1) / (float)gridSize;
3032 const float y0 = (float)y / (float)gridSize;
3033 const float y1 = (float)(y + 1) / (float)gridSize;
// ...and in screen (pixel) coordinates; oosx/oosy are reciprocal cell sizes
// used to normalize per-pixel interpolation factors.
3035 const float sx0 = x0 * (float)width;
3036 const float sx1 = x1 * (float)width;
3037 const float sy0 = y0 * (float)height;
3038 const float sy1 = y1 * (float)height;
3039 const float oosx = 1.0f / (sx1 - sx0);
3040 const float oosy = 1.0f / (sy1 - sy0);
// Integer pixel range whose centers (at +0.5) fall inside this cell.
3042 const int ix0 = deCeilFloatToInt32(sx0 - 0.5f);
3043 const int ix1 = deCeilFloatToInt32(sx1 - 0.5f);
3044 const int iy0 = deCeilFloatToInt32(sy0 - 0.5f);
3045 const int iy1 = deCeilFloatToInt32(sy1 - 0.5f);
// Corner vertex indices and their evaluated colors.
3047 const int v00 = (y * stride) + x;
3048 const int v01 = (y * stride) + x + 1;
3049 const int v10 = ((y + 1) * stride) + x;
3050 const int v11 = ((y + 1) * stride) + x + 1;
3051 const tcu::Vec4 c00 = colors[v00];
3052 const tcu::Vec4 c01 = colors[v01];
3053 const tcu::Vec4 c10 = colors[v10];
3054 const tcu::Vec4 c11 = colors[v11];
3056 //printf("(%d,%d) -> (%f..%f, %f..%f) (%d..%d, %d..%d)\n", x, y, sx0, sx1, sy0, sy1, ix0, ix1, iy0, iy1);
3058 for (int iy = iy0; iy < iy1; iy++)
3059 for (int ix = ix0; ix < ix1; ix++)
3061 DE_ASSERT(deInBounds32(ix, 0, width));
3062 DE_ASSERT(deInBounds32(iy, 0, height));
// Sample at the pixel center and clamp barycentric factors to [0,1].
3064 const float sfx = (float)ix + 0.5f;
3065 const float sfy = (float)iy + 0.5f;
3066 const float fx1 = deFloatClamp((sfx - sx0) * oosx, 0.0f, 1.0f);
3067 const float fy1 = deFloatClamp((sfy - sy0) * oosy, 0.0f, 1.0f);
3069 // Triangle quad interpolation.
// Pick the lower-left or upper-right triangle of the quad and interpolate
// linearly from that triangle's corner colors.
3070 const bool tri = fx1 + fy1 <= 1.0f;
3071 const float tx = tri ? fx1 : (1.0f-fx1);
3072 const float ty = tri ? fy1 : (1.0f-fy1);
3073 const tcu::Vec4& t0 = tri ? c00 : c11;
3074 const tcu::Vec4& t1 = tri ? c01 : c10;
3075 const tcu::Vec4& t2 = tri ? c10 : c01;
3076 const tcu::Vec4 color = t0 + (t1-t0)*tx + (t2-t0)*ty;
3078 result.setPixel(ix, iy, tcu::RGBA(color));
// Computes the reference image for the fragment-shader variant of this case:
// m_evaluator is run once per output pixel at the pixel center, and pixels the
// evaluator marks as discarded keep the clear color.
// NOTE(review): original lines 3104-3107 are elided in this extraction
// (presumably the hasAlpha handling) — confirm against the full source.
3083 void ShaderRenderCaseInstance::computeFragmentReference (tcu::Surface& result, const QuadGrid& quadGrid)
3085 DE_ASSERT(m_evaluator);
3088 const int width = result.getWidth();
3089 const int height = result.getHeight();
3090 const bool hasAlpha = true; // \todo [2015-09-07 elecro] add correct alpha check
3091 ShaderEvalContext evalCtx (quadGrid);
// Evaluate every pixel; sx/sy are normalized [0,1] pixel-center coordinates.
3094 for (int y = 0; y < height; y++)
3095 for (int x = 0; x < width; x++)
3097 const float sx = ((float)x + 0.5f) / (float)width;
3098 const float sy = ((float)y + 0.5f) / (float)height;
3100 evalCtx.reset(sx, sy);
3101 m_evaluator->evaluate(evalCtx);
3102 // Select either clear color or computed color based on discarded bit.
3103 tcu::Vec4 color = evalCtx.isDiscarded ? m_clearColor : evalCtx.color;
3108 result.setPixel(x, y, tcu::RGBA(color));
// Fuzzy-compares the rendered result image against the reference image,
// logging the comparison (images and verdict) to the test log under
// "ComparisonResult". Returns true when the images match within
// errorThreshold.
3112 bool ShaderRenderCaseInstance::compareImages (const tcu::Surface& resImage, const tcu::Surface& refImage, float errorThreshold)
3114 return tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ComparisonResult", "Image comparison result", refImage, resImage, errorThreshold, tcu::COMPARE_LOG_RESULT);