1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
5 * Copyright (c) 2015 Google Inc.
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief ShaderLibrary Vulkan implementation
22 *//*--------------------------------------------------------------------*/
24 #include "vktShaderLibrary.hpp"
25 #include "vktTestCase.hpp"
27 #include "vkPrograms.hpp"
29 #include "vkRefUtil.hpp"
30 #include "vkMemUtil.hpp"
31 #include "vkQueryUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkTypeUtil.hpp"
34 #include "vkImageUtil.hpp"
36 #include "gluShaderLibrary.hpp"
37 #include "gluShaderUtil.hpp"
39 #include "tcuStringTemplate.hpp"
40 #include "tcuTexture.hpp"
41 #include "tcuTestLog.hpp"
42 #include "tcuVector.hpp"
43 #include "tcuVectorUtil.hpp"
45 #include "deStringUtil.hpp"
46 #include "deArrayUtil.hpp"
59 using std::ostringstream;
64 using glu::ShaderType;
65 using glu::ProgramSources;
68 using glu::sl::ShaderCaseSpecification;
69 using glu::sl::ProgramSpecializationParams;
70 using glu::sl::RequiredExtension;
72 using glu::sl::ValueBlock;
74 using tcu::TestStatus;
75 using tcu::StringTemplate;
77 using tcu::ConstPixelBufferAccess;
78 using tcu::TextureFormat;
81 using vk::SourceCollections;
// Descriptor-set binding indices for the two uniform blocks this file
// generates: the reference-values block and the user uniform block.
// NOTE(review): the enclosing enum declaration is outside this excerpt.
90 REFERENCE_UNIFORM_BINDING = 0,
91 USER_UNIFORM_BINDING = 1
// Builds the binary-collection name for a shader: the stage name
// (e.g. "vertex") followed by "_<progNdx>".
// NOTE(review): lines omitted from this excerpt may make the suffix
// conditional on progNdx — confirm against the full source.
94 string getShaderName (ShaderType shaderType, size_t progNdx)
97 str << glu::getShaderTypeName(shaderType);
99 str << "_" << progNdx;
// Emits a GLSL std140 uniform block declaration named 'blockName' at the
// given descriptor set / binding, with one member per entry in 'uniforms'.
// If 'instanceName' is non-empty it is appended as the block instance name.
103 void genUniformBlock (ostringstream& out, const string& blockName, const string& instanceName, int setNdx, int bindingNdx, const vector<Value>& uniforms)
108 out << "set = " << setNdx << ", ";
110 out << "binding = " << bindingNdx << ", std140) uniform " << blockName << "\n"
113 for (vector<Value>::const_iterator val = uniforms.begin(); val != uniforms.end(); ++val)
114 out << "\t" << glu::declare(val->type, val->name, 1) << ";\n";
118 if (!instanceName.empty())
119 out << " " << instanceName;
// Declares the "Reference" uniform block (instance name "ref") holding the
// expected output values, bound at REFERENCE_UNIFORM_BINDING. No-op when
// the value block has no outputs.
124 void declareReferenceBlock (ostringstream& out, const ValueBlock& valueBlock)
126 if (!valueBlock.outputs.empty())
127 genUniformBlock(out, "Reference", "ref", 0, REFERENCE_UNIFORM_BINDING, valueBlock.outputs);
// Declares the anonymous "Uniforms" block for user-specified uniform values,
// bound at USER_UNIFORM_BINDING. No-op when the value block has no uniforms.
130 void declareUniforms (ostringstream& out, const ValueBlock& valueBlock)
132 if (!valueBlock.uniforms.empty())
133 genUniformBlock(out, "Uniforms", "", 0, USER_UNIFORM_BINDING, valueBlock.uniforms)
// Maps a value type to the type used to carry it through vertex inputs and
// varyings: bool/bvec values travel as same-width uint vectors (bools are
// not valid attribute/varying types). Other types pass through unchanged
// (the pass-through return is omitted from this excerpt).
136 DataType getTransportType (DataType valueType)
138 if (isDataTypeBoolOrBVec(valueType))
139 return glu::getDataTypeUintVec(getDataTypeScalarSize(valueType));
// Number of input/varying locations a value consumes: one per matrix
// column, otherwise a single location.
144 int getNumTransportLocations (DataType valueType)
146 return isDataTypeMatrix(valueType) ? getDataTypeMatrixNumColumns(valueType) : 1;
149 // This functions builds a matching vertex shader for a 'both' case, when
150 // the fragment shader is being tested.
151 // We need to build attributes and varyings for each 'input'.
// Declares dEQP_Position at location 0, then one attribute (a_<name>) and
// one flat output per input value; inputs whose transport type differs from
// the value type (bools) get a "v_" prefix on the varying. main() forwards
// position and copies each attribute straight to its varying.
152 string genVertexShader (const ShaderCaseSpecification& spec)
156 int curOutputLoc = 0;
158 res << glu::getGLSLVersionDeclaration(spec.targetVersion) << "\n";
160 // Declarations (position + attribute/varying for each input).
161 res << "precision highp float;\n";
162 res << "precision highp int;\n";
164 res << "layout(location = 0) in highp vec4 dEQP_Position;\n";
167 for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
169 const Value& val = spec.values.inputs[ndx];
170 const DataType valueType = val.type.getBasicType();
171 const DataType transportType = getTransportType(valueType);
172 const char* const transportTypeStr = getDataTypeName(transportType);
// Matrices consume one location per column; advance both location counters.
173 const int numLocs = getNumTransportLocations(valueType);
175 res << "layout(location = " << curInputLoc << ") in " << transportTypeStr << " a_" << val.name << ";\n";
176 res << "layout(location = " << curOutputLoc << ") flat out " << transportTypeStr << " " << (transportType != valueType ? "v_" : "") << val.name << ";\n";
178 curInputLoc += numLocs;
179 curOutputLoc += numLocs;
184 // - gl_Position = dEQP_Position;
185 // - for each input: write attribute directly to varying
186 res << "void main()\n";
188 res << " gl_Position = dEQP_Position;\n";
189 for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
191 const Value& val = spec.values.inputs[ndx];
192 const string& name = val.name;
194 res << " " << (getTransportType(val.type.getBasicType()) != val.type.getBasicType() ? "v_" : "")
195 << name << " = a_" << name << ";\n";
// Emits GLSL that compares each output value against its reference
// (ref.<name>) and writes the combined result into 'dstVec4Var': white on
// pass, RES-tinted otherwise. When 'checkVarName' is non-null only that
// single output is compared. Float-typed outputs use a 0.05 tolerance.
202 void genCompareOp (ostringstream& output, const char* dstVec4Var, const ValueBlock& valueBlock, const char* checkVarName)
204 bool isFirstOutput = true;
206 for (size_t ndx = 0; ndx < valueBlock.outputs.size(); ndx++)
208 const Value& val = valueBlock.outputs[ndx];
210 // Check if we're only interested in one variable (then skip if not the right one).
211 if (checkVarName && val.name != checkVarName)
// First comparison declares RES; later ones AND into it.
217 output << "bool RES = ";
218 isFirstOutput = false;
221 output << "RES = RES && ";
223 // Generate actual comparison.
224 if (getDataTypeScalarType(val.type.getBasicType()) == glu::TYPE_FLOAT)
225 output << "isOk(" << val.name << ", ref." << val.name << ", 0.05);\n";
227 output << "isOk(" << val.name << ", ref." << val.name << ");\n";
// No outputs compared => unconditional pass (vec4(1.0)).
231 output << dstVec4Var << " = vec4(1.0);\n";
233 output << dstVec4Var << " = vec4(RES, RES, RES, 1.0);\n";
// Builds the fragment shader for a vertex-shader test case: receives each
// output value as a flat varying (with "v_" prefix + conversion when the
// transport type differs), declares the reference uniform block, and
// compares varyings to references via genCompareOp into dEQP_FragColor.
236 string genFragmentShader (const ShaderCaseSpecification& spec)
238 ostringstream shader;
242 shader << glu::getGLSLVersionDeclaration(spec.targetVersion) << "\n";
244 shader << "precision highp float;\n";
245 shader << "precision highp int;\n";
248 shader << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
251 genCompareFunctions(shader, spec.values, false);
254 // Declarations (varying, reference for each output).
255 for (size_t ndx = 0; ndx < spec.values.outputs.size(); ndx++)
257 const Value& val = spec.values.outputs[ndx];
258 const DataType valueType = val.type.getBasicType();
259 const char* const valueTypeStr = getDataTypeName(valueType);
260 const DataType transportType = getTransportType(valueType);
261 const char* const transportTypeStr = getDataTypeName(transportType);
262 const int numLocs = getNumTransportLocations(valueType);
264 shader << "layout(location = " << curInLoc << ") flat in " << transportTypeStr << " " << (valueType != transportType ? "v_" : "") << val.name << ";\n";
// Bool outputs arrive as uints; convert back to the declared value type.
266 if (valueType != transportType)
267 setup << " " << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(v_" << val.name << ");\n";
272 declareReferenceBlock(shader, spec.values);
275 shader << "void main()\n";
278 shader << setup.str();
281 genCompareOp(shader, "dEQP_FragColor", spec.values, DE_NULL);
287 // Specialize a shader for the vertex shader test case.
// Fills the ${DECLARATIONS}/${SETUP}/${OUTPUT}/${POSITION_FRAG_COLOR}
// template parameters of 'src': position + per-input attributes (with
// bool->uint transport conversion via "a_" aliases), per-output flat
// varyings (written through "v_" transport copies), and the user uniform
// block. Finally injects any required extensions for the vertex stage.
288 string specializeVertexShader (const ShaderCaseSpecification& spec, const string& src)
292 ostringstream output;
294 int curOutputLoc = 0;
296 // generated from "both" case
297 DE_ASSERT(spec.caseType == glu::sl::CASETYPE_VERTEX_ONLY);
299 // Output (write out position).
300 output << "gl_Position = dEQP_Position;\n";
302 // Declarations (position + attribute for each input, varying for each output).
303 decl << "layout(location = 0) in highp vec4 dEQP_Position;\n";
306 for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
308 const Value& val = spec.values.inputs[ndx];
309 const DataType valueType = val.type.getBasicType();
310 const char* const valueTypeStr = getDataTypeName(valueType);
311 const DataType transportType = getTransportType(valueType);
312 const char* const transportTypeStr = getDataTypeName(transportType);
313 const int numLocs = getNumTransportLocations(valueType);
315 decl << "layout(location = " << curInputLoc << ") in ";
317 curInputLoc += numLocs;
// Same-type inputs are declared under their own name; bool inputs get an
// "a_" attribute plus a converted local in the SETUP section.
319 if (valueType == transportType)
320 decl << transportTypeStr << " " << val.name << ";\n";
323 decl << transportTypeStr << " a_" << val.name << ";\n";
324 setup << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(a_" << val.name << ");\n";
328 declareUniforms(decl, spec.values);
330 for (size_t ndx = 0; ndx < spec.values.outputs.size(); ndx++)
332 const Value& val = spec.values.outputs[ndx];
333 const DataType valueType = val.type.getBasicType();
334 const char* const valueTypeStr = getDataTypeName(valueType);
335 const DataType transportType = getTransportType(valueType);
336 const char* const transportTypeStr = getDataTypeName(transportType);
337 const int numLocs = getNumTransportLocations(valueType);
339 decl << "layout(location = " << curOutputLoc << ") flat out ";
341 curOutputLoc += numLocs;
343 if (valueType == transportType)
344 decl << transportTypeStr << " " << val.name << ";\n";
// Bool outputs: declare a "v_" transport varying plus a plain local the
// test code writes; OUTPUT copies the local into the varying.
347 decl << transportTypeStr << " v_" << val.name << ";\n";
348 decl << valueTypeStr << " " << val.name << ";\n";
350 output << "v_" << val.name << " = " << transportTypeStr << "(" << val.name << ");\n";
354 // Shader specialization.
355 map<string, string> params;
356 params.insert(pair<string, string>("DECLARATIONS", decl.str()));
357 params.insert(pair<string, string>("SETUP", setup.str()));
358 params.insert(pair<string, string>("OUTPUT", output.str()));
359 params.insert(pair<string, string>("POSITION_FRAG_COLOR", "gl_Position"));
361 StringTemplate tmpl (src);
362 const string baseSrc = tmpl.specialize(params);
363 const string withExt = injectExtensionRequirements(baseSrc, spec.programs[0].requiredExtensions, glu::SHADERTYPE_VERTEX);
368 // Specialize a shader for the fragment shader test case.
// Counterpart of specializeVertexShader for fragment-only cases: inputs
// arrive as flat varyings ("v_" + conversion for bool transport), outputs
// are plain locals compared against the reference block by genCompareOp,
// and the result is written to dEQP_FragColor.
369 string specializeFragmentShader (const ShaderCaseSpecification& spec, const string& src)
373 ostringstream output;
376 // generated from "both" case
377 DE_ASSERT(spec.caseType == glu::sl::CASETYPE_FRAGMENT_ONLY);
379 genCompareFunctions(decl, spec.values, false);
380 genCompareOp(output, "dEQP_FragColor", spec.values, DE_NULL);
382 decl << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
384 for (size_t ndx = 0; ndx < spec.values.inputs.size(); ndx++)
386 const Value& val = spec.values.inputs[ndx];
387 const DataType valueType = val.type.getBasicType();
388 const char* const valueTypeStr = getDataTypeName(valueType);
389 const DataType transportType = getTransportType(valueType);
390 const char* const transportTypeStr = getDataTypeName(transportType);
391 const int numLocs = getNumTransportLocations(valueType);
393 decl << "layout(location = " << curInputLoc << ") flat in ";
395 curInputLoc += numLocs;
397 if (valueType == transportType)
398 decl << transportTypeStr << " " << val.name << ";\n";
// Bool inputs: "v_" transport varying + converted local in SETUP.
401 decl << transportTypeStr << " v_" << val.name << ";\n";
402 setup << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(v_" << val.name << ");\n";
406 declareUniforms(decl, spec.values);
407 declareReferenceBlock(decl, spec.values);
// Outputs are plain globals the test body assigns; genCompareOp reads them.
409 for (size_t ndx = 0; ndx < spec.values.outputs.size(); ndx++)
411 const Value& val = spec.values.outputs[ndx];
412 const DataType basicType = val.type.getBasicType();
413 const char* const refTypeStr = getDataTypeName(basicType);
415 decl << refTypeStr << " " << val.name << ";\n";
418 // Shader specialization.
419 map<string, string> params;
420 params.insert(pair<string, string>("DECLARATIONS", decl.str()));
421 params.insert(pair<string, string>("SETUP", setup.str()));
422 params.insert(pair<string, string>("OUTPUT", output.str()));
423 params.insert(pair<string, string>("POSITION_FRAG_COLOR", "dEQP_FragColor"));
425 StringTemplate tmpl (src);
426 const string baseSrc = tmpl.specialize(params);
427 const string withExt = injectExtensionRequirements(baseSrc, spec.programs[0].requiredExtensions, glu::SHADERTYPE_FRAGMENT);
// Produces the VERTEX_DECLARATIONS / VERTEX_SETUP / VERTEX_OUTPUT template
// parameters for 'complete'-case programs: position attribute, one
// attribute per input (with bool->uint transport conversion), and the user
// uniform block.
432 map<string, string> generateVertexSpecialization (const ProgramSpecializationParams& specParams)
436 map<string, string> params;
439 decl << "layout(location = 0) in highp vec4 dEQP_Position;\n";
442 for (size_t ndx = 0; ndx < specParams.caseSpec.values.inputs.size(); ndx++)
444 const Value& val = specParams.caseSpec.values.inputs[ndx];
445 const DataType valueType = val.type.getBasicType();
446 const char* const valueTypeStr = getDataTypeName(valueType);
447 const DataType transportType = getTransportType(valueType);
448 const char* const transportTypeStr = getDataTypeName(transportType);
449 const int numLocs = getNumTransportLocations(valueType);
451 decl << "layout(location = " << curInputLoc << ") in ";
453 curInputLoc += numLocs;
455 if (valueType == transportType)
456 decl << transportTypeStr << " " << val.name << ";\n";
// Bool inputs: "a_" attribute plus converted local in SETUP.
459 decl << transportTypeStr << " a_" << val.name << ";\n";
460 setup << valueTypeStr << " " << val.name << " = " << valueTypeStr << "(a_" << val.name << ");\n";
464 declareUniforms(decl, specParams.caseSpec.values);
466 params.insert(pair<string, string>("VERTEX_DECLARATIONS", decl.str()));
467 params.insert(pair<string, string>("VERTEX_SETUP", setup.str()));
468 params.insert(pair<string, string>("VERTEX_OUTPUT", string("gl_Position = dEQP_Position;\n")));
// Produces FRAGMENT_DECLARATIONS / FRAGMENT_OUTPUT / FRAG_COLOR template
// parameters: output locals, compare helpers, reference and user uniform
// blocks, and the comparison code writing dEQP_FragColor.
473 map<string, string> generateFragmentSpecialization (const ProgramSpecializationParams& specParams)
476 ostringstream output;
477 map<string, string> params;
479 genCompareFunctions(decl, specParams.caseSpec.values, false);
480 genCompareOp(output, "dEQP_FragColor", specParams.caseSpec.values, DE_NULL);
482 decl << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
484 for (size_t ndx = 0; ndx < specParams.caseSpec.values.outputs.size(); ndx++)
486 const Value& val = specParams.caseSpec.values.outputs[ndx];
487 const char* const refTypeStr = getDataTypeName(val.type.getBasicType());
489 decl << refTypeStr << " " << val.name << ";\n";
492 declareReferenceBlock(decl, specParams.caseSpec.values);
493 declareUniforms(decl, specParams.caseSpec.values);
495 params.insert(pair<string, string>("FRAGMENT_DECLARATIONS", decl.str()));
496 params.insert(pair<string, string>("FRAGMENT_OUTPUT", output.str()));
497 params.insert(pair<string, string>("FRAG_COLOR", "dEQP_FragColor"));
// Produces GEOMETRY_DECLARATIONS: triangles-in / triangle-strip-out layout
// plus the user uniform block.
502 map<string, string> generateGeometrySpecialization (const ProgramSpecializationParams& specParams)
505 map<string, string> params;
507 decl << "layout (triangles) in;\n";
508 decl << "layout (triangle_strip, max_vertices=3) out;\n";
511 declareUniforms(decl, specParams.caseSpec.values);
513 params.insert(pair<string, string>("GEOMETRY_DECLARATIONS", decl.str()));
// Produces tessellation-control template parameters: 3-vertex patch output,
// pass-through of gl_Position, and fixed inner/outer tess levels of 2.0.
// GL_MAX_PATCH_VERTICES is substituted from the specialization parameters.
518 map<string, string> generateTessControlSpecialization (const ProgramSpecializationParams& specParams)
521 ostringstream output;
522 map<string, string> params;
524 decl << "layout (vertices=3) out;\n";
527 declareUniforms(decl, specParams.caseSpec.values);
529 output << "gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
530 "gl_TessLevelInner[0] = 2.0;\n"
531 "gl_TessLevelInner[1] = 2.0;\n"
532 "gl_TessLevelOuter[0] = 2.0;\n"
533 "gl_TessLevelOuter[1] = 2.0;\n"
534 "gl_TessLevelOuter[2] = 2.0;\n"
535 "gl_TessLevelOuter[3] = 2.0;";
537 params.insert(pair<string, string>("TESSELLATION_CONTROL_DECLARATIONS", decl.str()));
538 params.insert(pair<string, string>("TESSELLATION_CONTROL_OUTPUT", output.str()));
539 params.insert(pair<string, string>("GL_MAX_PATCH_VERTICES", de::toString(specParams.maxPatchVertices)));
// Produces tessellation-evaluation template parameters: triangle domain and
// barycentric interpolation of the three patch vertices into gl_Position.
544 map<string, string> generateTessEvalSpecialization (const ProgramSpecializationParams& specParams)
547 ostringstream output;
548 map<string, string> params;
550 decl << "layout (triangles) in;\n";
553 declareUniforms(decl, specParams.caseSpec.values);
555 output << "gl_Position = gl_TessCoord[0] * gl_in[0].gl_Position + gl_TessCoord[1] * gl_in[1].gl_Position + gl_TessCoord[2] * gl_in[2].gl_Position;\n";
557 params.insert(pair<string, string>("TESSELLATION_EVALUATION_DECLARATIONS", decl.str()));
558 params.insert(pair<string, string>("TESSELLATION_EVALUATION_OUTPUT", output.str()));
559 params.insert(pair<string, string>("GL_MAX_PATCH_VERTICES", de::toString(specParams.maxPatchVertices)));
// Specializes every source string of one shader stage in 'src': runs the
// stage-specific generator once, applies the resulting parameters to each
// source via StringTemplate, injects required extensions, and appends the
// final source to 'dst'. No-op when the stage has no sources.
564 void specializeShaderSources (ProgramSources& dst,
565 const ProgramSources& src,
566 const ProgramSpecializationParams& specParams,
567 glu::ShaderType shaderType,
568 map<string, string> (*specializationGenerator) (const ProgramSpecializationParams& specParams))
570 if (!src.sources[shaderType].empty())
572 const map<string, string> tmplParams = specializationGenerator(specParams);
574 for (size_t ndx = 0; ndx < src.sources[shaderType].size(); ++ndx)
576 const StringTemplate tmpl (src.sources[shaderType][ndx]);
577 const string baseGLSLCode = tmpl.specialize(tmplParams);
578 const string sourceWithExts = injectExtensionRequirements(baseGLSLCode, specParams.requiredExtensions, shaderType);
580 dst << glu::ShaderSource(shaderType, sourceWithExts);
// Specializes all five supported pipeline stages of a 'complete'-case
// program and carries over the separable flag.
585 void specializeProgramSources (glu::ProgramSources& dst,
586 const glu::ProgramSources& src,
587 const ProgramSpecializationParams& specParams)
589 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_VERTEX, generateVertexSpecialization);
590 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_FRAGMENT, generateFragmentSpecialization);
591 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_GEOMETRY, generateGeometrySpecialization);
592 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_TESSELLATION_CONTROL, generateTessControlSpecialization);
593 specializeShaderSources(dst, src, specParams, glu::SHADERTYPE_TESSELLATION_EVALUATION, generateTessEvalSpecialization);
595 dst << glu::ProgramSeparable(src.separable);
// Byte layout of a value block inside a buffer: per-value entries (offset
// plus the column stride used for matrices) and the total size in bytes.
// NOTE(review): some member declarations (e.g. the entry offset and the
// total 'size' field) are omitted from this excerpt.
598 struct ValueBufferLayout
603 int vecStride; //! Applies to matrices only
605 Entry (void) : offset(0), vecStride(0) {}
606 Entry (int offset_, int vecStride_) : offset(offset_), vecStride(vecStride_) {}
609 vector<Entry> entries;
612 ValueBufferLayout (void) : size(0) {}
// Computes std140 offsets for a list of values. Matrices and 3-component
// vectors are aligned to vec4 (16 bytes); scalars/vec2/vec4 to their own
// size. Matrix columns are laid out with 'alignment' as the column stride.
615 ValueBufferLayout computeStd140Layout (const vector<Value>& values)
617 ValueBufferLayout layout;
619 layout.entries.resize(values.size());
621 for (size_t ndx = 0; ndx < values.size(); ++ndx)
623 const DataType basicType = values[ndx].type.getBasicType();
624 const bool isMatrix = isDataTypeMatrix(basicType);
625 const int numVecs = isMatrix ? getDataTypeMatrixNumColumns(basicType) : 1;
626 const DataType vecType = isMatrix ? glu::getDataTypeFloatVec(getDataTypeMatrixNumRows(basicType)) : basicType;
627 const int vecSize = getDataTypeScalarSize(vecType);
// std140: matrices and vec3 round up to a 4-scalar (16-byte) boundary.
628 const int alignment = ((isMatrix || vecSize == 3) ? 4 : vecSize)*int(sizeof(deUint32));
630 layout.size = deAlign32(layout.size, alignment);
631 layout.entries[ndx] = ValueBufferLayout::Entry(layout.size, alignment);
// Full stride for all but the last column, tight size for the last one.
632 layout.size += alignment*(numVecs-1) + vecSize*int(sizeof(deUint32));
// Computes std430 offsets for a list of values. Unlike std140, matrices are
// not forced to vec4 alignment — only vec3 columns round up to 4 scalars.
638 ValueBufferLayout computeStd430Layout (const vector<Value>& values)
640 ValueBufferLayout layout;
642 layout.entries.resize(values.size());
644 for (size_t ndx = 0; ndx < values.size(); ++ndx)
646 const DataType basicType = values[ndx].type.getBasicType();
647 const int numVecs = isDataTypeMatrix(basicType) ? getDataTypeMatrixNumColumns(basicType) : 1;
648 const DataType vecType = isDataTypeMatrix(basicType) ? glu::getDataTypeFloatVec(getDataTypeMatrixNumRows(basicType)) : basicType;
649 const int vecSize = getDataTypeScalarSize(vecType);
650 const int alignment = (vecSize == 3 ? 4 : vecSize)*int(sizeof(deUint32));
652 layout.size = deAlign32(layout.size, alignment);
653 layout.entries[ndx] = ValueBufferLayout::Entry(layout.size, alignment);
654 layout.size += alignment*(numVecs-1) + vecSize*int(sizeof(deUint32));
// Copies element 'arrayNdx' of one value into 'dst' at the position given
// by 'entryLayout'. Bool components are expanded to 32-bit masks (~0u for
// true, 0u for false); all other scalar types are memcpy'd verbatim, one
// matrix column (or one vector) at a time using the entry's vecStride.
660 void copyToLayout (void* dst, const ValueBufferLayout::Entry& entryLayout, const Value& value, int arrayNdx)
662 const DataType basicType = value.type.getBasicType();
663 const int scalarSize = getDataTypeScalarSize(basicType);
664 const int numVecs = isDataTypeMatrix(basicType) ? getDataTypeMatrixNumColumns(basicType) : 1;
665 const int numComps = isDataTypeMatrix(basicType) ? getDataTypeMatrixNumRows(basicType) : scalarSize;
667 DE_ASSERT(size_t((arrayNdx+1)*scalarSize) <= value.elements.size());
669 if (isDataTypeBoolOrBVec(basicType))
671 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
673 for (int compNdx = 0; compNdx < numComps; compNdx++)
675 const deUint32 data = value.elements[arrayNdx*scalarSize + vecNdx*numComps + compNdx].bool32 ? ~0u : 0u;
677 deMemcpy((deUint8*)dst + entryLayout.offset + vecNdx*entryLayout.vecStride + compNdx * sizeof(deUint32),
// Non-bool path: copy each column/vector as a contiguous run of 32-bit words.
685 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
686 deMemcpy((deUint8*)dst + entryLayout.offset + vecNdx*entryLayout.vecStride,
687 &value.elements[arrayNdx*scalarSize + vecNdx*numComps],
688 numComps*sizeof(deUint32));
// Copies element 'arrayNdx' of every value into 'dst' according to the
// matching layout entries. Layout and value lists must be the same length.
692 void copyToLayout (void* dst, const ValueBufferLayout& layout, const vector<Value>& values, int arrayNdx)
694 DE_ASSERT(layout.entries.size() == values.size());
696 for (size_t ndx = 0; ndx < values.size(); ndx++)
697 copyToLayout(dst, layout.entries[ndx], values[ndx], arrayNdx);
// Returns a bitmask (1u << glu::ShaderType) of the stages the case uses:
// for 'complete' cases, every stage with at least one source in any
// program; otherwise the implicit vertex+fragment pair.
700 deUint32 getShaderStages (const ShaderCaseSpecification& spec)
702 if (spec.caseType == glu::sl::CASETYPE_COMPLETE)
704 deUint32 stages = 0u;
706 for (size_t progNdx = 0; progNdx < spec.programs.size(); progNdx++)
708 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
710 if (!spec.programs[progNdx].sources.sources[shaderType].empty())
711 stages |= (1u << shaderType);
718 return (1u << glu::SHADERTYPE_VERTEX) | (1u << glu::SHADERTYPE_FRAGMENT);
// Owns the VkShaderModules for all stages used by one shader case. The
// stage bitmask mirrors getShaderStages(); getShader() is only valid for
// stages where hasShader() is true.
721 class PipelineProgram
724 PipelineProgram (Context& context, const ShaderCaseSpecification& spec);
726 deUint32 getStages (void) const { return m_stages; }
728 bool hasShader (glu::ShaderType type) const { return (m_stages & (1u << type)) != 0; }
729 vk::VkShaderModule getShader (glu::ShaderType type) const { return *m_shaderModules[type]; }
732 const deUint32 m_stages;
733 Move<vk::VkShaderModule> m_shaderModules[glu::SHADERTYPE_LAST];
// Creates one shader module per active stage from the context's binary
// collection, looked up by getShaderName(stage, 0).
736 PipelineProgram::PipelineProgram (Context& context, const ShaderCaseSpecification& spec)
737 : m_stages(getShaderStages(spec))
739 // \note Currently only a single source program is supported as framework lacks SPIR-V linking capability
740 TCU_CHECK_INTERNAL(spec.programs.size() == 1);
742 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
744 if ((m_stages & (1u << shaderType)) != 0)
746 m_shaderModules[shaderType] = vk::createShaderModule(context.getDeviceInterface(), context.getDevice(),
747 context.getBinaryCollection().get(getShaderName((glu::ShaderType)shaderType, 0)), 0u);
// Builds one VkPipelineShaderStageCreateInfo per stage present in the
// program, with entry point defaults and no specialization info.
752 vector<vk::VkPipelineShaderStageCreateInfo> getPipelineShaderStageCreateInfo (const PipelineProgram& program)
754 vector<vk::VkPipelineShaderStageCreateInfo> infos;
756 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
758 if (program.hasShader((glu::ShaderType)shaderType))
760 const vk::VkPipelineShaderStageCreateInfo info =
762 vk::VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // sType
764 (vk::VkPipelineShaderStageCreateFlags)0,
765 vk::getVkShaderStage((glu::ShaderType)shaderType), // stage
766 program.getShader((glu::ShaderType)shaderType), // module
768 DE_NULL, // pSpecializationInfo
771 infos.push_back(info);
// Creates an exclusive-mode VkBuffer of the given size and usage on the
// universal queue family.
// NOTE(review): "¶ms" below appears to be a mojibake of "&params" from the
// extraction — confirm against the original source before editing.
778 Move<vk::VkBuffer> createBuffer (Context& context, vk::VkDeviceSize size, vk::VkBufferUsageFlags usageFlags)
780 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
781 const vk::VkBufferCreateInfo params =
783 vk::VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
788 vk::VK_SHARING_MODE_EXCLUSIVE, // sharingMode
789 1u, // queueFamilyCount
790 &queueFamilyIndex, // pQueueFamilyIndices
793 return vk::createBuffer(context.getDeviceInterface(), context.getDevice(), ¶ms);
// Creates a single-sample, exclusive-mode 2D VkImage (depth 1) with the
// given format, tiling and usage, starting in UNDEFINED layout.
796 Move<vk::VkImage> createImage2D (Context& context, deUint32 width, deUint32 height, vk::VkFormat format, vk::VkImageTiling tiling, vk::VkImageUsageFlags usageFlags)
798 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
799 const vk::VkImageCreateInfo params =
801 vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
804 vk::VK_IMAGE_TYPE_2D, // imageType
806 { width, height, 1u }, // extent
809 vk::VK_SAMPLE_COUNT_1_BIT, // samples
812 vk::VK_SHARING_MODE_EXCLUSIVE, // sharingMode
813 1u, // queueFamilyCount
814 &queueFamilyIndex, // pQueueFamilyIndices
815 vk::VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout
818 return vk::createImage(context.getDeviceInterface(), context.getDevice(), ¶ms);
// Creates a 2D color-aspect image view with identity (RGBA) swizzle for use
// as a framebuffer attachment.
821 Move<vk::VkImageView> createAttachmentView (Context& context, vk::VkImage image, vk::VkFormat format)
823 const vk::VkImageViewCreateInfo params =
825 vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
829 vk::VK_IMAGE_VIEW_TYPE_2D, // viewType
831 vk::makeComponentMappingRGBA(), // channels
833 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
836 0u, // baseArrayLayer
838 }, // subresourceRange
841 return vk::createImageView(context.getDeviceInterface(), context.getDevice(), ¶ms);
// Creates a single-subpass render pass with one cleared-and-stored color
// attachment (kept in COLOR_ATTACHMENT_OPTIMAL) and an explicitly unused
// depth/stencil attachment reference.
844 Move<vk::VkRenderPass> createRenderPass (Context& context, vk::VkFormat colorAttFormat)
846 const vk::VkAttachmentDescription colorAttDesc =
849 colorAttFormat, // format
850 vk::VK_SAMPLE_COUNT_1_BIT, // samples
851 vk::VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
852 vk::VK_ATTACHMENT_STORE_OP_STORE, // storeOp
853 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
854 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // stencilStoreOp
855 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // initialLayout
856 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // finalLayout
858 const vk::VkAttachmentReference colorAttRef =
861 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // layout
// Depth/stencil slot is present but points at VK_ATTACHMENT_UNUSED.
863 const vk::VkAttachmentReference dsAttRef =
865 VK_ATTACHMENT_UNUSED, // attachment
866 vk::VK_IMAGE_LAYOUT_GENERAL, // layout
868 const vk::VkSubpassDescription subpassDesc =
870 (vk::VkSubpassDescriptionFlags)0,
871 vk::VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
873 DE_NULL, // pInputAttachments
875 &colorAttRef, // pColorAttachments
876 DE_NULL, // pResolveAttachments
877 &dsAttRef, // depthStencilAttachment
879 DE_NULL, // pPreserveAttachments
882 const vk::VkRenderPassCreateInfo renderPassParams =
884 vk::VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // sType
886 (vk::VkRenderPassCreateFlags)0,
887 1u, // attachmentCount
888 &colorAttDesc, // pAttachments
890 &subpassDesc, // pSubpasses
891 0u, // dependencyCount
892 DE_NULL, // pDependencies
895 return vk::createRenderPass(context.getDeviceInterface(), context.getDevice(), &renderPassParams);
// Converts a glu::ShaderType stage bitmask (as produced by getShaderStages)
// into the corresponding VkShaderStageFlags.
898 vk::VkShaderStageFlags getVkStageFlags (deUint32 stages)
900 vk::VkShaderStageFlags vkStages = 0u;
902 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
904 if ((stages & (1u << shaderType)) != 0)
905 vkStages |= vk::getVkShaderStage((glu::ShaderType)shaderType);
// Builds the descriptor set layout: binding 0 is the reference uniform
// buffer (fragment stage only), binding 1 the user uniform buffer visible
// to every active stage. The static asserts pin the binding order to the
// enum constants used by the shader generators.
911 Move<vk::VkDescriptorSetLayout> createDescriptorSetLayout (Context& context, deUint32 shaderStages)
913 DE_STATIC_ASSERT(REFERENCE_UNIFORM_BINDING == 0);
914 DE_STATIC_ASSERT(USER_UNIFORM_BINDING == 1);
916 return vk::DescriptorSetLayoutBuilder()
917 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_FRAGMENT_BIT)
918 .addSingleBinding(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, getVkStageFlags(shaderStages))
919 .build(context.getDeviceInterface(), context.getDevice());
// Creates a pipeline layout wrapping the single descriptor set layout, with
// no push-constant ranges.
922 Move<vk::VkPipelineLayout> createPipelineLayout (Context& context, vk::VkDescriptorSetLayout descriptorSetLayout)
924 const vk::VkPipelineLayoutCreateInfo params =
926 vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
928 (vk::VkPipelineLayoutCreateFlags)0,
929 1u, // descriptorSetCount
930 &descriptorSetLayout, // pSetLayouts
931 0u, // pushConstantRangeCount
932 DE_NULL, // pPushConstantRanges
935 return vk::createPipelineLayout(context.getDeviceInterface(), context.getDevice(), ¶ms);
// Maps a scalar type + component count (1..4) to the matching 32-bit vertex
// input format. Bool values use the UINT formats (matching the uint
// transport types used by the generated shaders). Falls back to
// R8G8B8A8_UINT after DE_FATAL on an unknown scalar type.
938 vk::VkFormat getVecFormat (DataType scalarType, int scalarSize)
942 case glu::TYPE_FLOAT:
944 const vk::VkFormat vecFmts[] =
946 vk::VK_FORMAT_R32_SFLOAT,
947 vk::VK_FORMAT_R32G32_SFLOAT,
948 vk::VK_FORMAT_R32G32B32_SFLOAT,
949 vk::VK_FORMAT_R32G32B32A32_SFLOAT,
951 return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
956 const vk::VkFormat vecFmts[] =
958 vk::VK_FORMAT_R32_SINT,
959 vk::VK_FORMAT_R32G32_SINT,
960 vk::VK_FORMAT_R32G32B32_SINT,
961 vk::VK_FORMAT_R32G32B32A32_SINT,
963 return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
968 const vk::VkFormat vecFmts[] =
970 vk::VK_FORMAT_R32_UINT,
971 vk::VK_FORMAT_R32G32_UINT,
972 vk::VK_FORMAT_R32G32B32_UINT,
973 vk::VK_FORMAT_R32G32B32A32_UINT,
975 return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
// Bool case mirrors uint formats (bools are transported as uints).
980 const vk::VkFormat vecFmts[] =
982 vk::VK_FORMAT_R32_UINT,
983 vk::VK_FORMAT_R32G32_UINT,
984 vk::VK_FORMAT_R32G32B32_UINT,
985 vk::VK_FORMAT_R32G32B32A32_UINT,
987 return de::getSizedArrayElement<4>(vecFmts, scalarSize-1);
991 DE_FATAL("Unknown scalar type");
992 return vk::VK_FORMAT_R8G8B8A8_UINT;
// Builds vertex attribute descriptions: location 0 is the R32G32_SFLOAT
// position, then one attribute per input value (one per matrix column),
// with offsets taken from the value buffer layout (column offsets advance
// by the entry's vecStride). Locations are assigned sequentially.
996 vector<vk::VkVertexInputAttributeDescription> getVertexAttributeDescriptions (const vector<Value>& inputValues, const ValueBufferLayout& layout)
998 vector<vk::VkVertexInputAttributeDescription> attribs;
1002 const vk::VkVertexInputAttributeDescription posDesc =
1006 vk::VK_FORMAT_R32G32_SFLOAT, // format
1010 attribs.push_back(posDesc);
1014 for (size_t inputNdx = 0; inputNdx < inputValues.size(); inputNdx++)
1016 const Value& input = inputValues[inputNdx];
1017 const ValueBufferLayout::Entry& layoutEntry = layout.entries[inputNdx];
1018 const DataType basicType = input.type.getBasicType();
1019 const int numVecs = isDataTypeMatrix(basicType)
1020 ? getDataTypeMatrixNumColumns(basicType)
1022 const int vecSize = isDataTypeMatrix(basicType)
1023 ? getDataTypeMatrixNumRows(basicType)
1024 : getDataTypeScalarSize(basicType);
1025 const DataType scalarType = getDataTypeScalarType(basicType);
1026 const vk::VkFormat vecFmt = getVecFormat(scalarType, vecSize);
1028 for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
1030 const deUint32 curLoc = (deUint32)attribs.size();
1031 const deUint32 offset = (deUint32)(layoutEntry.offset + layoutEntry.vecStride*vecNdx);
1032 const vk::VkVertexInputAttributeDescription desc =
1040 attribs.push_back(desc);
// Builds the graphics pipeline used to render the full-viewport quad for a
// shader case: depth/stencil and blending disabled, single-sample, triangle
// list, binding 0 carries quad positions (stride sizeof(Vec2), per-vertex)
// and binding 1 (present only when inputValues is non-empty) carries input
// values with per-instance rate.
// NOTE(review): this listing has interleaved lines missing (struct braces and
// several initializer fields) -- restore from upstream before compiling.
1047 Move<vk::VkPipeline> createPipeline (Context& context,
1048 const vector<Value>& inputValues,
1049 const ValueBufferLayout& inputLayout,
1050 const PipelineProgram& program,
1051 vk::VkRenderPass renderPass,
1052 vk::VkPipelineLayout pipelineLayout,
1053 tcu::UVec2 renderSize)
1055 const vector<vk::VkPipelineShaderStageCreateInfo> shaderStageParams (getPipelineShaderStageCreateInfo(program));
1056 const vector<vk::VkVertexInputAttributeDescription> vertexAttribParams (getVertexAttributeDescriptions(inputValues, inputLayout));
// Depth/stencil fully disabled: compare ops are ALWAYS and all enables FALSE.
1057 const vk::VkPipelineDepthStencilStateCreateInfo depthStencilParams =
1059 vk::VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // sType
1061 (vk::VkPipelineDepthStencilStateCreateFlags)0,
1062 VK_FALSE, // depthTestEnable
1063 VK_FALSE, // depthWriteEnable
1064 vk::VK_COMPARE_OP_ALWAYS, // depthCompareOp
1065 VK_FALSE, // depthBoundsTestEnable
1066 VK_FALSE, // stencilTestEnable
1068 vk::VK_STENCIL_OP_KEEP, // stencilFailOp;
1069 vk::VK_STENCIL_OP_KEEP, // stencilPassOp;
1070 vk::VK_STENCIL_OP_KEEP, // stencilDepthFailOp;
1071 vk::VK_COMPARE_OP_ALWAYS, // stencilCompareOp;
1072 0u, // stencilCompareMask
1073 0u, // stencilWriteMask
1074 0u, // stencilReference
1077 vk::VK_STENCIL_OP_KEEP, // stencilFailOp;
1078 vk::VK_STENCIL_OP_KEEP, // stencilPassOp;
1079 vk::VK_STENCIL_OP_KEEP, // stencilDepthFailOp;
1080 vk::VK_COMPARE_OP_ALWAYS, // stencilCompareOp;
1081 0u, // stencilCompareMask
1082 0u, // stencilWriteMask
1083 0u, // stencilReference
1085 -1.0f, // minDepthBounds
1086 +1.0f, // maxDepthBounds
// Static viewport/scissor covering the whole renderSize extent.
1088 const vk::VkViewport viewport0 =
1092 (float)renderSize.x(), // width
1093 (float)renderSize.y(), // height
1097 const vk::VkRect2D scissor0 =
1099 { 0u, 0u }, // offset
1100 { renderSize.x(), renderSize.y() } // extent
1102 const vk::VkPipelineViewportStateCreateInfo viewportParams =
1104 vk::VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // sType
1106 (vk::VkPipelineViewportStateCreateFlags)0,
1107 1u, // viewportCount
1108 &viewport0, // pViewports
1110 &scissor0, // pScissors
1112 const vk::VkPipelineMultisampleStateCreateInfo multisampleParams =
1114 vk::VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // sType
1116 (vk::VkPipelineMultisampleStateCreateFlags)0,
1117 vk::VK_SAMPLE_COUNT_1_BIT, // rasterSamples
1118 DE_FALSE, // sampleShadingEnable
1119 0.0f, // minSampleShading
1120 DE_NULL, // pSampleMask
1121 VK_FALSE, // alphaToCoverageEnable
1122 VK_FALSE, // alphaToOneEnable
1124 const vk::VkPipelineRasterizationStateCreateInfo rasterParams =
1126 vk::VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // sType
1128 (vk::VkPipelineRasterizationStateCreateFlags)0,
1129 DE_TRUE, // depthClipEnable
1130 DE_FALSE, // rasterizerDiscardEnable
1131 vk::VK_POLYGON_MODE_FILL, // fillMode
1132 vk::VK_CULL_MODE_NONE, // cullMode;
1133 vk::VK_FRONT_FACE_COUNTER_CLOCKWISE, // frontFace;
1134 VK_FALSE, // depthBiasEnable
1135 0.0f, // depthBiasConstantFactor
1136 0.0f, // depthBiasClamp
1137 0.0f, // depthBiasSlopeFactor
1140 const vk::VkPipelineInputAssemblyStateCreateInfo inputAssemblyParams =
1142 vk::VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // sType
1144 (vk::VkPipelineInputAssemblyStateCreateFlags)0,
1145 vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // topology
1146 DE_FALSE, // primitiveRestartEnable
// Binding 0: per-vertex positions. Binding 1: per-instance input values
// (used only when the case declares inputs -- see bindingCount below).
1148 const vk::VkVertexInputBindingDescription vertexBindings[] =
1152 (deUint32)sizeof(tcu::Vec2), // stride
1153 vk::VK_VERTEX_INPUT_RATE_VERTEX, // stepRate
1158 vk::VK_VERTEX_INPUT_RATE_INSTANCE, // stepRate
1161 const vk::VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
1163 vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // sType
1165 (vk::VkPipelineVertexInputStateCreateFlags)0,
// Only the position binding is declared when there are no input values.
1166 (inputValues.empty() ? 1u : 2u), // bindingCount
1167 vertexBindings, // pVertexBindingDescriptions
1168 (deUint32)vertexAttribParams.size(), // attributeCount
1169 &vertexAttribParams[0], // pVertexAttributeDescriptions
1171 const vk::VkColorComponentFlags allCompMask = vk::VK_COLOR_COMPONENT_R_BIT
1172 | vk::VK_COLOR_COMPONENT_G_BIT
1173 | vk::VK_COLOR_COMPONENT_B_BIT
1174 | vk::VK_COLOR_COMPONENT_A_BIT;
// Blending disabled; all color components written.
1175 const vk::VkPipelineColorBlendAttachmentState attBlendParams =
1177 VK_FALSE, // blendEnable
1178 vk::VK_BLEND_FACTOR_ONE, // srcBlendColor
1179 vk::VK_BLEND_FACTOR_ZERO, // destBlendColor
1180 vk::VK_BLEND_OP_ADD, // blendOpColor
1181 vk::VK_BLEND_FACTOR_ONE, // srcBlendAlpha
1182 vk::VK_BLEND_FACTOR_ZERO, // destBlendAlpha
1183 vk::VK_BLEND_OP_ADD, // blendOpAlpha
1184 allCompMask, // componentWriteMask
1186 const vk::VkPipelineColorBlendStateCreateInfo blendParams =
1188 vk::VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // sType
1190 (vk::VkPipelineColorBlendStateCreateFlags)0,
1191 VK_FALSE, // logicOpEnable
1192 vk::VK_LOGIC_OP_COPY, // logicOp
1193 1u, // attachmentCount
1194 &attBlendParams, // pAttachments
1195 { 0.0f, 0.0f, 0.0f, 0.0f }, // blendConstants
1197 const vk::VkGraphicsPipelineCreateInfo pipelineParams =
1199 vk::VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // sType
1202 (deUint32)shaderStageParams.size(), // stageCount
1203 &shaderStageParams[0], // pStages
1204 &vertexInputStateParams, // pVertexInputState
1205 &inputAssemblyParams, // pInputAssemblyState
1206 DE_NULL, // pTessellationState
1207 &viewportParams, // pViewportState
1208 &rasterParams, // pRasterState
1209 &multisampleParams, // pMultisampleState
1210 &depthStencilParams, // pDepthStencilState
1211 &blendParams, // pColorBlendState
1212 (const vk::VkPipelineDynamicStateCreateInfo*)DE_NULL, // pDynamicState
1213 pipelineLayout, // layout
1214 renderPass, // renderPass
1216 DE_NULL, // basePipelineHandle
1217 0u, // basePipelineIndex
1220 return vk::createGraphicsPipeline(context.getDeviceInterface(), context.getDevice(), DE_NULL, &pipelineParams);
1223 Move<vk::VkFramebuffer> createFramebuffer (Context& context, vk::VkRenderPass renderPass, vk::VkImageView colorAttView, int width, int height)
1225 const vk::VkFramebufferCreateInfo framebufferParams =
1227 vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
1229 (vk::VkFramebufferCreateFlags)0,
1230 renderPass, // renderPass
1231 1u, // attachmentCount
1232 &colorAttView, // pAttachments
1233 (deUint32)width, // width
1234 (deUint32)height, // height
1238 return vk::createFramebuffer(context.getDeviceInterface(), context.getDevice(), &framebufferParams);
1241 Move<vk::VkCommandPool> createCommandPool (Context& context)
1243 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1245 return vk::createCommandPool(context.getDeviceInterface(), context.getDevice(), (vk::VkCommandPoolCreateFlags)0u, queueFamilyIndex);
1248 Move<vk::VkDescriptorPool> createDescriptorPool (Context& context)
1250 return vk::DescriptorPoolBuilder()
1251 .addType(vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 2u)
1252 .build(context.getDeviceInterface(), context.getDevice(), vk::VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1255 Move<vk::VkDescriptorSet> allocateDescriptorSet (Context& context, vk::VkDescriptorPool descriptorPool, vk::VkDescriptorSetLayout setLayout)
1257 const vk::VkDescriptorSetAllocateInfo params =
1259 vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
1266 return vk::allocateDescriptorSet(context.getDeviceInterface(), context.getDevice(), ¶ms);
1269 Move<vk::VkCommandBuffer> allocateCommandBuffer (Context& context, vk::VkCommandPool cmdPool)
1271 return vk::allocateCommandBuffer(context.getDeviceInterface(), context.getDevice(), cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1274 MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs)
1276 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1277 const vk::VkMemoryRequirements bufReqs = vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
1278 MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(bufReqs, memReqs);
1280 vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
1285 vk::VkFormat getRenderTargetFormat (DataType dataType)
1289 case glu::TYPE_FLOAT_VEC2:
1290 return vk::VK_FORMAT_R8G8_UNORM;
1291 case glu::TYPE_FLOAT_VEC3:
1292 return vk::VK_FORMAT_R5G6B5_UNORM_PACK16;
1293 case glu::TYPE_FLOAT_VEC4:
1294 return vk::VK_FORMAT_R8G8B8A8_UNORM;
1295 case glu::TYPE_INT_VEC2:
1296 return vk::VK_FORMAT_R8G8_SINT;
1297 case glu::TYPE_INT_VEC4:
1298 return vk::VK_FORMAT_R8G8B8A8_SINT;
1300 return vk::VK_FORMAT_R8G8B8A8_UNORM;
1304 MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkImage image, vk::MemoryRequirement memReqs)
1306 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1307 const vk::VkMemoryRequirements imgReqs = vk::getImageMemoryRequirements(vkd, context.getDevice(), image);
1308 MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(imgReqs, memReqs);
1310 vkd.bindImageMemory(context.getDevice(), image, memory->getMemory(), memory->getOffset());
1315 void writeValuesToMem (Context& context, const vk::Allocation& dst, const ValueBufferLayout& layout, const vector<Value>& values, int arrayNdx)
1317 copyToLayout(dst.getHostPtr(), layout, values, arrayNdx);
1319 // \note Buffers are not allocated with coherency / uncached requirement so we need to manually flush CPU write caches
1320 flushMappedMemoryRange(context.getDeviceInterface(), context.getDevice(), dst.getMemory(), dst.getOffset(), (vk::VkDeviceSize)layout.size);
// Per-execution state for a shader case: vertex/index data, optional input /
// reference / uniform buffers, a color render target with readback buffer,
// and the pipeline + descriptor + command objects used by iterate().
// NOTE(review): this listing is missing lines (access specifiers, enum
// framing, and at least the m_subCaseNdx counter that iterate() uses) --
// restore from upstream before compiling.
1323 class ShaderCaseInstance : public TestInstance
1326 ShaderCaseInstance (Context& context, const ShaderCaseSpecification& spec);
1327 ~ShaderCaseInstance (void);
1329 TestStatus iterate (void);
// Layout of the combined position+index buffer (m_posNdxBuffer).
1337 POSITIONS_OFFSET = 0,
1338 POSITIONS_SIZE = (int)sizeof(Vec2)*4,
1340 INDICES_OFFSET = POSITIONS_SIZE,
1341 INDICES_SIZE = (int)sizeof(deUint16)*6,
1343 TOTAL_POS_NDX_SIZE = POSITIONS_SIZE+INDICES_SIZE
1346 const ShaderCaseSpecification& m_spec;
1348 const Unique<vk::VkBuffer> m_posNdxBuffer;
1349 const UniquePtr<vk::Allocation> m_posNdxMem;
1351 const ValueBufferLayout m_inputLayout;
1352 const Unique<vk::VkBuffer> m_inputBuffer; // Input values (attributes). Can be NULL if no inputs present
1353 const UniquePtr<vk::Allocation> m_inputMem; // Input memory, can be NULL if no input buffer exists
1355 const ValueBufferLayout m_referenceLayout;
1356 const Unique<vk::VkBuffer> m_referenceBuffer; // Output (reference) values. Can be NULL if no outputs present
1357 const UniquePtr<vk::Allocation> m_referenceMem; // Output (reference) memory, can be NULL if no reference buffer exists
1359 const ValueBufferLayout m_uniformLayout;
1360 const Unique<vk::VkBuffer> m_uniformBuffer; // Uniform values. Can be NULL if no uniforms present
1361 const UniquePtr<vk::Allocation> m_uniformMem; // Uniform memory, can be NULL if no uniform buffer exists
1363 const vk::VkFormat m_rtFormat;
1364 const Unique<vk::VkImage> m_rtImage;
1365 const UniquePtr<vk::Allocation> m_rtMem;
1366 const Unique<vk::VkImageView> m_rtView;
1368 const Unique<vk::VkBuffer> m_readImageBuffer;
1369 const UniquePtr<vk::Allocation> m_readImageMem;
1371 const Unique<vk::VkRenderPass> m_renderPass;
1372 const Unique<vk::VkFramebuffer> m_framebuffer;
1373 const PipelineProgram m_program;
1374 const Unique<vk::VkDescriptorSetLayout> m_descriptorSetLayout;
1375 const Unique<vk::VkPipelineLayout> m_pipelineLayout;
1376 const Unique<vk::VkPipeline> m_pipeline;
1378 const Unique<vk::VkDescriptorPool> m_descriptorPool;
1379 const Unique<vk::VkDescriptorSet> m_descriptorSet;
1381 const Unique<vk::VkCommandPool> m_cmdPool;
1382 const Unique<vk::VkCommandBuffer> m_cmdBuffer;
// Constructor: creates all GPU objects up front (buffers sized from the
// std140/std430 layouts of the case's values, render target, pipeline,
// descriptor set), uploads the static quad geometry, writes the descriptor
// set, and records the whole render + readback command buffer once; iterate()
// only rewrites the value buffers and resubmits it per sub-case.
// NOTE(review): interleaved lines (braces, some initializer fields) are
// missing from this listing -- restore from upstream before compiling.
1387 ShaderCaseInstance::ShaderCaseInstance (Context& context, const ShaderCaseSpecification& spec)
1388 : TestInstance (context)
1391 , m_posNdxBuffer (createBuffer(context, (vk::VkDeviceSize)TOTAL_POS_NDX_SIZE, vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT|vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT))
1392 , m_posNdxMem (allocateAndBindMemory(context, *m_posNdxBuffer, vk::MemoryRequirement::HostVisible))
1394 , m_inputLayout (computeStd430Layout(spec.values.inputs))
1395 , m_inputBuffer (m_inputLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_inputLayout.size, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT) : Move<vk::VkBuffer>())
1396 , m_inputMem (m_inputLayout.size > 0 ? allocateAndBindMemory(context, *m_inputBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
1398 , m_referenceLayout (computeStd140Layout(spec.values.outputs))
1399 , m_referenceBuffer (m_referenceLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_referenceLayout.size, vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : Move<vk::VkBuffer>())
1400 , m_referenceMem (m_referenceLayout.size > 0 ? allocateAndBindMemory(context, *m_referenceBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
1402 , m_uniformLayout (computeStd140Layout(spec.values.uniforms))
1403 , m_uniformBuffer (m_uniformLayout.size > 0 ? createBuffer(context, (vk::VkDeviceSize)m_uniformLayout.size, vk::VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT) : Move<vk::VkBuffer>())
1404 , m_uniformMem (m_uniformLayout.size > 0 ? allocateAndBindMemory(context, *m_uniformBuffer, vk::MemoryRequirement::HostVisible) : MovePtr<vk::Allocation>())
1406 , m_rtFormat (getRenderTargetFormat(spec.outputFormat))
1407 , m_rtImage (createImage2D(context, RENDER_WIDTH, RENDER_HEIGHT, m_rtFormat, vk::VK_IMAGE_TILING_OPTIMAL, vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT))
1408 , m_rtMem (allocateAndBindMemory(context, *m_rtImage, vk::MemoryRequirement::Any))
1409 , m_rtView (createAttachmentView(context, *m_rtImage, m_rtFormat))
1411 , m_readImageBuffer (createBuffer(context, (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * tcu::getPixelSize(vk::mapVkFormat(m_rtFormat))), vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT))
1412 , m_readImageMem (allocateAndBindMemory(context, *m_readImageBuffer, vk::MemoryRequirement::HostVisible))
1414 , m_renderPass (createRenderPass(context, m_rtFormat))
1415 , m_framebuffer (createFramebuffer(context, *m_renderPass, *m_rtView, RENDER_WIDTH, RENDER_HEIGHT))
1416 , m_program (context, spec)
1417 , m_descriptorSetLayout (createDescriptorSetLayout(context, m_program.getStages()))
1418 , m_pipelineLayout (createPipelineLayout(context, *m_descriptorSetLayout))
1419 , m_pipeline (createPipeline(context, spec.values.inputs, m_inputLayout, m_program, *m_renderPass, *m_pipelineLayout, tcu::UVec2(RENDER_WIDTH, RENDER_HEIGHT)))
1421 , m_descriptorPool (createDescriptorPool(context))
1422 , m_descriptorSet (allocateDescriptorSet(context, *m_descriptorPool, *m_descriptorSetLayout))
1424 , m_cmdPool (createCommandPool(context))
1425 , m_cmdBuffer (allocateCommandBuffer(context, *m_cmdPool))
1429 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1430 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
// Static full-screen quad: 4 positions + 6 indices, uploaded once.
1433 const Vec2 s_positions[] =
1440 const deUint16 s_indices[] =
1446 DE_STATIC_ASSERT(sizeof(s_positions) == POSITIONS_SIZE);
1447 DE_STATIC_ASSERT(sizeof(s_indices) == INDICES_SIZE);
1449 deMemcpy((deUint8*)m_posNdxMem->getHostPtr() + POSITIONS_OFFSET, &s_positions[0], sizeof(s_positions));
1450 deMemcpy((deUint8*)m_posNdxMem->getHostPtr() + INDICES_OFFSET, &s_indices[0], sizeof(s_indices));
// Geometry buffer is host-visible but not necessarily coherent -- flush.
1452 flushMappedMemoryRange(m_context.getDeviceInterface(), context.getDevice(), m_posNdxMem->getMemory(), m_posNdxMem->getOffset(), sizeof(s_positions)+sizeof(s_indices));
1455 if (!m_spec.values.uniforms.empty())
1457 const vk::VkDescriptorBufferInfo bufInfo =
1460 (vk::VkDeviceSize)0, // offset
1461 (vk::VkDeviceSize)m_uniformLayout.size
1464 vk::DescriptorSetUpdateBuilder()
1465 .writeSingle(*m_descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(USER_UNIFORM_BINDING),
1466 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &bufInfo)
1467 .update(vkd, m_context.getDevice());
1470 if (!m_spec.values.outputs.empty())
1472 const vk::VkDescriptorBufferInfo bufInfo =
1475 (vk::VkDeviceSize)0, // offset
1476 (vk::VkDeviceSize)m_referenceLayout.size
1479 vk::DescriptorSetUpdateBuilder()
1480 .writeSingle(*m_descriptorSet, vk::DescriptorSetUpdateBuilder::Location::binding(REFERENCE_UNIFORM_BINDING),
1481 vk::VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &bufInfo)
1482 .update(vkd, m_context.getDevice());
1485 // Record command buffer
1488 const vk::VkCommandBufferBeginInfo beginInfo =
1490 vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // sType
1493 (const vk::VkCommandBufferInheritanceInfo*)DE_NULL,
1496 VK_CHECK(vkd.beginCommandBuffer(*m_cmdBuffer, &beginInfo));
// Make host writes visible to vertex/uniform reads and transition the
// render target to COLOR_ATTACHMENT_OPTIMAL before rendering.
1500 const vk::VkMemoryBarrier vertFlushBarrier =
1502 vk::VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
1504 vk::VK_ACCESS_HOST_WRITE_BIT, // srcAccessMask
1505 vk::VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT|vk::VK_ACCESS_UNIFORM_READ_BIT, // dstAccessMask
1507 const vk::VkImageMemoryBarrier colorAttBarrier =
1509 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1511 0u, // srcAccessMask
1512 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // dstAccessMask
1513 vk::VK_IMAGE_LAYOUT_UNDEFINED, // oldLayout
1514 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // newLayout
1515 queueFamilyIndex, // srcQueueFamilyIndex
1516 queueFamilyIndex, // destQueueFamilyIndex
1517 *m_rtImage, // image
1519 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1522 0u, // baseArraySlice
1524 } // subresourceRange
1527 vkd.cmdPipelineBarrier(*m_cmdBuffer, vk::VK_PIPELINE_STAGE_HOST_BIT, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (vk::VkDependencyFlags)0,
1528 1, &vertFlushBarrier,
1529 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1530 1, &colorAttBarrier);
1534 const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.125f, 0.25f, 0.75f, 1.0f);
1535 const vk::VkRenderPassBeginInfo passBeginInfo =
1537 vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // sType
1539 *m_renderPass, // renderPass
1540 *m_framebuffer, // framebuffer
1541 { { 0, 0 }, { RENDER_WIDTH, RENDER_HEIGHT } }, // renderArea
1542 1u, // clearValueCount
1543 &clearValue, // pClearValues
1546 vkd.cmdBeginRenderPass(*m_cmdBuffer, &passBeginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
1549 vkd.cmdBindPipeline(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipeline);
1551 if (!m_spec.values.uniforms.empty() || !m_spec.values.outputs.empty())
1552 vkd.cmdBindDescriptorSets(*m_cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_pipelineLayout, 0u, 1u, &*m_descriptorSet, 0u, DE_NULL);
// Bind position buffer always; bind the input-value buffer only if it exists.
1555 const vk::VkBuffer buffers[] = { *m_posNdxBuffer, *m_inputBuffer };
1556 const vk::VkDeviceSize offsets[] = { POSITIONS_OFFSET, 0u };
1557 const deUint32 numBuffers = buffers[1] != 0 ? 2u : 1u;
1558 vkd.cmdBindVertexBuffers(*m_cmdBuffer, 0u, numBuffers, buffers, offsets);
1561 vkd.cmdBindIndexBuffer (*m_cmdBuffer, *m_posNdxBuffer, (vk::VkDeviceSize)INDICES_OFFSET, vk::VK_INDEX_TYPE_UINT16);
1562 vkd.cmdDrawIndexed (*m_cmdBuffer, 6u, 1u, 0u, 0u, 0u);
1563 vkd.cmdEndRenderPass (*m_cmdBuffer);
1566 const vk::VkImageMemoryBarrier renderFinishBarrier =
1568 vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1570 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask
1571 vk::VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
1572 vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout
1573 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
1574 queueFamilyIndex, // srcQueueFamilyIndex
1575 queueFamilyIndex, // destQueueFamilyIndex
1576 *m_rtImage, // image
1578 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1581 0u, // baseArraySlice
1583 } // subresourceRange
1586 vkd.cmdPipelineBarrier(*m_cmdBuffer, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0,
1587 0, (const vk::VkMemoryBarrier*)DE_NULL,
1588 0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1589 1, &renderFinishBarrier);
1593 const vk::VkBufferImageCopy copyParams =
1595 (vk::VkDeviceSize)0u, // bufferOffset
1596 (deUint32)RENDER_WIDTH, // bufferRowLength
1597 (deUint32)RENDER_HEIGHT, // bufferImageHeight
1599 vk::VK_IMAGE_ASPECT_COLOR_BIT, // aspect
1603 }, // imageSubresource
1604 { 0u, 0u, 0u }, // imageOffset
1605 { RENDER_WIDTH, RENDER_HEIGHT, 1u } // imageExtent
// NOTE(review): "©Params" below is mojibake for "&copyParams" (HTML
// entity "&copy;") -- restore before compiling.
1608 vkd.cmdCopyImageToBuffer(*m_cmdBuffer, *m_rtImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *m_readImageBuffer, 1u, ©Params);
1612 const vk::VkDeviceSize size = (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * tcu::getPixelSize(vk::mapVkFormat(m_rtFormat)));
1613 const vk::VkBufferMemoryBarrier copyFinishBarrier =
1615 vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
1617 vk::VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1618 vk::VK_ACCESS_HOST_READ_BIT, // dstAccessMask
1619 queueFamilyIndex, // srcQueueFamilyIndex
1620 queueFamilyIndex, // destQueueFamilyIndex
1621 *m_readImageBuffer, // buffer
// NOTE(review): "©FinishBarrier" below is mojibake for
// "&copyFinishBarrier" -- restore before compiling.
1626 vkd.cmdPipelineBarrier(*m_cmdBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0,
1627 0, (const vk::VkMemoryBarrier*)DE_NULL,
1628 1, ©FinishBarrier,
1629 0, (const vk::VkImageMemoryBarrier*)DE_NULL);
1632 VK_CHECK(vkd.endCommandBuffer(*m_cmdBuffer));
1635 ShaderCaseInstance::~ShaderCaseInstance (void)
1639 int getNumSubCases (const ValueBlock& values)
1641 if (!values.outputs.empty())
1642 return int(values.outputs[0].elements.size() / values.outputs[0].type.getScalarSize());
1644 return 1; // Always run at least one iteration even if no output values are specified
1647 bool checkResultImage (const ConstPixelBufferAccess& result)
1649 const tcu::IVec4 refPix (255, 255, 255, 255);
1651 for (int y = 0; y < result.getHeight(); y++)
1653 for (int x = 0; x < result.getWidth(); x++)
1655 const tcu::IVec4 resPix = result.getPixelInt(x, y);
1657 if (boolAny(notEqual(resPix, refPix)))
1665 bool checkResultImageWithReference (const ConstPixelBufferAccess& result, tcu::IVec4 refPix)
1667 for (int y = 0; y < result.getHeight(); y++)
1669 for (int x = 0; x < result.getWidth(); x++)
1671 const tcu::IVec4 resPix = result.getPixelInt(x, y);
1673 if (boolAny(notEqual(resPix, refPix)))
// Runs one sub-case: writes this sub-case's input/reference/uniform values
// into the pre-allocated buffers, resubmits the pre-recorded command buffer,
// then validates the readback image -- either "all white" (OUTPUT_RESULT,
// result computed in the fragment shader) or against the expected output
// color. Returns incomplete() until all sub-cases have run.
// NOTE(review): interleaved lines (braces, some VkSubmitInfo fields) are
// missing from this listing -- restore from upstream before compiling.
1680 TestStatus ShaderCaseInstance::iterate (void)
1682 const vk::DeviceInterface& vkd = m_context.getDeviceInterface();
1683 const vk::VkDevice device = m_context.getDevice();
1684 const vk::VkQueue queue = m_context.getUniversalQueue();
1686 if (!m_spec.values.inputs.empty())
1687 writeValuesToMem(m_context, *m_inputMem, m_inputLayout, m_spec.values.inputs, m_subCaseNdx);
1689 if (!m_spec.values.outputs.empty())
1690 writeValuesToMem(m_context, *m_referenceMem, m_referenceLayout, m_spec.values.outputs, m_subCaseNdx);
1692 if (!m_spec.values.uniforms.empty())
1693 writeValuesToMem(m_context, *m_uniformMem, m_uniformLayout, m_spec.values.uniforms, m_subCaseNdx);
1696 const vk::VkSubmitInfo submitInfo =
1698 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO,
1700 0u, // waitSemaphoreCount
1701 (const vk::VkSemaphore*)0, // pWaitSemaphores
1702 (const vk::VkPipelineStageFlags*)DE_NULL,
1705 0u, // signalSemaphoreCount
1706 (const vk::VkSemaphore*)0, // pSignalSemaphores
1708 const Unique<vk::VkFence> fence (vk::createFence(vkd, device));
// Block until the GPU has finished so the readback below is valid.
1710 VK_CHECK(vkd.queueSubmit (queue, 1u, &submitInfo, *fence));
1711 VK_CHECK(vkd.waitForFences (device, 1u, &fence.get(), DE_TRUE, ~0ull));
1714 // Result was checked in fragment shader
1715 if (m_spec.outputType == glu::sl::OUTPUT_RESULT)
1717 const ConstPixelBufferAccess imgAccess (TextureFormat(TextureFormat::RGBA, TextureFormat::UNORM_INT8), RENDER_WIDTH, RENDER_HEIGHT, 1, m_readImageMem->getHostPtr())
1719 invalidateMappedMemoryRange(vkd, device, m_readImageMem->getMemory(), m_readImageMem->getOffset(), (vk::VkDeviceSize)(RENDER_WIDTH*RENDER_HEIGHT*4));
1721 if (!checkResultImage(imgAccess))
1723 TestLog& log = m_context.getTestContext().getLog();
1725 log << TestLog::Message << "ERROR: Got non-white pixels on sub-case " << m_subCaseNdx << TestLog::EndMessage
1726 << TestLog::Image("Result", "Result", imgAccess);
1728 dumpValues(log, m_spec.values, m_subCaseNdx);
1730 return TestStatus::fail(string("Got invalid pixels at sub-case ") + de::toString(m_subCaseNdx));
1733 // Result was written to color buffer
1736 const ConstPixelBufferAccess imgAccess (vk::mapVkFormat(m_rtFormat), RENDER_WIDTH, RENDER_HEIGHT, 1, m_readImageMem->getHostPtr());
1737 const DataType dataType = m_spec.values.outputs[0].type.getBasicType();
1738 const int numComponents = getDataTypeScalarSize(dataType);
1739 tcu::IVec4 reference (0, 0, 0, 1);
// Build the expected color from this sub-case's declared output value.
1741 for (int refNdx = 0; refNdx < numComponents; refNdx++)
1743 if (isDataTypeFloatOrVec(dataType))
1744 reference[refNdx] = (int)m_spec.values.outputs[0].elements[m_subCaseNdx * numComponents + refNdx].float32;
1745 else if (isDataTypeIntOrIVec(dataType))
1746 reference[refNdx] = m_spec.values.outputs[0].elements[m_subCaseNdx * numComponents + refNdx].int32;
1748 DE_FATAL("Unknown data type");
1751 invalidateMappedMemoryRange(vkd, device, m_readImageMem->getMemory(), m_readImageMem->getOffset(), (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * tcu::getPixelSize(vk::mapVkFormat(m_rtFormat))));
1753 if (!checkResultImageWithReference(imgAccess, reference))
1755 TestLog& log = m_context.getTestContext().getLog();
1757 log << TestLog::Message << "ERROR: Got nonmatching pixels on sub-case " << m_subCaseNdx << TestLog::EndMessage
1758 << TestLog::Image("Result", "Result", imgAccess);
1760 dumpValues(log, m_spec.values, m_subCaseNdx);
1762 return TestStatus::fail(string("Got invalid pixels at sub-case ") + de::toString(m_subCaseNdx));
1767 if (++m_subCaseNdx < getNumSubCases(m_spec.values))
1768 return TestStatus::incomplete();
1770 return TestStatus::pass("All sub-cases passed");
1773 class ShaderCase : public TestCase
1776 ShaderCase (tcu::TestContext& testCtx, const string& name, const string& description, const ShaderCaseSpecification& spec);
1779 void initPrograms (SourceCollections& programCollection) const;
1780 TestInstance* createInstance (Context& context) const;
1783 const ShaderCaseSpecification m_spec;
1786 ShaderCase::ShaderCase (tcu::TestContext& testCtx, const string& name, const string& description, const ShaderCaseSpecification& spec)
1787 : TestCase (testCtx, name, description)
1792 void ShaderCase::initPrograms (SourceCollections& sourceCollection) const
1794 vector<ProgramSources> specializedSources (m_spec.programs.size());
1796 DE_ASSERT(isValid(m_spec));
1798 if (m_spec.expectResult != glu::sl::EXPECT_PASS)
1799 TCU_THROW(InternalError, "Only EXPECT_PASS is supported");
1801 if (m_spec.caseType == glu::sl::CASETYPE_VERTEX_ONLY)
1803 DE_ASSERT(m_spec.programs.size() == 1 && m_spec.programs[0].sources.sources[glu::SHADERTYPE_VERTEX].size() == 1);
1804 specializedSources[0] << glu::VertexSource(specializeVertexShader(m_spec, m_spec.programs[0].sources.sources[glu::SHADERTYPE_VERTEX][0]))
1805 << glu::FragmentSource(genFragmentShader(m_spec));
1807 else if (m_spec.caseType == glu::sl::CASETYPE_FRAGMENT_ONLY)
1809 DE_ASSERT(m_spec.programs.size() == 1 && m_spec.programs[0].sources.sources[glu::SHADERTYPE_FRAGMENT].size() == 1);
1810 specializedSources[0] << glu::VertexSource(genVertexShader(m_spec))
1811 << glu::FragmentSource(specializeFragmentShader(m_spec, m_spec.programs[0].sources.sources[glu::SHADERTYPE_FRAGMENT][0]));
1815 DE_ASSERT(m_spec.caseType == glu::sl::CASETYPE_COMPLETE);
1817 const int maxPatchVertices = 4; // \todo [2015-08-05 pyry] Query
1819 for (size_t progNdx = 0; progNdx < m_spec.programs.size(); progNdx++)
1821 const ProgramSpecializationParams progSpecParams (m_spec, m_spec.programs[progNdx].requiredExtensions, maxPatchVertices);
1823 specializeProgramSources(specializedSources[progNdx], m_spec.programs[progNdx].sources, progSpecParams);
1827 for (size_t progNdx = 0; progNdx < specializedSources.size(); progNdx++)
1829 for (int shaderType = 0; shaderType < glu::SHADERTYPE_LAST; shaderType++)
1831 if (!specializedSources[progNdx].sources[shaderType].empty())
1833 vk::GlslSource& curSrc = sourceCollection.glslSources.add(getShaderName((glu::ShaderType)shaderType, progNdx));
1834 curSrc.sources[shaderType] = specializedSources[progNdx].sources[shaderType];
1840 TestInstance* ShaderCase::createInstance (Context& context) const
1842 return new ShaderCaseInstance(context, m_spec);
1845 class ShaderCaseFactory : public glu::sl::ShaderCaseFactory
1848 ShaderCaseFactory (tcu::TestContext& testCtx)
1849 : m_testCtx(testCtx)
1853 tcu::TestCaseGroup* createGroup (const string& name, const string& description, const vector<tcu::TestNode*>& children)
1855 return new tcu::TestCaseGroup(m_testCtx, name.c_str(), description.c_str(), children);
1858 tcu::TestCase* createCase (const string& name, const string& description, const ShaderCaseSpecification& spec)
1860 return new ShaderCase(m_testCtx, name, description, spec);
1864 tcu::TestContext& m_testCtx;
// Test group populated by parsing a shader-library (.test) file from the
// archive; each parsed case/group is added as a child node.
// NOTE(review): lines are missing from this listing -- the two loops below
// appear to be an exception-cleanup pattern (add children; on failure delete
// the remaining, not-yet-added nodes), presumably inside a try/catch whose
// framing lines are absent. Restore from upstream before compiling.
1867 class ShaderLibraryGroup : public tcu::TestCaseGroup
1870 ShaderLibraryGroup (tcu::TestContext& testCtx, const string& name, const string& description, const string& filename)
1871 : tcu::TestCaseGroup (testCtx, name.c_str(), description.c_str())
1872 , m_filename (filename)
1878 ShaderCaseFactory caseFactory (m_testCtx);
1879 const vector<tcu::TestNode*> children = glu::sl::parseFile(m_testCtx.getArchive(), m_filename, &caseFactory);
1881 for (size_t ndx = 0; ndx < children.size(); ndx++)
1885 addChild(children[ndx]);
1889 for (; ndx < children.size(); ndx++)
1890 delete children[ndx];
1897 const string m_filename;
1902 MovePtr<tcu::TestCaseGroup> createShaderLibraryGroup (tcu::TestContext& testCtx, const string& name, const string& description, const string& filename)
1904 return MovePtr<tcu::TestCaseGroup>(new ShaderLibraryGroup(testCtx, name, description, filename));