1 /*-------------------------------------------------------------------------
2 * drawElements Quality Program OpenGL ES 3.1 Module
3 * -------------------------------------------------
5 * Copyright 2014 The Android Open Source Project
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
11 * http://www.apache.org/licenses/LICENSE-2.0
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
21 * \brief Opaque type (sampler, buffer, atomic counter, ...) indexing tests.
23 * \todo [2014-03-05 pyry] Extend with following:
24 * + sampler: different filtering modes, multiple sizes, incomplete textures
25 * + SSBO: write, atomic op, unsized array .length()
26 *//*--------------------------------------------------------------------*/
28 #include "es31fOpaqueTypeIndexingTests.hpp"
29 #include "tcuTexture.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuFormatUtil.hpp"
32 #include "tcuVectorUtil.hpp"
33 #include "gluShaderUtil.hpp"
34 #include "gluShaderProgram.hpp"
35 #include "gluObjectWrapper.hpp"
36 #include "gluTextureUtil.hpp"
37 #include "gluRenderContext.hpp"
38 #include "gluProgramInterfaceQuery.hpp"
39 #include "gluContextInfo.hpp"
40 #include "glsShaderExecUtil.hpp"
41 #include "glwFunctions.hpp"
42 #include "glwEnums.hpp"
43 #include "deUniquePtr.hpp"
44 #include "deStringUtil.hpp"
45 #include "deRandom.hpp"
// NOTE(review): the surrounding namespace openers, additional using-declarations
// and the enum heads ("enum IndexExprType {", "enum TextureType {") are outside
// this view of the file; the enumerator lists below are truncated.
using namespace gls::ShaderExecUtil;

using tcu::TextureFormat;

// Owning pointer type for shader executors created per test iteration.
typedef de::UniquePtr<ShaderExecutor> ShaderExecutorPtr;

// IndexExprType enumerators: how an opaque-type array index is written in GLSL.
INDEX_EXPR_TYPE_CONST_LITERAL = 0,	// literal constant, e.g. sampler[1]
INDEX_EXPR_TYPE_CONST_EXPRESSION,	// constant integral expression (indexBase + N)
INDEX_EXPR_TYPE_UNIFORM,			// index read from a uniform variable
INDEX_EXPR_TYPE_DYNAMIC_UNIFORM,	// dynamically uniform per-invocation input

// TextureType enumerators (list truncated in this view).
TEXTURE_TYPE_2D_ARRAY,
TEXTURE_TYPE_CUBE_ARRAY,
// Appends "uniform highp int <varPrefix><n>;" declarations for n in [0, numVars)
// to the given output stream.
static void declareUniformIndexVars (std::ostream& str, const char* varPrefix, int numVars)
{
	for (int uniformNdx = 0; uniformNdx < numVars; ++uniformNdx)
		str << "uniform highp int " << varPrefix << uniformNdx << ";\n";
}
96 static void uploadUniformIndices (const glw::Functions& gl, deUint32 program, const char* varPrefix, int numIndices, const int* indices)
98 for (int varNdx = 0; varNdx < numIndices; varNdx++)
100 const string varName = varPrefix + de::toString(varNdx);
101 const int loc = gl.getUniformLocation(program, varName.c_str());
102 TCU_CHECK_MSG(loc >= 0, ("No location assigned for uniform '" + varName + "'").c_str());
104 gl.uniform1i(loc, indices[varNdx]);
// Returns the largest element of a non-empty vector.
// Precondition: !elements.empty() — the original read elements[0] unconditionally,
// so an empty input was always undefined behavior; callers pass fixed-size index lists.
template <typename T>
static T maxElement (const std::vector<T>& elements)
{
	// std::max_element replaces the hand-rolled linear scan; same O(n) cost.
	return *std::max_element(elements.begin(), elements.end());
}
119 static TextureType getTextureType (glu::DataType samplerType)
123 case glu::TYPE_SAMPLER_1D:
124 case glu::TYPE_INT_SAMPLER_1D:
125 case glu::TYPE_UINT_SAMPLER_1D:
126 case glu::TYPE_SAMPLER_1D_SHADOW:
127 return TEXTURE_TYPE_1D;
129 case glu::TYPE_SAMPLER_2D:
130 case glu::TYPE_INT_SAMPLER_2D:
131 case glu::TYPE_UINT_SAMPLER_2D:
132 case glu::TYPE_SAMPLER_2D_SHADOW:
133 return TEXTURE_TYPE_2D;
135 case glu::TYPE_SAMPLER_CUBE:
136 case glu::TYPE_INT_SAMPLER_CUBE:
137 case glu::TYPE_UINT_SAMPLER_CUBE:
138 case glu::TYPE_SAMPLER_CUBE_SHADOW:
139 return TEXTURE_TYPE_CUBE;
141 case glu::TYPE_SAMPLER_2D_ARRAY:
142 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
143 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
144 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
145 return TEXTURE_TYPE_2D_ARRAY;
147 case glu::TYPE_SAMPLER_3D:
148 case glu::TYPE_INT_SAMPLER_3D:
149 case glu::TYPE_UINT_SAMPLER_3D:
150 return TEXTURE_TYPE_3D;
152 case glu::TYPE_SAMPLER_CUBE_ARRAY:
153 case glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW:
154 case glu::TYPE_INT_SAMPLER_CUBE_ARRAY:
155 case glu::TYPE_UINT_SAMPLER_CUBE_ARRAY:
156 return TEXTURE_TYPE_CUBE_ARRAY;
159 TCU_THROW(InternalError, "Invalid sampler type");
163 static bool isShadowSampler (glu::DataType samplerType)
165 return samplerType == glu::TYPE_SAMPLER_1D_SHADOW ||
166 samplerType == glu::TYPE_SAMPLER_2D_SHADOW ||
167 samplerType == glu::TYPE_SAMPLER_2D_ARRAY_SHADOW ||
168 samplerType == glu::TYPE_SAMPLER_CUBE_SHADOW ||
169 samplerType == glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW;
172 static glu::DataType getSamplerOutputType (glu::DataType samplerType)
176 case glu::TYPE_SAMPLER_1D:
177 case glu::TYPE_SAMPLER_2D:
178 case glu::TYPE_SAMPLER_CUBE:
179 case glu::TYPE_SAMPLER_2D_ARRAY:
180 case glu::TYPE_SAMPLER_3D:
181 case glu::TYPE_SAMPLER_CUBE_ARRAY:
182 return glu::TYPE_FLOAT_VEC4;
184 case glu::TYPE_SAMPLER_1D_SHADOW:
185 case glu::TYPE_SAMPLER_2D_SHADOW:
186 case glu::TYPE_SAMPLER_CUBE_SHADOW:
187 case glu::TYPE_SAMPLER_2D_ARRAY_SHADOW:
188 case glu::TYPE_SAMPLER_CUBE_ARRAY_SHADOW:
189 return glu::TYPE_FLOAT;
191 case glu::TYPE_INT_SAMPLER_1D:
192 case glu::TYPE_INT_SAMPLER_2D:
193 case glu::TYPE_INT_SAMPLER_CUBE:
194 case glu::TYPE_INT_SAMPLER_2D_ARRAY:
195 case glu::TYPE_INT_SAMPLER_3D:
196 case glu::TYPE_INT_SAMPLER_CUBE_ARRAY:
197 return glu::TYPE_INT_VEC4;
199 case glu::TYPE_UINT_SAMPLER_1D:
200 case glu::TYPE_UINT_SAMPLER_2D:
201 case glu::TYPE_UINT_SAMPLER_CUBE:
202 case glu::TYPE_UINT_SAMPLER_2D_ARRAY:
203 case glu::TYPE_UINT_SAMPLER_3D:
204 case glu::TYPE_UINT_SAMPLER_CUBE_ARRAY:
205 return glu::TYPE_UINT_VEC4;
208 TCU_THROW(InternalError, "Invalid sampler type");
212 static tcu::TextureFormat getSamplerTextureFormat (glu::DataType samplerType)
214 const glu::DataType outType = getSamplerOutputType(samplerType);
215 const glu::DataType outScalarType = glu::getDataTypeScalarType(outType);
217 switch (outScalarType)
219 case glu::TYPE_FLOAT:
220 if (isShadowSampler(samplerType))
221 return tcu::TextureFormat(tcu::TextureFormat::D, tcu::TextureFormat::UNORM_INT16);
223 return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
225 case glu::TYPE_INT: return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::SIGNED_INT8);
226 case glu::TYPE_UINT: return tcu::TextureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNSIGNED_INT8);
229 TCU_THROW(InternalError, "Invalid sampler type");
233 static glu::DataType getSamplerCoordType (glu::DataType samplerType)
235 const TextureType texType = getTextureType(samplerType);
240 case TEXTURE_TYPE_1D: numCoords = 1; break;
241 case TEXTURE_TYPE_2D: numCoords = 2; break;
242 case TEXTURE_TYPE_2D_ARRAY: numCoords = 3; break;
243 case TEXTURE_TYPE_CUBE: numCoords = 3; break;
244 case TEXTURE_TYPE_3D: numCoords = 3; break;
245 case TEXTURE_TYPE_CUBE_ARRAY: numCoords = 4; break;
247 TCU_THROW(InternalError, "Invalid texture type");
250 if (isShadowSampler(samplerType) && samplerType != TYPE_SAMPLER_CUBE_ARRAY_SHADOW)
253 DE_ASSERT(de::inRange(numCoords, 1, 4));
255 return numCoords == 1 ? glu::TYPE_FLOAT : glu::getDataTypeFloatVec(numCoords);
258 static deUint32 getGLTextureTarget (TextureType texType)
262 case TEXTURE_TYPE_1D: return GL_TEXTURE_1D;
263 case TEXTURE_TYPE_2D: return GL_TEXTURE_2D;
264 case TEXTURE_TYPE_2D_ARRAY: return GL_TEXTURE_2D_ARRAY;
265 case TEXTURE_TYPE_CUBE: return GL_TEXTURE_CUBE_MAP;
266 case TEXTURE_TYPE_3D: return GL_TEXTURE_3D;
267 case TEXTURE_TYPE_CUBE_ARRAY: return GL_TEXTURE_CUBE_MAP_ARRAY;
269 TCU_THROW(InternalError, "Invalid texture type");
273 static void setupTexture (const glw::Functions& gl,
275 glu::DataType samplerType,
276 tcu::TextureFormat texFormat,
279 const TextureType texType = getTextureType(samplerType);
280 const deUint32 texTarget = getGLTextureTarget(texType);
281 const deUint32 intFormat = glu::getInternalFormat(texFormat);
282 const glu::TransferFormat transferFmt = glu::getTransferFormat(texFormat);
284 // \todo [2014-03-04 pyry] Use larger than 1x1 textures?
286 gl.bindTexture(texTarget, texture);
290 case TEXTURE_TYPE_1D:
291 gl.texStorage1D(texTarget, 1, intFormat, 1);
292 gl.texSubImage1D(texTarget, 0, 0, 1, transferFmt.format, transferFmt.dataType, color);
295 case TEXTURE_TYPE_2D:
296 gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
297 gl.texSubImage2D(texTarget, 0, 0, 0, 1, 1, transferFmt.format, transferFmt.dataType, color);
300 case TEXTURE_TYPE_2D_ARRAY:
301 case TEXTURE_TYPE_3D:
302 gl.texStorage3D(texTarget, 1, intFormat, 1, 1, 1);
303 gl.texSubImage3D(texTarget, 0, 0, 0, 0, 1, 1, 1, transferFmt.format, transferFmt.dataType, color);
306 case TEXTURE_TYPE_CUBE_ARRAY:
307 gl.texStorage3D(texTarget, 1, intFormat, 1, 1, 6);
308 for (int zoffset = 0; zoffset < 6; ++zoffset)
309 for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
310 gl.texSubImage3D(texTarget, 0, 0, 0, zoffset, 1, 1, 1, transferFmt.format, transferFmt.dataType, color);
313 case TEXTURE_TYPE_CUBE:
314 gl.texStorage2D(texTarget, 1, intFormat, 1, 1);
315 for (int face = 0; face < tcu::CUBEFACE_LAST; face++)
316 gl.texSubImage2D(glu::getGLCubeFace((tcu::CubeFace)face), 0, 0, 0, 1, 1, transferFmt.format, transferFmt.dataType, color);
320 TCU_THROW(InternalError, "Invalid texture type");
323 gl.texParameteri(texTarget, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
324 gl.texParameteri(texTarget, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
326 if (isShadowSampler(samplerType))
327 gl.texParameteri(texTarget, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
329 GLU_EXPECT_NO_ERROR(gl.getError(), "Texture setup failed");
// Test case: indexes into an array of samplers using the index expression kind
// given by IndexExprType, in the given shader stage, and checks lookup results.
// NOTE(review): access specifiers, braces and the init() declaration of this
// class are outside this view of the file.
class SamplerIndexingCase : public TestCase
	SamplerIndexingCase		(Context& context, const char* name, const char* description, glu::ShaderType shaderType, glu::DataType samplerType, IndexExprType indexExprType);
	~SamplerIndexingCase	(void);

	IterateResult			iterate					(void);

	// Copying disallowed (declared, not defined).
	SamplerIndexingCase		(const SamplerIndexingCase&);
	SamplerIndexingCase&	operator=				(const SamplerIndexingCase&);

	// Builds the ShaderSpec (inputs, outputs, GLSL source) for the lookup shader.
	void					getShaderSpec			(ShaderSpec* spec, int numSamplers, int numLookups, const int* lookupIndices, const RenderContext& renderContext) const;

	const glu::ShaderType	m_shaderType;		// Shader stage under test.
	const glu::DataType		m_samplerType;		// Sampler type being indexed.
	const IndexExprType		m_indexExprType;	// How the array index is expressed in GLSL.
// Constructor: stores the test parameters; no GL work happens here.
SamplerIndexingCase::SamplerIndexingCase (Context& context, const char* name, const char* description, glu::ShaderType shaderType, glu::DataType samplerType, IndexExprType indexExprType)
	: TestCase			(context, name, description)
	, m_shaderType		(shaderType)
	, m_samplerType		(samplerType)
	, m_indexExprType	(indexExprType)
// Destructor: nothing to release; all GL resources are scoped to iterate().
SamplerIndexingCase::~SamplerIndexingCase (void)
// Verifies the context supports everything this case needs; throws
// NotSupportedError otherwise. On ES 3.2 the stage/indexing features are core,
// so the extension checks only apply below that version.
void SamplerIndexingCase::init (void)
	if (!contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)))
		// Geometry stage needs its extension pre-3.2.
		if (m_shaderType == SHADERTYPE_GEOMETRY)
			TCU_CHECK_AND_THROW(NotSupportedError,
				m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
				"GL_EXT_geometry_shader extension is required to run geometry shader tests.");

		// Tessellation stages need their extension pre-3.2.
		if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
			TCU_CHECK_AND_THROW(NotSupportedError,
				m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
				"GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");

		// Anything other than constant indexing requires GL_EXT_gpu_shader5.
		if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
			TCU_CHECK_AND_THROW(NotSupportedError,
				m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
				"GL_EXT_gpu_shader5 extension is required for dynamic indexing of sampler arrays.");

		// Cube map array sampler types require GL_EXT_texture_cube_map_array.
		if (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY
			|| m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW
			|| m_samplerType == TYPE_INT_SAMPLER_CUBE_ARRAY
			|| m_samplerType == TYPE_UINT_SAMPLER_CUBE_ARRAY)
			TCU_CHECK_AND_THROW(NotSupportedError,
				m_context.getContextInfo().isExtensionSupported("GL_EXT_texture_cube_map_array"),
				"GL_EXT_texture_cube_map_array extension is required for cube map arrays.");
// Builds the ShaderSpec executed by gls::ShaderExecUtil: declares the sampler
// array and the index inputs/uniforms required by m_indexExprType, one output
// per lookup, and emits the texture() lookup statements.
// NOTE(review): some lines of this function (scope braces, an "if (!isES32"
// condition head, and a "global <<" stream prefix) are outside this view.
void SamplerIndexingCase::getShaderSpec (ShaderSpec* spec, int numSamplers, int numLookups, const int* lookupIndices, const RenderContext& renderContext) const
	const char*		samplersName	= "sampler";
	const char*		coordsName		= "coords";
	const char*		indicesPrefix	= "index";
	const char*		resultPrefix	= "result";
	const DataType	coordType		= getSamplerCoordType(m_samplerType);
	const DataType	outType			= getSamplerOutputType(m_samplerType);
	const bool		isES32			= contextSupports(renderContext.getType(), glu::ApiType::es(3, 2));
	std::ostringstream	global;
	std::ostringstream	code;

	// Texture coordinates are a per-invocation shader input.
	spec->inputs.push_back(Symbol(coordsName, VarType(coordType, PRECISION_HIGHP)));

	// Dynamic indexing needs GL_EXT_gpu_shader5 below ES 3.2 (core in 3.2).
	if (!isES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
		global << "#extension GL_EXT_gpu_shader5 : require\n";

	// Cube map array samplers need their extension below ES 3.2.
	// (The "if (!isES32" head of this condition is outside this view.)
		&& (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY
		|| m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW
		|| m_samplerType == TYPE_INT_SAMPLER_CUBE_ARRAY
		|| m_samplerType == TYPE_UINT_SAMPLER_CUBE_ARRAY))
		global << "#extension GL_EXT_texture_cube_map_array: require\n";

	// indexBase makes constant-expression indices non-literal: indexBase + (i-1) == i.
	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
		global << "const highp int indexBase = 1;\n";

	// Sampler array declaration (its "global <<" prefix is outside this view).
		"uniform highp " << getDataTypeName(m_samplerType) << " " << samplersName << "[" << numSamplers << "];\n";

	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
		// Dynamically uniform: one int input per lookup carries the index.
		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
			const string varName = indicesPrefix + de::toString(lookupNdx);
			spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		declareUniformIndexVars(global, indicesPrefix, numLookups);

	// One output variable per lookup.
	for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
		const string varName = resultPrefix + de::toString(lookupNdx);
		spec->outputs.push_back(Symbol(varName, VarType(outType, PRECISION_HIGHP)));

	// Emit: result<N> = texture(sampler[<index expr>], coords [, 0.0]);
	for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
		code << resultPrefix << "" << lookupNdx << " = texture(" << samplersName << "[";

		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
			code << lookupIndices[lookupNdx];
		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
			code << "indexBase + " << (lookupIndices[lookupNdx]-1);
		// Fallback (uniform / dynamically uniform): index comes from a variable.
			code << indicesPrefix << lookupNdx;

		// samplerCubeArrayShadow takes the compare reference as an extra argument.
		code << "], " << coordsName << (m_samplerType == TYPE_SAMPLER_CUBE_ARRAY_SHADOW ? ", 0.0" : "") << ");\n";

	spec->version				= isES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
	spec->globalDeclarations	= global.str();
	spec->source				= code.str();
464 static void fillTextureData (const tcu::PixelBufferAccess& access, de::Random& rnd)
466 DE_ASSERT(access.getHeight() == 1 && access.getDepth() == 1);
468 if (access.getFormat().order == TextureFormat::D)
470 // \note Texture uses odd values, lookup even values to avoid precision issues.
471 const float values[] = { 0.1f, 0.3f, 0.5f, 0.7f, 0.9f };
473 for (int ndx = 0; ndx < access.getWidth(); ndx++)
474 access.setPixDepth(rnd.choose<float>(DE_ARRAY_BEGIN(values), DE_ARRAY_END(values)), ndx, 0);
478 TCU_CHECK_INTERNAL(access.getFormat().order == TextureFormat::RGBA && access.getFormat().getPixelSize() == 4);
480 for (int ndx = 0; ndx < access.getWidth(); ndx++)
481 *((deUint32*)access.getDataPtr() + ndx) = rnd.getUint32();
// Executes the generated shader for numInvocations invocations, each doing
// numLookups indexed sampler lookups, then verifies results against the
// reference texture data sampled on the CPU.
// NOTE(review): several structural lines of this method (braces, else branches,
// local declarations such as "bool isOk", and the final return) are outside
// this view of the file.
SamplerIndexingCase::IterateResult SamplerIndexingCase::iterate (void)
	const int				numInvocations		= 64;
	const int				numSamplers			= 8;
	const int				numLookups			= 4;
	const DataType			coordType			= getSamplerCoordType(m_samplerType);
	const DataType			outputType			= getSamplerOutputType(m_samplerType);
	const TextureFormat		texFormat			= getSamplerTextureFormat(m_samplerType);
	// Scalars per lookup across all invocations; used to slice outData below.
	const int				outLookupStride		= numInvocations*getDataTypeScalarSize(outputType);
	vector<int>				lookupIndices		(numLookups);
	vector<float>			coords;
	vector<deUint32>		outData;
	vector<deUint8>			texData				(numSamplers * texFormat.getPixelSize());
	// CPU-side view of the per-sampler texel data (one 1x1 texel per sampler).
	const tcu::PixelBufferAccess refTexAccess	(texFormat, numSamplers, 1, 1, &texData[0]);

	ShaderSpec				shaderSpec;
	// Deterministic per-configuration seed.
	de::Random				rnd					(deInt32Hash(m_samplerType) ^ deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));

	// Random sampler index for each lookup.
	for (int ndx = 0; ndx < numLookups; ndx++)
		lookupIndices[ndx] = rnd.getInt(0, numSamplers-1);

	getShaderSpec(&shaderSpec, numSamplers, numLookups, &lookupIndices[0], m_context.getRenderContext());

	coords.resize(numInvocations * getDataTypeScalarSize(coordType));

	if (m_samplerType != TYPE_SAMPLER_CUBE_ARRAY_SHADOW && isShadowSampler(m_samplerType))
		// Use different comparison value per invocation.
		// \note Texture uses odd values, comparison even values.
		const int	numCoordComps	= getDataTypeScalarSize(coordType);
		const float	cmpValues[]		= { 0.0f, 0.2f, 0.4f, 0.6f, 0.8f, 1.0f };

		// Compare reference lives in the last coordinate component.
		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
			coords[invocationNdx*numCoordComps + (numCoordComps-1)] = rnd.choose<float>(DE_ARRAY_BEGIN(cmpValues), DE_ARRAY_END(cmpValues));

	fillTextureData(refTexAccess, rnd);

	outData.resize(numLookups*outLookupStride);

	const RenderContext&	renderCtx	= m_context.getRenderContext();
	const glw::Functions&	gl			= renderCtx.getFunctions();
	ShaderExecutorPtr		executor	(createExecutor(m_context.getRenderContext(), m_shaderType, shaderSpec));
	TextureVector			textures	(renderCtx, numSamplers);
	vector<void*>			inputs;
	vector<void*>			outputs;
	vector<int>				expandedIndices;
	const int				maxIndex	= maxElement(lookupIndices);

	m_testCtx.getLog() << *executor;

	if (!executor->isOk())
		TCU_FAIL("Compile failed");

	executor->useProgram();

	// Bind one texture per sampler array element, unit == array index.
	// \todo [2014-03-05 pyry] Do we want to randomize tex unit assignments?
	for (int samplerNdx = 0; samplerNdx < numSamplers; samplerNdx++)
		const string	samplerName	= string("sampler[") + de::toString(samplerNdx) + "]";
		const int		samplerLoc	= gl.getUniformLocation(executor->getProgram(), samplerName.c_str());

		if (samplerNdx > maxIndex && samplerLoc < 0)
			continue; // Unused uniform eliminated by compiler

		TCU_CHECK_MSG(samplerLoc >= 0, (string("No location for uniform '") + samplerName + "' found").c_str());

		gl.activeTexture(GL_TEXTURE0 + samplerNdx);
		setupTexture(gl, textures[samplerNdx], m_samplerType, texFormat, &texData[samplerNdx*texFormat.getPixelSize()]);

		gl.uniform1i(samplerLoc, samplerNdx);

	inputs.push_back(&coords[0]);

	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
		// Replicate each lookup index across all invocations (dynamically
		// uniform inputs must still be per-invocation buffers).
		expandedIndices.resize(numInvocations * lookupIndices.size());
		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
			for (int invNdx = 0; invNdx < numInvocations; invNdx++)
				expandedIndices[lookupNdx*numInvocations + invNdx] = lookupIndices[lookupNdx];

		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
			inputs.push_back(&expandedIndices[lookupNdx*numInvocations]);
	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		uploadUniformIndices(gl, executor->getProgram(), "index", numLookups, &lookupIndices[0]);

	for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
		outputs.push_back(&outData[outLookupStride*lookupNdx]);

	GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");

	executor->execute(numInvocations, &inputs[0], &outputs[0]);

	// Assume pass; verification below downgrades to FAIL on first mismatch.
	m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");

	if (isShadowSampler(m_samplerType))
		// Shadow path: scalar results, compared against CPU depth comparison.
		const tcu::Sampler	refSampler		(tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE, tcu::Sampler::CLAMP_TO_EDGE,
											 tcu::Sampler::NEAREST, tcu::Sampler::NEAREST, 0.0f, false /* non-normalized */,
											 tcu::Sampler::COMPAREMODE_LESS);
		const int			numCoordComps	= getDataTypeScalarSize(coordType);

		TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 1);

		// Each invocation may have different results.
		for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
			const float coord = coords[invocationNdx*numCoordComps + (numCoordComps-1)];

			for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
				const int	texNdx		= lookupIndices[lookupNdx];
				const float	result		= *((const float*)(const deUint8*)&outData[lookupNdx*outLookupStride + invocationNdx]);
				const float	reference	= refTexAccess.sample2DCompare(refSampler, tcu::Sampler::NEAREST, coord, (float)texNdx, 0.0f, tcu::IVec3(0));

				if (de::abs(result-reference) > 0.005f)
					m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx << ", lookup " << lookupNdx << ": expected "
										<< reference << ", got " << result
										<< TestLog::EndMessage;

					if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
						m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");

	// Non-shadow path (else branch; its head is outside this view): vec4 results.
	TCU_CHECK_INTERNAL(getDataTypeScalarSize(outputType) == 4);

	// Validate results from first invocation
	for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
		const int		texNdx	= lookupIndices[lookupNdx];
		const deUint8*	resPtr	= (const deUint8*)&outData[lookupNdx*outLookupStride];

		if (outputType == TYPE_FLOAT_VEC4)
			// Unorm texel round-trips through 8-bit storage; allow 1/256 error.
			const float		threshold	= 1.0f / 256.0f;
			const tcu::Vec4	reference	= refTexAccess.getPixel(texNdx, 0);
			const float*	floatPtr	= (const float*)resPtr;
			const tcu::Vec4	result		(floatPtr[0], floatPtr[1], floatPtr[2], floatPtr[3]);

			isOk = boolAll(lessThanEqual(abs(reference-result), tcu::Vec4(threshold)));

				m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
									<< reference << ", got " << result
									<< TestLog::EndMessage;

			// Integer path: exact match required.
			const tcu::UVec4	reference	= refTexAccess.getPixelUint(texNdx, 0);
			const deUint32*		uintPtr		= (const deUint32*)resPtr;
			const tcu::UVec4	result		(uintPtr[0], uintPtr[1], uintPtr[2], uintPtr[3]);

			isOk = boolAll(equal(reference, result));

				m_testCtx.getLog() << TestLog::Message << "ERROR: at lookup " << lookupNdx << ": expected "
									<< reference << ", got " << result
									<< TestLog::EndMessage;

		if (!isOk && m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
			m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Got invalid lookup result");

	// Check results of other invocations against first one
	for (int invocationNdx = 1; invocationNdx < numInvocations; invocationNdx++)
		for (int lookupNdx = 0; lookupNdx < numLookups; lookupNdx++)
			const deUint32*	refPtr	= &outData[lookupNdx*outLookupStride];
			const deUint32*	resPtr	= refPtr + invocationNdx*4;

			for (int ndx = 0; ndx < 4; ndx++)
				isOk = isOk && (refPtr[ndx] == resPtr[ndx]);

				m_testCtx.getLog() << TestLog::Message << "ERROR: invocation " << invocationNdx << " result "
									<< tcu::formatArray(tcu::Format::HexIterator<deUint32>(resPtr), tcu::Format::HexIterator<deUint32>(resPtr+4))
									<< " for lookup " << lookupNdx << " doesn't match result from first invocation "
									<< tcu::formatArray(tcu::Format::HexIterator<deUint32>(refPtr), tcu::Format::HexIterator<deUint32>(refPtr+4))
									<< TestLog::EndMessage;

				if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
					m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Inconsistent lookup results");
// Test case: indexes into an array of interface block instances (uniform or
// shader storage) using the given index expression type and shader stage.
// NOTE(review): access specifiers, braces, the BlockType enum head, the
// BLOCKTYPE_BUFFER enumerator (referenced later in the file) and the init()
// declaration are outside this view.
class BlockArrayIndexingCase : public TestCase
	// BlockType enumerators (truncated list):
	BLOCKTYPE_UNIFORM = 0,

	BlockArrayIndexingCase	(Context& context, const char* name, const char* description, BlockType blockType, IndexExprType indexExprType, ShaderType shaderType);
	~BlockArrayIndexingCase	(void);

	IterateResult			iterate					(void);

	// Copying disallowed (declared, not defined).
	BlockArrayIndexingCase	(const BlockArrayIndexingCase&);
	BlockArrayIndexingCase&	operator=				(const BlockArrayIndexingCase&);

	// Builds the ShaderSpec declaring the block array and per-read outputs.
	void					getShaderSpec			(ShaderSpec* spec, int numInstances, int numReads, const int* readIndices, const RenderContext& renderContext) const;

	const BlockType			m_blockType;		// Uniform vs. shader storage block.
	const IndexExprType		m_indexExprType;	// How the array index is expressed.
	const ShaderType		m_shaderType;		// Shader stage under test.

	const int				m_numInstances;		// Number of block instances in the array.
// Constructor: stores the test parameters; no GL work happens here.
// NOTE(review): the m_numInstances initializer of this constructor is outside
// this view of the file.
BlockArrayIndexingCase::BlockArrayIndexingCase (Context& context, const char* name, const char* description, BlockType blockType, IndexExprType indexExprType, ShaderType shaderType)
	: TestCase			(context, name, description)
	, m_blockType		(blockType)
	, m_indexExprType	(indexExprType)
	, m_shaderType		(shaderType)
// Destructor: nothing to release; all GL resources are scoped to iterate().
BlockArrayIndexingCase::~BlockArrayIndexingCase (void)
// Verifies required stage/indexing support (core in ES 3.2, extensions below)
// and, for SSBO cases, that enough shader storage blocks are exposed for the
// tested stage. Throws NotSupportedError when a requirement is missing.
// NOTE(review): some scope braces and the declaration of 'maxBlocks' are
// outside this view of the file.
void BlockArrayIndexingCase::init (void)
	if (!contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)))
		// Geometry stage needs its extension pre-3.2.
		if (m_shaderType == SHADERTYPE_GEOMETRY)
			TCU_CHECK_AND_THROW(NotSupportedError,
				m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
				"GL_EXT_geometry_shader extension is required to run geometry shader tests.");

		// Tessellation stages need their extension pre-3.2.
		if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
			TCU_CHECK_AND_THROW(NotSupportedError,
				m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
				"GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");

		// Non-constant indexing requires GL_EXT_gpu_shader5.
		if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
			TCU_CHECK_AND_THROW(NotSupportedError,
				m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
				"GL_EXT_gpu_shader5 extension is required for dynamic indexing of interface blocks.");

	if (m_blockType == BLOCKTYPE_BUFFER)
		// Per-stage SSBO limit pnames, indexed by glu::ShaderType value.
		const deUint32 limitPnames[] =
			GL_MAX_VERTEX_SHADER_STORAGE_BLOCKS,
			GL_MAX_FRAGMENT_SHADER_STORAGE_BLOCKS,
			GL_MAX_GEOMETRY_SHADER_STORAGE_BLOCKS,
			GL_MAX_TESS_CONTROL_SHADER_STORAGE_BLOCKS,
			GL_MAX_TESS_EVALUATION_SHADER_STORAGE_BLOCKS,
			GL_MAX_COMPUTE_SHADER_STORAGE_BLOCKS

		const glw::Functions& gl = m_context.getRenderContext().getFunctions();

		gl.getIntegerv(limitPnames[m_shaderType], &maxBlocks);
		GLU_EXPECT_NO_ERROR(gl.getError(), "glGetIntegerv()");

		// iterate() binds blocks at base binding 2, so 2 + m_numInstances are needed.
		if (maxBlocks < 2 + m_numInstances)
			throw tcu::NotSupportedError("Not enough shader storage blocks supported for shader type");
// Builds the ShaderSpec: declares an array of interface block instances
// ("Block ... block[N]") plus the index inputs/uniforms required by
// m_indexExprType, one uint output per read, and the read statements.
// NOTE(review): some lines (scope braces, the "global <<" prefix and the block
// member declaration between the layout and closing lines) are outside this view.
void BlockArrayIndexingCase::getShaderSpec (ShaderSpec* spec, int numInstances, int numReads, const int* readIndices, const RenderContext& renderContext) const
	const int		binding			= 2;	// Matches baseBinding used in iterate().
	const char*		blockName		= "Block";
	const char*		instanceName	= "block";
	const char*		indicesPrefix	= "index";
	const char*		resultPrefix	= "result";
	const char*		interfaceName	= m_blockType == BLOCKTYPE_UNIFORM ? "uniform" : "buffer";
	// std140 for UBOs, std430 for SSBOs.
	const char*		layout			= m_blockType == BLOCKTYPE_UNIFORM ? "std140" : "std430";
	const bool		isES32			= contextSupports(renderContext.getType(), glu::ApiType::es(3, 2));
	std::ostringstream	global;
	std::ostringstream	code;

	// Dynamic indexing needs GL_EXT_gpu_shader5 below ES 3.2 (core in 3.2).
	if (!isES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
		global << "#extension GL_EXT_gpu_shader5 : require\n";

	// indexBase makes constant-expression indices non-literal: indexBase + (i-1) == i.
	if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
		global << "const highp int indexBase = 1;\n";

	// Block array declaration (the "global <<" prefix and the block body lines
	// are outside this view).
		"layout(" << layout << ", binding = " << binding << ") " << interfaceName << " " << blockName << "\n"
		"} " << instanceName << "[" << numInstances << "];\n";

	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
		// Dynamically uniform: one int input per read carries the index.
		for (int readNdx = 0; readNdx < numReads; readNdx++)
			const string varName = indicesPrefix + de::toString(readNdx);
			spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		declareUniformIndexVars(global, indicesPrefix, numReads);

	// One uint output per read.
	for (int readNdx = 0; readNdx < numReads; readNdx++)
		const string varName = resultPrefix + de::toString(readNdx);
		spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));

	// Emit: result<N> = block[<index expr>].value;
	for (int readNdx = 0; readNdx < numReads; readNdx++)
		code << resultPrefix << readNdx << " = " << instanceName << "[";

		if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
			code << readIndices[readNdx];
		else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
			code << "indexBase + " << (readIndices[readNdx]-1);
		// Fallback (uniform / dynamically uniform): index comes from a variable.
			code << indicesPrefix << readNdx;

		code << "].value;\n";

	spec->version				= isES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
	spec->globalDeclarations	= global.str();
	spec->source				= code.str();
// Creates one buffer per block instance filled with a random uint, executes the
// generated shader for numInvocations invocations doing numReads indexed block
// reads each, and checks every result equals the value stored in the indexed
// instance.
// NOTE(review): several structural lines (braces, std::fill's enclosing scope,
// and the final return) are outside this view of the file.
BlockArrayIndexingCase::IterateResult BlockArrayIndexingCase::iterate (void)
	const int			numInvocations	= 32;
	const int			numInstances	= m_numInstances;
	const int			numReads		= 4;
	vector<int>			readIndices		(numReads);
	vector<deUint32>	inValues		(numInstances);
	vector<deUint32>	outValues		(numInvocations*numReads);
	ShaderSpec			shaderSpec;
	// Deterministic per-configuration seed.
	de::Random			rnd				(deInt32Hash(m_shaderType) ^ deInt32Hash(m_blockType) ^ deInt32Hash(m_indexExprType));

	// Random instance index for each read.
	for (int readNdx = 0; readNdx < numReads; readNdx++)
		readIndices[readNdx] = rnd.getInt(0, numInstances-1);

	// Random payload value per block instance.
	for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		inValues[instanceNdx] = rnd.getUint32();

	getShaderSpec(&shaderSpec, numInstances, numReads, &readIndices[0], m_context.getRenderContext());

	const RenderContext&	renderCtx		= m_context.getRenderContext();
	const glw::Functions&	gl				= renderCtx.getFunctions();
	const int				baseBinding		= 2;	// Matches "binding = 2" in the shader.
	const BufferVector		buffers			(renderCtx, numInstances);
	const deUint32			bufTarget		= m_blockType == BLOCKTYPE_BUFFER ? GL_SHADER_STORAGE_BUFFER : GL_UNIFORM_BUFFER;
	ShaderExecutorPtr		shaderExecutor	(createExecutor(renderCtx, m_shaderType, shaderSpec));
	vector<int>				expandedIndices;
	vector<void*>			inputs;
	vector<void*>			outputs;

	m_testCtx.getLog() << *shaderExecutor;

	if (!shaderExecutor->isOk())
		TCU_FAIL("Compile failed");

	shaderExecutor->useProgram();

	// One buffer per instance: upload its value and bind to base+instance.
	for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		gl.bindBuffer(bufTarget, buffers[instanceNdx]);
		gl.bufferData(bufTarget, (glw::GLsizeiptr)sizeof(deUint32), &inValues[instanceNdx], GL_STATIC_DRAW);
		gl.bindBufferBase(bufTarget, baseBinding+instanceNdx, buffers[instanceNdx]);

	if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
		// Replicate each read index across all invocations (dynamically uniform
		// inputs must still be per-invocation buffers).
		expandedIndices.resize(numInvocations * readIndices.size());

		for (int readNdx = 0; readNdx < numReads; readNdx++)
			int* dst = &expandedIndices[numInvocations*readNdx];
			std::fill(dst, dst+numInvocations, readIndices[readNdx]);

		for (int readNdx = 0; readNdx < numReads; readNdx++)
			inputs.push_back(&expandedIndices[readNdx*numInvocations]);
	else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
		uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numReads, &readIndices[0]);

	for (int readNdx = 0; readNdx < numReads; readNdx++)
		outputs.push_back(&outValues[readNdx*numInvocations]);

	GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");

	shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);

	// Assume pass; verification below downgrades to FAIL on first mismatch.
	m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");

	for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
		for (int readNdx = 0; readNdx < numReads; readNdx++)
			const deUint32	refValue	= inValues[readIndices[readNdx]];
			const deUint32	resValue	= outValues[readNdx*numInvocations + invocationNdx];

			if (refValue != resValue)
				m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx
									<< ", read " << readNdx << ": expected "
									<< tcu::toHex(refValue) << ", got " << tcu::toHex(resValue)
									<< TestLog::EndMessage;

				if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
					m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
// Test case: indexes into an array of atomic counters using the given index
// expression type and shader stage.
// NOTE(review): access specifiers, braces, member declarations and the init()
// declaration of this class are outside this view.
class AtomicCounterIndexingCase : public TestCase
	AtomicCounterIndexingCase	(Context& context, const char* name, const char* description, IndexExprType indexExprType, ShaderType shaderType);
	~AtomicCounterIndexingCase	(void);

	IterateResult				iterate					(void);

	// Copying disallowed (declared, not defined).
	AtomicCounterIndexingCase	(const AtomicCounterIndexingCase&);
	AtomicCounterIndexingCase&	operator=				(const AtomicCounterIndexingCase&);

	// Builds the ShaderSpec declaring the counters and per-op outputs.
	void						getShaderSpec			(ShaderSpec* spec, int numCounters, int numOps, const int* opIndices, const RenderContext& renderContext) const;

	const IndexExprType			m_indexExprType;	// How the array index is expressed.
	const glu::ShaderType		m_shaderType;		// Shader stage under test.
953 AtomicCounterIndexingCase::AtomicCounterIndexingCase (Context& context, const char* name, const char* description, IndexExprType indexExprType, ShaderType shaderType)
954 : TestCase (context, name, description)
955 , m_indexExprType (indexExprType)
956 , m_shaderType (shaderType)
960 AtomicCounterIndexingCase::~AtomicCounterIndexingCase (void)
964 void AtomicCounterIndexingCase::init (void)
966 if (!contextSupports(m_context.getRenderContext().getType(), glu::ApiType::es(3, 2)))
968 if (m_shaderType == SHADERTYPE_GEOMETRY)
969 TCU_CHECK_AND_THROW(NotSupportedError,
970 m_context.getContextInfo().isExtensionSupported("GL_EXT_geometry_shader"),
971 "GL_EXT_geometry_shader extension is required to run geometry shader tests.");
973 if (m_shaderType == SHADERTYPE_TESSELLATION_CONTROL || m_shaderType == SHADERTYPE_TESSELLATION_EVALUATION)
974 TCU_CHECK_AND_THROW(NotSupportedError,
975 m_context.getContextInfo().isExtensionSupported("GL_EXT_tessellation_shader"),
976 "GL_EXT_tessellation_shader extension is required to run tessellation shader tests.");
978 if (m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
979 TCU_CHECK_AND_THROW(NotSupportedError,
980 m_context.getContextInfo().isExtensionSupported("GL_EXT_gpu_shader5"),
981 "GL_EXT_gpu_shader5 extension is required for dynamic indexing of atomic counters.");
984 if (m_shaderType == glu::SHADERTYPE_VERTEX || m_shaderType == glu::SHADERTYPE_FRAGMENT)
986 int numAtomicCounterBuffers = 0;
987 m_context.getRenderContext().getFunctions().getIntegerv(m_shaderType == glu::SHADERTYPE_VERTEX ? GL_MAX_VERTEX_ATOMIC_COUNTER_BUFFERS
988 : GL_MAX_FRAGMENT_ATOMIC_COUNTER_BUFFERS,
989 &numAtomicCounterBuffers);
991 if (numAtomicCounterBuffers == 0)
993 const string message = "Atomic counters not supported in " + string(glu::getShaderTypeName(m_shaderType)) + " shader";
994 TCU_THROW(NotSupportedError, message.c_str());
999 void AtomicCounterIndexingCase::getShaderSpec (ShaderSpec* spec, int numCounters, int numOps, const int* opIndices, const RenderContext& renderContext) const
1001 const char* indicesPrefix = "index";
1002 const char* resultPrefix = "result";
1003 const bool isES32 = contextSupports(renderContext.getType(), glu::ApiType::es(3, 2));
1004 std::ostringstream global;
1005 std::ostringstream code;
1007 if (!isES32 && m_indexExprType != INDEX_EXPR_TYPE_CONST_LITERAL && m_indexExprType != INDEX_EXPR_TYPE_CONST_EXPRESSION)
1008 global << "#extension GL_EXT_gpu_shader5 : require\n";
1010 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1011 global << "const highp int indexBase = 1;\n";
1014 "layout(binding = 0) uniform atomic_uint counter[" << numCounters << "];\n";
1016 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1018 for (int opNdx = 0; opNdx < numOps; opNdx++)
1020 const string varName = indicesPrefix + de::toString(opNdx);
1021 spec->inputs.push_back(Symbol(varName, VarType(TYPE_INT, PRECISION_HIGHP)));
1024 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1025 declareUniformIndexVars(global, indicesPrefix, numOps);
1027 for (int opNdx = 0; opNdx < numOps; opNdx++)
1029 const string varName = resultPrefix + de::toString(opNdx);
1030 spec->outputs.push_back(Symbol(varName, VarType(TYPE_UINT, PRECISION_HIGHP)));
1033 for (int opNdx = 0; opNdx < numOps; opNdx++)
1035 code << resultPrefix << opNdx << " = atomicCounterIncrement(counter[";
1037 if (m_indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL)
1038 code << opIndices[opNdx];
1039 else if (m_indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1040 code << "indexBase + " << (opIndices[opNdx]-1);
1042 code << indicesPrefix << opNdx;
1047 spec->version = isES32 ? GLSL_VERSION_320_ES : GLSL_VERSION_310_ES;
1048 spec->globalDeclarations = global.str();
1049 spec->source = code.str();
1052 AtomicCounterIndexingCase::IterateResult AtomicCounterIndexingCase::iterate (void)
1054 const RenderContext& renderCtx = m_context.getRenderContext();
1055 const glw::Functions& gl = renderCtx.getFunctions();
1056 const Buffer counterBuffer (renderCtx);
1058 const int numInvocations = 32;
1059 const int numCounters = 4;
1060 const int numOps = 4;
1061 vector<int> opIndices (numOps);
1062 vector<deUint32> outValues (numInvocations*numOps);
1063 ShaderSpec shaderSpec;
1064 de::Random rnd (deInt32Hash(m_shaderType) ^ deInt32Hash(m_indexExprType));
1066 for (int opNdx = 0; opNdx < numOps; opNdx++)
1067 opIndices[opNdx] = rnd.getInt(0, numOps-1);
1069 getShaderSpec(&shaderSpec, numCounters, numOps, &opIndices[0], m_context.getRenderContext());
1072 const BufferVector buffers (renderCtx, numCounters);
1073 ShaderExecutorPtr shaderExecutor (createExecutor(renderCtx, m_shaderType, shaderSpec));
1074 vector<int> expandedIndices;
1075 vector<void*> inputs;
1076 vector<void*> outputs;
1078 m_testCtx.getLog() << *shaderExecutor;
1080 if (!shaderExecutor->isOk())
1081 TCU_FAIL("Compile failed");
1084 const int bufSize = getProgramResourceInt(gl, shaderExecutor->getProgram(), GL_ATOMIC_COUNTER_BUFFER, 0, GL_BUFFER_DATA_SIZE);
1085 const int maxNdx = maxElement(opIndices);
1086 std::vector<deUint8> emptyData (numCounters*4, 0);
1088 if (bufSize < (maxNdx+1)*4)
1089 TCU_FAIL((string("GL reported invalid buffer size " + de::toString(bufSize)).c_str()));
1091 gl.bindBuffer(GL_ATOMIC_COUNTER_BUFFER, *counterBuffer);
1092 gl.bufferData(GL_ATOMIC_COUNTER_BUFFER, (glw::GLsizeiptr)emptyData.size(), &emptyData[0], GL_STATIC_DRAW);
1093 gl.bindBufferBase(GL_ATOMIC_COUNTER_BUFFER, 0, *counterBuffer);
1094 GLU_EXPECT_NO_ERROR(gl.getError(), "Atomic counter buffer initialization failed");
1097 shaderExecutor->useProgram();
1099 if (m_indexExprType == INDEX_EXPR_TYPE_DYNAMIC_UNIFORM)
1101 expandedIndices.resize(numInvocations * opIndices.size());
1103 for (int opNdx = 0; opNdx < numOps; opNdx++)
1105 int* dst = &expandedIndices[numInvocations*opNdx];
1106 std::fill(dst, dst+numInvocations, opIndices[opNdx]);
1109 for (int opNdx = 0; opNdx < numOps; opNdx++)
1110 inputs.push_back(&expandedIndices[opNdx*numInvocations]);
1112 else if (m_indexExprType == INDEX_EXPR_TYPE_UNIFORM)
1113 uploadUniformIndices(gl, shaderExecutor->getProgram(), "index", numOps, &opIndices[0]);
1115 for (int opNdx = 0; opNdx < numOps; opNdx++)
1116 outputs.push_back(&outValues[opNdx*numInvocations]);
1118 GLU_EXPECT_NO_ERROR(gl.getError(), "Setup failed");
1120 shaderExecutor->execute(numInvocations, inputs.empty() ? DE_NULL : &inputs[0], &outputs[0]);
1123 m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
1126 vector<int> numHits (numCounters, 0); // Number of hits per counter.
1127 vector<deUint32> counterValues (numCounters);
1128 vector<vector<bool> > counterMasks (numCounters);
1130 for (int opNdx = 0; opNdx < numOps; opNdx++)
1131 numHits[opIndices[opNdx]] += 1;
1133 // Read counter values
1135 const void* mapPtr = DE_NULL;
1139 mapPtr = gl.mapBufferRange(GL_ATOMIC_COUNTER_BUFFER, 0, numCounters*4, GL_MAP_READ_BIT);
1140 GLU_EXPECT_NO_ERROR(gl.getError(), "glMapBufferRange(GL_ATOMIC_COUNTER_BUFFER)");
1142 std::copy((const deUint32*)mapPtr, (const deUint32*)mapPtr + numCounters, &counterValues[0]);
1143 gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
1148 gl.unmapBuffer(GL_ATOMIC_COUNTER_BUFFER);
1153 // Verify counter values
1154 for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1156 const deUint32 refCount = (deUint32)(numHits[counterNdx]*numInvocations);
1157 const deUint32 resCount = counterValues[counterNdx];
1159 if (refCount != resCount)
1161 m_testCtx.getLog() << TestLog::Message << "ERROR: atomic counter " << counterNdx << " has value " << resCount
1162 << ", expected " << refCount
1163 << TestLog::EndMessage;
1165 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1166 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid atomic counter value");
1170 // Allocate bitmasks - one bit per each valid result value
1171 for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1173 const int counterValue = numHits[counterNdx]*numInvocations;
1174 counterMasks[counterNdx].resize(counterValue, false);
1177 // Verify result values from shaders
1178 for (int invocationNdx = 0; invocationNdx < numInvocations; invocationNdx++)
1180 for (int opNdx = 0; opNdx < numOps; opNdx++)
1182 const int counterNdx = opIndices[opNdx];
1183 const deUint32 resValue = outValues[opNdx*numInvocations + invocationNdx];
1184 const bool rangeOk = de::inBounds(resValue, 0u, (deUint32)counterMasks[counterNdx].size());
1185 const bool notSeen = rangeOk && !counterMasks[counterNdx][resValue];
1186 const bool isOk = rangeOk && notSeen;
1190 m_testCtx.getLog() << TestLog::Message << "ERROR: at invocation " << invocationNdx
1191 << ", op " << opNdx << ": got invalid result value "
1193 << TestLog::EndMessage;
1195 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1196 m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Invalid result value");
1200 // Mark as used - no other invocation should see this value from same counter.
1201 counterMasks[counterNdx][resValue] = true;
1206 if (m_testCtx.getTestResult() == QP_TEST_RESULT_PASS)
1208 // Consistency check - all masks should be 1 now
1209 for (int counterNdx = 0; counterNdx < numCounters; counterNdx++)
1211 for (vector<bool>::const_iterator i = counterMasks[counterNdx].begin(); i != counterMasks[counterNdx].end(); i++)
1212 TCU_CHECK_INTERNAL(*i);
1222 OpaqueTypeIndexingTests::OpaqueTypeIndexingTests (Context& context)
1223 : TestCaseGroup(context, "opaque_type_indexing", "Opaque Type Indexing Tests")
1227 OpaqueTypeIndexingTests::~OpaqueTypeIndexingTests (void)
1231 void OpaqueTypeIndexingTests::init (void)
1237 const char* description;
1240 { INDEX_EXPR_TYPE_CONST_LITERAL, "const_literal", "Indexing by constant literal" },
1241 { INDEX_EXPR_TYPE_CONST_EXPRESSION, "const_expression", "Indexing by constant expression" },
1242 { INDEX_EXPR_TYPE_UNIFORM, "uniform", "Indexing by uniform value" },
1243 { INDEX_EXPR_TYPE_DYNAMIC_UNIFORM, "dynamically_uniform", "Indexing by dynamically uniform expression" }
1252 { SHADERTYPE_VERTEX, "vertex" },
1253 { SHADERTYPE_FRAGMENT, "fragment" },
1254 { SHADERTYPE_COMPUTE, "compute" },
1255 { SHADERTYPE_GEOMETRY, "geometry" },
1256 { SHADERTYPE_TESSELLATION_CONTROL, "tessellation_control" },
1257 { SHADERTYPE_TESSELLATION_EVALUATION, "tessellation_evaluation" }
1262 static const DataType samplerTypes[] =
1264 // \note 1D images will be added by a later extension.
1268 TYPE_SAMPLER_2D_ARRAY,
1270 // TYPE_SAMPLER_1D_SHADOW,
1271 TYPE_SAMPLER_2D_SHADOW,
1272 TYPE_SAMPLER_CUBE_SHADOW,
1273 TYPE_SAMPLER_2D_ARRAY_SHADOW,
1274 // TYPE_INT_SAMPLER_1D,
1275 TYPE_INT_SAMPLER_2D,
1276 TYPE_INT_SAMPLER_CUBE,
1277 TYPE_INT_SAMPLER_2D_ARRAY,
1278 TYPE_INT_SAMPLER_3D,
1279 // TYPE_UINT_SAMPLER_1D,
1280 TYPE_UINT_SAMPLER_2D,
1281 TYPE_UINT_SAMPLER_CUBE,
1282 TYPE_UINT_SAMPLER_2D_ARRAY,
1283 TYPE_UINT_SAMPLER_3D,
1284 TYPE_SAMPLER_CUBE_ARRAY,
1285 TYPE_SAMPLER_CUBE_ARRAY_SHADOW,
1286 TYPE_INT_SAMPLER_CUBE_ARRAY,
1287 TYPE_UINT_SAMPLER_CUBE_ARRAY
1290 tcu::TestCaseGroup* const samplerGroup = new tcu::TestCaseGroup(m_testCtx, "sampler", "Sampler Array Indexing Tests");
1291 addChild(samplerGroup);
1293 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
1295 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
1296 tcu::TestCaseGroup* const indexGroup = new tcu::TestCaseGroup(m_testCtx, indexingTypes[indexTypeNdx].name, indexingTypes[indexTypeNdx].description);
1297 samplerGroup->addChild(indexGroup);
1299 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
1301 const ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1302 tcu::TestCaseGroup* const shaderGroup = new tcu::TestCaseGroup(m_testCtx, shaderTypes[shaderTypeNdx].name, "");
1303 indexGroup->addChild(shaderGroup);
1305 for (int samplerTypeNdx = 0; samplerTypeNdx < DE_LENGTH_OF_ARRAY(samplerTypes); samplerTypeNdx++)
1307 const DataType samplerType = samplerTypes[samplerTypeNdx];
1308 const char* samplerName = getDataTypeName(samplerType);
1309 const string caseName = de::toLower(samplerName);
1311 shaderGroup->addChild(new SamplerIndexingCase(m_context, caseName.c_str(), "", shaderType, samplerType, indexExprType));
1317 // .ubo / .ssbo / .atomic_counter
1319 tcu::TestCaseGroup* const uboGroup = new tcu::TestCaseGroup(m_testCtx, "ubo", "Uniform Block Instance Array Indexing Tests");
1320 tcu::TestCaseGroup* const ssboGroup = new tcu::TestCaseGroup(m_testCtx, "ssbo", "Buffer Block Instance Array Indexing Tests");
1321 tcu::TestCaseGroup* const acGroup = new tcu::TestCaseGroup(m_testCtx, "atomic_counter", "Atomic Counter Array Indexing Tests");
1323 addChild(ssboGroup);
1326 for (int indexTypeNdx = 0; indexTypeNdx < DE_LENGTH_OF_ARRAY(indexingTypes); indexTypeNdx++)
1328 const IndexExprType indexExprType = indexingTypes[indexTypeNdx].type;
1329 const char* indexExprName = indexingTypes[indexTypeNdx].name;
1330 const char* indexExprDesc = indexingTypes[indexTypeNdx].description;
1332 for (int shaderTypeNdx = 0; shaderTypeNdx < DE_LENGTH_OF_ARRAY(shaderTypes); shaderTypeNdx++)
1334 const ShaderType shaderType = shaderTypes[shaderTypeNdx].type;
1335 const string name = string(indexExprName) + "_" + shaderTypes[shaderTypeNdx].name;
1337 uboGroup->addChild (new BlockArrayIndexingCase (m_context, name.c_str(), indexExprDesc, BlockArrayIndexingCase::BLOCKTYPE_UNIFORM, indexExprType, shaderType));
1338 acGroup->addChild (new AtomicCounterIndexingCase (m_context, name.c_str(), indexExprDesc, indexExprType, shaderType));
1340 if (indexExprType == INDEX_EXPR_TYPE_CONST_LITERAL || indexExprType == INDEX_EXPR_TYPE_CONST_EXPRESSION)
1341 ssboGroup->addChild (new BlockArrayIndexingCase (m_context, name.c_str(), indexExprDesc, BlockArrayIndexingCase::BLOCKTYPE_BUFFER, indexExprType, shaderType));