if (m_testCtx.getCommandLine().getRunMode() != tcu::RUNMODE_EXECUTE)
{
- // \todo [2016-11-16 pyry] Create DummyRenderContext instead to allow generating all case lists
+ // \todo [2016-11-16 pyry] Create EmptyRenderContext instead to allow generating all case lists
// on a system that doesn't support some GL(ES) versions.
renderCfg.surfaceType = glu::RenderConfig::SURFACETYPE_OFFSCREEN_GENERIC;
}
SETUNIFORM(loc, 1, vec.getPtr()); \
CHECK_SET_UNIFORM(name); \
} \
- struct SetUniform##VECTYPE##Dummy_s \
+ struct SetUniform##VECTYPE##Unused_s \
{ \
int unused; \
}
SETUNIFORM(loc, arraySize, vec->getPtr()); \
CHECK_SET_UNIFORM(name); \
} \
- struct SetUniformPtr##VECTYPE##Dummy_s \
+ struct SetUniformPtr##VECTYPE##Unused_s \
{ \
int unused; \
}
} /* for (all created shader objects) */
}
-/** Dummy init function */
+/** Empty init function */
void InputVariablesCannotBeModifiedTest::init()
{
/* Left blank on purpose — this test requires no one-time setup in init(). */
}
}
-/** Dummy init function */
+/** Empty init function */
void InvalidUseCasesForAllNotFuncsAndExclMarkOpTest::init()
{
/* Left blank on purpose — this test requires no one-time setup in init(). */
}
}
-/** Dummy init function */
+/** Empty init function */
void InvalidVSInputsTest::init()
{
/* Left blank on purpose — this test requires no one-time setup in init(). */
}
}
-/** Dummy init function */
+/** Empty init function */
void ParenthesisInLayoutQualifierIntegerValuesTest::init()
{
/* Left blank on purpose */
} /* switch (iteration) */
}
-/** Returns a dummy vertex shader body, with main() entry-point using code passed by
+/** Returns a vertex shader body, with main() entry-point using code passed by
* the @param main_body argument.
*
* @param context_type Running rendering context's type.
return vs_body_sstream.str();
}
-/** Dummy init function */
+/** Empty init function */
void PerVertexValidationTest::init()
{
/* Left blank on purpose */
return structAllowed;
}
-/** Dummy init function */
+/** Empty init function */
void ReservedNamesTest::init()
{
/* Left blank on purpose — this test requires no one-time setup in init(). */
}
}
-/** Dummy init function */
+/** Empty init function */
void SparseBuffersWithCopyOpsTest::init()
{
/* Nothing to do here */
if (clipdistances_array_size == 0 && culldistances_array_size == 0)
{
- /* Skip the dummy iteration */
+ /* Skip the empty iteration */
continue;
}
}
}
- /* Dummy vertex shader. */
- const char* dummyVsh()
+ /* Minimal vertex shader. */
+ const char* minimalVsh()
{
return "#version 150 core\n"
"void main() {\n"
"}\n";
}
- /* Dummy fragment shader */
- const char* dummyFsh()
+ /* Minimal fragment shader */
+ const char* minimalFsh()
{
return "#version 150 core\n"
"void main() {}\n";
// Setup checker program
m_checker_program =
- new glu::ShaderProgram(m_context.getRenderContext(), glu::makeVtxFragSources(dummyVsh(), dummyFsh()));
+ new glu::ShaderProgram(m_context.getRenderContext(), glu::makeVtxFragSources(minimalVsh(), minimalFsh()));
if (!m_checker_program->isOk())
{
TCU_FAIL("Checker program compilation failed");
/* Common variables. */
glw::GLuint buffer = 0;
- glw::GLbyte dummy_data = 0;
+ glw::GLbyte unused_data = 0;
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data,
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBufferStorage failed.");
;
/* Test. */
- m_pClearNamedBufferData(not_a_buffer_name, GL_R8, GL_RED, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferData(not_a_buffer_name, GL_R8, GL_RED, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferData", GL_INVALID_OPERATION,
" if buffer is not the name of an existing buffer object.");
;
/* Test. */
- m_pClearNamedBufferData(buffer, invalid_internal_format, GL_RED, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferData(buffer, invalid_internal_format, GL_RED, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferData", GL_INVALID_ENUM,
" if internal format is not one of the valid sized internal formats "
(void)(glw::GLbyte*) m_pMapNamedBuffer(buffer, GL_READ_ONLY);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pClearNamedBufferData(buffer, GL_R8, GL_RED, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferData(buffer, GL_R8, GL_RED, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferData", GL_INVALID_OPERATION,
" if any part of the specified range of the buffer"
/* Test of mapped buffer clear error behavior verification (glMapNamedBufferRange version). */
{
- (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_READ_BIT);
+ (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pClearNamedBufferData(buffer, GL_R8, GL_RED, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferData(buffer, GL_R8, GL_RED, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferData", GL_INVALID_OPERATION,
" if any part of the specified range of the buffer"
/* Test of persistently mapped buffer clear error with behavior verification (glMapNamedBufferRange version). */
{
(void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pClearNamedBufferData(buffer, GL_R8, GL_RED, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferData(buffer, GL_R8, GL_RED, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferData", GL_NO_ERROR,
" if any part of the specified range of the buffer"
;
/* Test. */
- m_pClearNamedBufferData(buffer, GL_R8, invalid_format, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferData(buffer, GL_R8, invalid_format, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog(
"glClearNamedBufferData", GL_INVALID_VALUE,
;
/* Test. */
- m_pClearNamedBufferData(buffer, GL_R8, GL_RED, invalid_type, &dummy_data);
+ m_pClearNamedBufferData(buffer, GL_R8, GL_RED, invalid_type, &unused_data);
is_ok &= ErrorCheckAndLog(
"glClearNamedBufferData", GL_INVALID_VALUE,
/* Common variables. */
glw::GLuint buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data,
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
;
/* Test. */
- m_pClearNamedBufferSubData(not_a_buffer_name, GL_R8, 0, sizeof(dummy_data), GL_RGBA, GL_UNSIGNED_BYTE,
- &dummy_data);
+ m_pClearNamedBufferSubData(not_a_buffer_name, GL_R8, 0, sizeof(unused_data), GL_RGBA, GL_UNSIGNED_BYTE,
+ &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferSubData", GL_INVALID_OPERATION,
" if buffer is not the name of an existing buffer object.");
;
/* Test. */
- m_pClearNamedBufferData(buffer, invalid_internal_format, GL_RGBA, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferData(buffer, invalid_internal_format, GL_RGBA, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferSubData", GL_INVALID_ENUM,
" if internal format is not one of the valid sized internal formats "
/* Test incorrect offset alignment error behavior. */
{
/* Test. */
- m_pClearNamedBufferSubData(buffer, GL_RGBA8, sizeof(dummy_data[0]), sizeof(dummy_data), GL_RGBA,
- GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_RGBA8, sizeof(unused_data[0]), sizeof(unused_data), GL_RGBA,
+ GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferSubData", GL_INVALID_VALUE,
"if offset is not multiples of the number of basic machine units (GLubyte)"
/* Test incorrect range alignment error behavior. */
{
- m_pClearNamedBufferSubData(buffer, GL_RGBA8, 0, sizeof(dummy_data) - sizeof(dummy_data[0]), GL_RGBA,
- GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_RGBA8, 0, sizeof(unused_data) - sizeof(unused_data[0]), GL_RGBA,
+ GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferSubData", GL_INVALID_VALUE,
"if range is not multiples of the number of basic machine units (GLubyte)"
/* Test negative offset error behavior. */
{
/* Test. */
- m_pClearNamedBufferSubData(buffer, GL_R8, -1, sizeof(dummy_data), GL_RGBA, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_R8, -1, sizeof(unused_data), GL_RGBA, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferSubData", GL_INVALID_VALUE, " if offset or size is negative.");
}
/* Test negative size error behavior. */
{
/* Test. */
- m_pClearNamedBufferSubData(buffer, GL_R8, 0, -((glw::GLsizei)sizeof(dummy_data)), GL_RGBA, GL_UNSIGNED_BYTE,
- &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_R8, 0, -((glw::GLsizei)sizeof(unused_data)), GL_RGBA, GL_UNSIGNED_BYTE,
+ &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferSubData", GL_INVALID_VALUE, " if offset or size is negative.");
}
/* Test size overflow error behavior. */
{
/* Test. */
- m_pClearNamedBufferSubData(buffer, GL_R8, 0, 2 * sizeof(dummy_data), GL_RGBA, GL_UNSIGNED_BYTE,
- &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_R8, 0, 2 * sizeof(unused_data), GL_RGBA, GL_UNSIGNED_BYTE,
+ &unused_data);
is_ok &= ErrorCheckAndLog(
"glClearNamedBufferSubData", GL_INVALID_VALUE,
(void)(glw::GLbyte*) m_pMapNamedBuffer(buffer, GL_READ_ONLY);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(dummy_data), GL_RGBA, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(unused_data), GL_RGBA, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferSubData", GL_INVALID_OPERATION,
" if any part of the specified range of the buffer"
/* Test of mapped buffer clear error behavior verification (glMapNamedBufferRange version). */
{
- (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_READ_BIT);
+ (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(dummy_data), GL_RGBA, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(unused_data), GL_RGBA, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferSubData", GL_INVALID_OPERATION,
" if any part of the specified range of the buffer"
/* Test of persistently mapped buffer clear error with behavior verification (glMapNamedBufferRange version). */
{
(void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(dummy_data), GL_RGBA, GL_UNSIGNED_BYTE, &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(unused_data), GL_RGBA, GL_UNSIGNED_BYTE, &unused_data);
is_ok &= ErrorCheckAndLog("glClearNamedBufferSubData", GL_NO_ERROR,
" if any part of the specified range of the buffer"
;
/* Test. */
- m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(dummy_data), invalid_format, GL_UNSIGNED_BYTE,
- &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(unused_data), invalid_format, GL_UNSIGNED_BYTE,
+ &unused_data);
is_ok &= ErrorCheckAndLog(
"glClearNamedBufferSubData", GL_INVALID_VALUE,
;
/* Test. */
- m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(dummy_data), GL_RGBA, invalid_type, &dummy_data);
+ m_pClearNamedBufferSubData(buffer, GL_R8, 0, sizeof(unused_data), GL_RGBA, invalid_type, &unused_data);
is_ok &= ErrorCheckAndLog(
"glClearNamedBufferSubData", GL_INVALID_VALUE,
/* Common variables. */
glw::GLuint buffer_r = 0;
glw::GLuint buffer_w = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer_r);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer_r, sizeof(dummy_data), &dummy_data,
+ m_pNamedBufferStorage(buffer_r, sizeof(unused_data), &unused_data,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
gl.createBuffers(1, &buffer_w);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer_w, sizeof(dummy_data), &dummy_data,
+ m_pNamedBufferStorage(buffer_w, sizeof(unused_data), &unused_data,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
;
/* Test. */
- m_pCopyNamedBufferSubData(not_a_buffer_name, buffer_w, 0, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(not_a_buffer_name, buffer_w, 0, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_OPERATION,
" if readBuffer is not the name of an existing buffer object.");
- m_pCopyNamedBufferSubData(buffer_r, not_a_buffer_name, 0, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, not_a_buffer_name, 0, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_OPERATION,
" if writeBuffer is not the name of an existing buffer object.");
/* Test negative read offset error behavior. */
{
/* Test. */
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, -1, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, -1, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_VALUE, "if readOffset is negative.");
}
/* Test negative write offset error behavior. */
{
/* Test. */
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, -1, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, -1, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_VALUE, "if writeOffset is negative.");
}
/* Test overflow size error behavior. */
{
/* Test. */
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, 2 * sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, 2 * sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_VALUE,
" if size is greater than the size of the source buffer object.");
/* Test overflow read offset and size error behavior. */
{
/* Test. */
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, sizeof(dummy_data) / 2, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, sizeof(unused_data) / 2, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_VALUE,
" if readOffset+size is greater than the size of the source buffer object.");
/* Test overflow write offset and size error behavior. */
{
/* Test. */
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, sizeof(dummy_data) / 2, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, sizeof(unused_data) / 2, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_VALUE,
" if writeOffset+size is greater than the size of the source buffer object.");
/* Test same buffer overlapping error behavior. */
{
/* Test. */
- m_pCopyNamedBufferSubData(buffer_w, buffer_w, 0, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_w, buffer_w, 0, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_VALUE,
" if the source and destination are the same buffer object, and the ranges"
(void)(glw::GLbyte*) m_pMapNamedBuffer(buffer_r, GL_READ_ONLY);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_OPERATION,
" if the source buffer object is mapped with MapBuffer.");
(void)(glw::GLbyte*) m_pMapNamedBuffer(buffer_w, GL_READ_ONLY);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_OPERATION,
" if the destination buffer object is mapped with MapBuffer.");
/* Test of mapped read buffer copy error behavior verification (glMapNamedBufferRange version). */
{
- (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer_r, 0, sizeof(dummy_data), GL_MAP_READ_BIT);
+ (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer_r, 0, sizeof(unused_data), GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_OPERATION,
" if the source buffer object is mapped with MapBuffer.");
/* Test of mapped write buffer copy error behavior verification (glMapNamedBufferRange version). */
{
- (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer_w, 0, sizeof(dummy_data), GL_MAP_READ_BIT);
+ (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer_w, 0, sizeof(unused_data), GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_INVALID_OPERATION,
" if the destination buffer object is mapped with MapBuffer.");
/* Test of persistently mapped read buffer copy error with behavior verification. */
{
(void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer_r, 0, sizeof(dummy_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pMapNamedBufferRange(buffer_r, 0, sizeof(unused_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_NO_ERROR,
" if the source buffer object is mapped using "
/* Test of persistently mapped write buffer copy error with behavior verification. */
{
(void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer_w, 0, sizeof(dummy_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pMapNamedBufferRange(buffer_w, 0, sizeof(unused_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(dummy_data));
+ m_pCopyNamedBufferSubData(buffer_r, buffer_w, 0, 0, sizeof(unused_data));
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
is_ok &= ErrorCheckAndLog("glCopyNamedBufferSubData", GL_NO_ERROR,
/* Common variables. */
glw::GLuint buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
/* Test invalid buffer name flush error behavior. */
/* Test negative offset flush error behavior. */
{
(void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
m_pFlushMappedNamedBufferRange(buffer, -1, 1);
/* Test negative length flush error behavior. */
{
(void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
m_pFlushMappedNamedBufferRange(buffer, 0, -1);
/* Test length exceeds the mapping size flush error behavior. */
{
- (void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data) / 2, GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
+ (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data) / 2,
+ GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pFlushMappedNamedBufferRange(buffer, 0, sizeof(dummy_data));
+ m_pFlushMappedNamedBufferRange(buffer, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glFlushMappedNamedBufferRange", GL_INVALID_VALUE,
" if length exceeds the size of the mapping.");
/* Test offset + length exceeds the mapping size flush error behavior. */
{
(void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_WRITE_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pFlushMappedNamedBufferRange(buffer, 1, sizeof(dummy_data));
+ m_pFlushMappedNamedBufferRange(buffer, 1, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glFlushMappedNamedBufferRange", GL_INVALID_VALUE,
" if offset + length exceeds the size of the mapping.");
/* Test not mapped buffer flush error behavior. */
{
- m_pFlushMappedNamedBufferRange(buffer, 0, sizeof(dummy_data));
+ m_pFlushMappedNamedBufferRange(buffer, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glFlushMappedNamedBufferRange", GL_INVALID_OPERATION,
" if the buffer object is not mapped.");
/* Test buffer flush without the MAP_FLUSH_EXPLICIT_BIT error behavior. */
{
- (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_WRITE_BIT);
+ (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_WRITE_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pFlushMappedNamedBufferRange(buffer, 0, sizeof(dummy_data));
+ m_pFlushMappedNamedBufferRange(buffer, 0, sizeof(unused_data));
is_ok &= ErrorCheckAndLog("glFlushMappedNamedBufferRange", GL_INVALID_OPERATION,
" if the buffer is mapped without the MAP_FLUSH_EXPLICIT_BIT flag.");
/* Common variables. */
glw::GLuint buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
/* Test invalid buffer name in GetNamedBufferParameteriv function error behavior. */
/* Common variables. */
glw::GLuint buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
/* Test invalid buffer name in GetNamedBufferPointerv function error behavior. */
/* Common variables. */
glw::GLuint buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
/* Test invalid buffer name in pGetNamedBufferSubData function error behavior. */
;
/* Query storage. */
- glw::GLubyte dummy_data_query[sizeof(dummy_data) / sizeof(dummy_data[0])] = {};
+ glw::GLubyte unused_data_query[sizeof(unused_data) / sizeof(unused_data[0])] = {};
/* Test. */
- m_pGetNamedBufferSubData(not_a_buffer_name, 0, sizeof(dummy_data_query), dummy_data_query);
+ m_pGetNamedBufferSubData(not_a_buffer_name, 0, sizeof(unused_data_query), unused_data_query);
is_ok &= ErrorCheckAndLog("glGetNamedBufferSubData", GL_INVALID_OPERATION,
" if buffer is not the name of an existing buffer object.");
/* Test negative offset error behavior. */
{
/* Query storage. */
- glw::GLubyte dummy_data_query[sizeof(dummy_data) / sizeof(dummy_data[0])] = {};
+ glw::GLubyte unused_data_query[sizeof(unused_data) / sizeof(unused_data[0])] = {};
/* Test. */
- m_pGetNamedBufferSubData(buffer, -1, sizeof(dummy_data_query), dummy_data_query);
+ m_pGetNamedBufferSubData(buffer, -1, sizeof(unused_data_query), unused_data_query);
is_ok &= ErrorCheckAndLog("glGetNamedBufferSubData", GL_INVALID_VALUE, " if offset is negative.");
}
/* Test negative size error behavior. */
{
/* Query storage. */
- glw::GLubyte dummy_data_query[sizeof(dummy_data) / sizeof(dummy_data[0])] = {};
+ glw::GLubyte unused_data_query[sizeof(unused_data) / sizeof(unused_data[0])] = {};
/* Test. */
- m_pGetNamedBufferSubData(buffer, 0, -1, dummy_data_query);
+ m_pGetNamedBufferSubData(buffer, 0, -1, unused_data_query);
is_ok &= ErrorCheckAndLog("glGetNamedBufferSubData", GL_INVALID_VALUE, " if size is negative.");
}
/* Test size overflow error behavior. */
{
/* Query storage. */
- glw::GLubyte dummy_data_query[sizeof(dummy_data) / sizeof(dummy_data[0])] = {};
+ glw::GLubyte unused_data_query[sizeof(unused_data) / sizeof(unused_data[0])] = {};
/* Test. */
- m_pGetNamedBufferSubData(buffer, 0, 2 * sizeof(dummy_data_query), dummy_data_query);
+ m_pGetNamedBufferSubData(buffer, 0, 2 * sizeof(unused_data_query), unused_data_query);
is_ok &= ErrorCheckAndLog("glGetNamedBufferSubData", GL_INVALID_VALUE,
" if size is greater than the value of BUFFER_SIZE for the buffer object.");
/* Test offset+size overflow error behavior. */
{
/* Query storage. */
- glw::GLubyte dummy_data_query[sizeof(dummy_data) / sizeof(dummy_data[0])] = {};
+ glw::GLubyte unused_data_query[sizeof(unused_data) / sizeof(unused_data[0])] = {};
/* Test. */
- m_pGetNamedBufferSubData(buffer, sizeof(dummy_data_query) / 2, sizeof(dummy_data_query), dummy_data_query);
+ m_pGetNamedBufferSubData(buffer, sizeof(unused_data_query) / 2, sizeof(unused_data_query),
+ unused_data_query);
is_ok &=
ErrorCheckAndLog("glGetNamedBufferSubData", GL_INVALID_VALUE,
/* Test offset overflow error behavior. */
{
/* Query storage. */
- glw::GLubyte dummy_data_query[sizeof(dummy_data) / sizeof(dummy_data[0])] = {};
+ glw::GLubyte unused_data_query[sizeof(unused_data) / sizeof(unused_data[0])] = {};
/* Test. */
- m_pGetNamedBufferSubData(buffer, sizeof(dummy_data_query) + 1, 0, dummy_data_query);
+ m_pGetNamedBufferSubData(buffer, sizeof(unused_data_query) + 1, 0, unused_data_query);
is_ok &= ErrorCheckAndLog("glGetNamedBufferSubData", GL_INVALID_VALUE,
" if offset is greater than the value of BUFFER_SIZE for the buffer object.");
/* Test mapped buffer query error behavior. */
{
/* Query storage. */
- glw::GLubyte dummy_data_query[sizeof(dummy_data) / sizeof(dummy_data[0])] = {};
+ glw::GLubyte unused_data_query[sizeof(unused_data) / sizeof(unused_data[0])] = {};
/* Test. */
(void)(glw::GLbyte*) m_pMapNamedBuffer(buffer, GL_WRITE_ONLY);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pGetNamedBufferSubData(buffer, 0, sizeof(dummy_data_query), dummy_data_query);
+ m_pGetNamedBufferSubData(buffer, 0, sizeof(unused_data_query), unused_data_query);
is_ok &= ErrorCheckAndLog("glGetNamedBufferSubData", GL_INVALID_OPERATION,
" if the buffer object is mapped with MapBufferRange.");
/* Test mapped buffer query error behavior. */
{
/* Query storage. */
- glw::GLubyte dummy_data_query[sizeof(dummy_data) / sizeof(dummy_data[0])] = {};
+ glw::GLubyte unused_data_query[sizeof(unused_data) / sizeof(unused_data[0])] = {};
/* Test. */
- (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_WRITE_BIT);
+ (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_WRITE_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pGetNamedBufferSubData(buffer, 0, sizeof(dummy_data_query), dummy_data_query);
+ m_pGetNamedBufferSubData(buffer, 0, sizeof(unused_data_query), unused_data_query);
is_ok &= ErrorCheckAndLog("glGetNamedBufferSubData", GL_INVALID_OPERATION,
" if the buffer object is mapped with MapBufferRange.");
/* Test persistently mapped buffer query behavior. */
{
/* Query storage. */
- glw::GLubyte dummy_data_query[sizeof(dummy_data) / sizeof(dummy_data[0])] = {};
+ glw::GLubyte unused_data_query[sizeof(unused_data) / sizeof(unused_data[0])] = {};
/* Test. */
(void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pGetNamedBufferSubData(buffer, 0, sizeof(dummy_data_query), dummy_data_query);
+ m_pGetNamedBufferSubData(buffer, 0, sizeof(unused_data_query), unused_data_query);
is_ok &= ErrorCheckAndLog(
"glGetNamedBufferSubData", GL_NO_ERROR,
/* Common variables. */
glw::GLuint buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data, GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
/* Test invalid buffer name error behavior. */
/* Common variables. */
glw::GLuint buffer = 0;
glw::GLuint buffer_special_flags = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data,
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
;
/* Test. */
- m_pMapNamedBufferRange(not_a_buffer_name, 0, sizeof(dummy_data), GL_MAP_READ_BIT);
+ m_pMapNamedBufferRange(not_a_buffer_name, 0, sizeof(unused_data), GL_MAP_READ_BIT);
is_ok &= ErrorCheckAndLog("glMapNamedBufferRange", GL_INVALID_OPERATION,
" if buffer is not the name of an existing buffer object.");
/* Test negative offset error behavior. */
{
- glw::GLvoid* mapped_data = m_pMapNamedBufferRange(buffer, -1, sizeof(dummy_data), GL_MAP_READ_BIT);
+ glw::GLvoid* mapped_data = m_pMapNamedBufferRange(buffer, -1, sizeof(unused_data), GL_MAP_READ_BIT);
is_ok &= ErrorCheckAndLog("glMapNamedBufferRange", GL_INVALID_VALUE, " if offset is negative.");
/* Test length overflow error behavior. */
{
- glw::GLvoid* mapped_data = m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data) * 2, GL_MAP_READ_BIT);
+ glw::GLvoid* mapped_data = m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data) * 2, GL_MAP_READ_BIT);
is_ok &= ErrorCheckAndLog("glMapNamedBufferRange", GL_INVALID_VALUE,
" if length is greater than the value of BUFFER_SIZE"
/* Test (offset+length) overflow error behavior. */
{
glw::GLvoid* mapped_data =
- m_pMapNamedBufferRange(buffer, sizeof(dummy_data) / 2, sizeof(dummy_data), GL_MAP_READ_BIT);
+ m_pMapNamedBufferRange(buffer, sizeof(unused_data) / 2, sizeof(unused_data), GL_MAP_READ_BIT);
is_ok &= ErrorCheckAndLog("glMapNamedBufferRange", GL_INVALID_VALUE,
" if offset+length is greater than the value of BUFFER_SIZE"
/* Test mapping of mapped buffer error behavior. */
{
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_READ_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer.");
glw::GLvoid* subsequent_mapped_data =
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_READ_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_READ_BIT);
is_ok &= ErrorCheckAndLog("glMapNamedBufferRange", GL_INVALID_OPERATION,
" if the buffer object is in a mapped state.");
/* Test access flag read and write bits are not set error behavior. */
{
- glw::GLvoid* mapped_data = m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), 0);
+ glw::GLvoid* mapped_data = m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), 0);
is_ok &= ErrorCheckAndLog("glMapNamedBufferRange", GL_INVALID_OPERATION,
" if neither MAP_READ_BIT nor MAP_WRITE_BIT is set.");
for (glw::GLuint i = 0; i < read_access_invalid_flags_count; ++i)
{
- glw::GLvoid* mapped_data = m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data),
+ glw::GLvoid* mapped_data = m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data),
GL_MAP_READ_BIT | read_access_invalid_flags[i]);
is_ok &=
/* Test access flush bit without write bit error behavior. */
{
glw::GLvoid* mapped_data =
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_READ_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_READ_BIT | GL_MAP_FLUSH_EXPLICIT_BIT);
is_ok &= ErrorCheckAndLog("glMapNamedBufferRange", GL_INVALID_OPERATION,
" if MAP_FLUSH_EXPLICIT_BIT is set and MAP_WRITE_BIT is not set.");
gl.createBuffers(1, &buffer_special_flags);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer_special_flags, sizeof(dummy_data), &dummy_data, buffer_flags[i]);
+ m_pNamedBufferStorage(buffer_special_flags, sizeof(unused_data), &unused_data, buffer_flags[i]);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
/* Test mapping. */
glw::GLvoid* mapped_data =
- m_pMapNamedBufferRange(buffer_special_flags, 0, sizeof(dummy_data), mapping_flags[i]);
+ m_pMapNamedBufferRange(buffer_special_flags, 0, sizeof(unused_data), mapping_flags[i]);
is_ok &= ErrorCheckAndLog("glMapNamedBufferRange", GL_INVALID_OPERATION, mapping_flags_log[i]);
/* Common variables. */
glw::GLuint buffer = 0;
glw::GLuint immutable_buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
std::stack<glw::GLuint> too_much_buffers;
try
gl.createBuffers(1, &immutable_buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(immutable_buffer, sizeof(dummy_data), &dummy_data, GL_MAP_READ_BIT);
+ m_pNamedBufferStorage(immutable_buffer, sizeof(unused_data), &unused_data, GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
/* Test invalid buffer name error behavior. */
;
/* Test. */
- m_pNamedBufferData(not_a_buffer_name, sizeof(dummy_data), dummy_data, GL_DYNAMIC_COPY);
+ m_pNamedBufferData(not_a_buffer_name, sizeof(unused_data), unused_data, GL_DYNAMIC_COPY);
is_ok &= ErrorCheckAndLog("glNamedBufferData", GL_INVALID_OPERATION,
" if buffer is not the name of an existing buffer object.");
;
/* Test. */
- m_pNamedBufferData(buffer, sizeof(dummy_data), dummy_data, invalid_usage);
+ m_pNamedBufferData(buffer, sizeof(unused_data), unused_data, invalid_usage);
is_ok &=
ErrorCheckAndLog("glNamedBufferData", GL_INVALID_ENUM,
/* Test negative size error behavior. */
{
- m_pNamedBufferData(buffer, -1, dummy_data, GL_DYNAMIC_COPY);
+ m_pNamedBufferData(buffer, -1, unused_data, GL_DYNAMIC_COPY);
is_ok &= ErrorCheckAndLog("glNamedBufferData", GL_INVALID_VALUE, " if size is negative.");
}
/* Test immutable buffer error behavior. */
{
- m_pNamedBufferData(immutable_buffer, sizeof(dummy_data) / 2, dummy_data, GL_DYNAMIC_COPY);
+ m_pNamedBufferData(immutable_buffer, sizeof(unused_data) / 2, unused_data, GL_DYNAMIC_COPY);
is_ok &= ErrorCheckAndLog("glNamedBufferData", GL_INVALID_OPERATION,
" if the BUFFER_IMMUTABLE_STORAGE flag of the buffer object is TRUE.");
/* Common variables. */
glw::GLuint buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
std::stack<glw::GLuint> too_much_buffers;
try
;
/* Test. */
- m_pNamedBufferStorage(not_a_buffer_name, sizeof(dummy_data), dummy_data, GL_MAP_WRITE_BIT);
+ m_pNamedBufferStorage(not_a_buffer_name, sizeof(unused_data), unused_data, GL_MAP_WRITE_BIT);
is_ok &= ErrorCheckAndLog("glNamedBufferStorage", GL_INVALID_OPERATION,
" if buffer is not the name of an existing buffer object.");
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
/* Test negative size. */
- m_pNamedBufferStorage(buffer, -1, dummy_data, GL_DYNAMIC_COPY);
+ m_pNamedBufferStorage(buffer, -1, unused_data, GL_DYNAMIC_COPY);
is_ok &= ErrorCheckAndLog("glNamedBufferStorage", GL_INVALID_VALUE, " if size is negative.");
/* Test zero size. */
- m_pNamedBufferStorage(buffer, 0, dummy_data, GL_DYNAMIC_COPY);
+ m_pNamedBufferStorage(buffer, 0, unused_data, GL_DYNAMIC_COPY);
is_ok &= ErrorCheckAndLog("glNamedBufferStorage", GL_INVALID_VALUE, " if size zero.");
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
/* Test invalid bit. */
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), dummy_data, possibly_invalid_bit);
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), unused_data, possibly_invalid_bit);
is_ok &=
ErrorCheckAndLog("glNamedBufferStorage", GL_INVALID_VALUE,
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
/* Test. */
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), dummy_data, GL_MAP_PERSISTENT_BIT);
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), unused_data, GL_MAP_PERSISTENT_BIT);
is_ok &= ErrorCheckAndLog("glNamedBufferStorage", GL_INVALID_VALUE, " if flags contains MAP_PERSISTENT_BIT "
"but does not contain at least one of "
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
/* Test. */
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), dummy_data,
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), unused_data,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_COHERENT_BIT);
is_ok &=
/* Common variables. */
glw::GLuint buffer = 0;
glw::GLuint immutable_storage_buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data,
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_DYNAMIC_STORAGE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
gl.createBuffers(1, &immutable_storage_buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(immutable_storage_buffer, sizeof(dummy_data), &dummy_data, GL_MAP_READ_BIT);
+ m_pNamedBufferStorage(immutable_storage_buffer, sizeof(unused_data), &unused_data, GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
/* Test invalid buffer name error behavior. */
;
/* Test. */
- m_pNamedBufferSubData(not_a_buffer_name, 0, sizeof(dummy_data), &dummy_data);
+ m_pNamedBufferSubData(not_a_buffer_name, 0, sizeof(unused_data), &unused_data);
is_ok &= ErrorCheckAndLog("glNamedBufferSubData", GL_INVALID_OPERATION,
" if buffer is not the name of an existing buffer object.");
/* Test negative offset error behavior. */
{
/* Test. */
- m_pNamedBufferSubData(buffer, -1, sizeof(dummy_data), &dummy_data);
+ m_pNamedBufferSubData(buffer, -1, sizeof(unused_data), &unused_data);
is_ok &= ErrorCheckAndLog("glNamedBufferSubData", GL_INVALID_VALUE, " if offset or size is negative.");
}
/* Test negative size error behavior. */
{
/* Test. */
- m_pNamedBufferSubData(buffer, 0, -1, &dummy_data);
+ m_pNamedBufferSubData(buffer, 0, -1, &unused_data);
is_ok &= ErrorCheckAndLog("glNamedBufferSubData", GL_INVALID_VALUE, " if offset or size is negative.");
}
/* Test size overflow error behavior. */
{
/* Test. */
- m_pNamedBufferSubData(buffer, 0, sizeof(dummy_data) * 2, &dummy_data);
+ m_pNamedBufferSubData(buffer, 0, sizeof(unused_data) * 2, &unused_data);
is_ok &= ErrorCheckAndLog(
"glNamedBufferSubData", GL_INVALID_VALUE,
/* Test offset+size overflow error behavior. */
{
/* Test. */
- m_pNamedBufferSubData(buffer, sizeof(dummy_data) / 2, sizeof(dummy_data), &dummy_data);
+ m_pNamedBufferSubData(buffer, sizeof(unused_data) / 2, sizeof(unused_data), &unused_data);
is_ok &= ErrorCheckAndLog(
"glNamedBufferSubData", GL_INVALID_VALUE,
(void)(glw::GLbyte*) m_pMapNamedBuffer(buffer, GL_READ_ONLY);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pNamedBufferSubData(buffer, 0, sizeof(dummy_data), &dummy_data);
+ m_pNamedBufferSubData(buffer, 0, sizeof(unused_data), &unused_data);
is_ok &= ErrorCheckAndLog("glNamedBufferSubData", GL_INVALID_OPERATION,
" if any part of the specified range of the buffer"
/* Test of mapped buffer subdata error behavior verification (with glMapBufferRange). */
{
- (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_READ_BIT);
+ (void)(glw::GLbyte*) m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_READ_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pNamedBufferSubData(buffer, 0, sizeof(dummy_data), &dummy_data);
+ m_pNamedBufferSubData(buffer, 0, sizeof(unused_data), &unused_data);
is_ok &= ErrorCheckAndLog("glNamedBufferSubData", GL_INVALID_OPERATION,
" if any part of the specified range of the buffer"
/* Test of persistently mapped buffer clear error with behavior verification. */
{
(void)(glw::GLbyte*)
- m_pMapNamedBufferRange(buffer, 0, sizeof(dummy_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
+ m_pMapNamedBufferRange(buffer, 0, sizeof(unused_data), GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glMapNamedBuffer failed.");
- m_pNamedBufferSubData(buffer, 0, sizeof(dummy_data), &dummy_data);
+ m_pNamedBufferSubData(buffer, 0, sizeof(unused_data), &unused_data);
is_ok &= ErrorCheckAndLog("glNamedBufferSubData", GL_NO_ERROR,
" if any part of the specified range of the buffer"
/* Test DYNAMIC_STORAGE_BIT bit off immutable buffer not set error behavior. */
{
/* Test. */
- m_pNamedBufferSubData(immutable_storage_buffer, 0, sizeof(dummy_data), &dummy_data);
+ m_pNamedBufferSubData(immutable_storage_buffer, 0, sizeof(unused_data), &unused_data);
is_ok &= ErrorCheckAndLog("glNamedBufferSubData", GL_INVALID_OPERATION,
" if the value of the BUFFER_IMMUTABLE_STORAGE flag of the buffer object is TRUE"
/* Test DYNAMIC_STORAGE_BIT bit off immutable buffer set no error behavior. */
{
/* Test. */
- m_pNamedBufferSubData(buffer, 0, sizeof(dummy_data), &dummy_data);
+ m_pNamedBufferSubData(buffer, 0, sizeof(unused_data), &unused_data);
is_ok &= ErrorCheckAndLog("glNamedBufferSubData", GL_NO_ERROR,
" if the value of the BUFFER_IMMUTABLE_STORAGE flag of the buffer object is TRUE"
/* Common variables. */
glw::GLuint buffer = 0;
- glw::GLubyte dummy_data[4] = {};
+ glw::GLubyte unused_data[4] = {};
try
{
gl.createBuffers(1, &buffer);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateBuffers failed.");
- m_pNamedBufferStorage(buffer, sizeof(dummy_data), &dummy_data,
+ m_pNamedBufferStorage(buffer, sizeof(unused_data), &unused_data,
GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_DYNAMIC_STORAGE_BIT | GL_MAP_PERSISTENT_BIT);
GLU_EXPECT_NO_ERROR(gl.getError(), "glNamedBuffeStorage failed.");
/* Prepare objects. */
PrepareObjects();
- glw::GLint return_values_dummy_storage[4];
+ glw::GLint return_values_unused_storage[4];
/* Check that INVALID_OPERATION is generated by
GetNamedFramebufferParameteriv if framebuffer is not zero or the name of
an existing framebuffer object. */
- gl.getNamedFramebufferParameteriv(m_fbo_invalid, GL_SAMPLES, return_values_dummy_storage);
+ gl.getNamedFramebufferParameteriv(m_fbo_invalid, GL_SAMPLES, return_values_unused_storage);
is_ok &= ExpectError(GL_INVALID_OPERATION, "GetNamedFramebufferParameteriv",
"framebuffer is not zero or the name of an existing framebuffer object.");
/* Check that INVALID_ENUM is generated by GetNamedFramebufferParameteriv
if pname is not one of the accepted parameter names. */
- gl.getNamedFramebufferParameteriv(m_fbo_valid, m_parameter_invalid, return_values_dummy_storage);
+ gl.getNamedFramebufferParameteriv(m_fbo_valid, m_parameter_invalid, return_values_unused_storage);
is_ok &= ExpectError(GL_INVALID_ENUM, "GetNamedFramebufferParameteriv",
"pname is not one of the accepted parameter names.");
queried, and pname is not one of DOUBLEBUFFER,
IMPLEMENTATION_COLOR_READ_FORMAT, IMPLEMENTATION_COLOR_READ_TYPE,
SAMPLES, SAMPLE_BUFFERS or STEREO. */
- gl.getNamedFramebufferParameteriv(0, GL_FRAMEBUFFER_DEFAULT_WIDTH, return_values_dummy_storage);
+ gl.getNamedFramebufferParameteriv(0, GL_FRAMEBUFFER_DEFAULT_WIDTH, return_values_unused_storage);
is_ok &= ExpectError(GL_INVALID_OPERATION, "GetNamedFramebufferParameteriv",
"a default framebuffer is queried, and pname is not one of DOUBLEBUFFER, "
/* Prepare objects. */
PrepareObjects();
- glw::GLint return_values_dummy_storage[4];
+ glw::GLint return_values_unused_storage[4];
/* Check that GL_INVALID_OPERATION is generated by
GetNamedFramebufferAttachmentParameteriv if framebuffer is not zero or
the name of an existing framebuffer object. */
gl.getNamedFramebufferAttachmentParameteriv(
- m_fbo_invalid, GL_COLOR_ATTACHMENT0, GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE, return_values_dummy_storage);
+ m_fbo_invalid, GL_COLOR_ATTACHMENT0, GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE, return_values_unused_storage);
is_ok &= ExpectError(GL_INVALID_OPERATION, "GetNamedFramebufferAttachmentParameteriv",
"framebuffer is not zero or the name of an existing framebuffer object.");
GetNamedFramebufferAttachmentParameteriv if pname is not valid for the
value of GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE, as described above. */
gl.getNamedFramebufferAttachmentParameteriv(
- m_fbo_valid, GL_COLOR_ATTACHMENT0, GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL, return_values_dummy_storage);
+ m_fbo_valid, GL_COLOR_ATTACHMENT0, GL_FRAMEBUFFER_ATTACHMENT_TEXTURE_LEVEL, return_values_unused_storage);
is_ok &= ExpectError(
GL_INVALID_ENUM, "GetNamedFramebufferAttachmentParameteriv",
is not one of the attachments in table 9.2 (COLOR_ATTACHMENTi, DEPTH_ATTACHMENT, STENCIL_ATTACHMENT, DEPTH_STENCIL_ATTACHMENT), and attachment is not
COLOR_ATTACHMENTm where m is greater than or equal to the value of MAX_COLOR_ATTACHMENTS. */
gl.getNamedFramebufferAttachmentParameteriv(
- m_fbo_valid, m_attachment_invalid, GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE, return_values_dummy_storage);
+ m_fbo_valid, m_attachment_invalid, GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE, return_values_unused_storage);
is_ok &= ExpectError(
GL_INVALID_ENUM, "GetNamedFramebufferAttachmentParameteriv",
FRAMEBUFFER_ATTACHMENT_OBJECT_NAME or
FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE. */
gl.getNamedFramebufferAttachmentParameteriv(
- m_fbo_valid, GL_COLOR_ATTACHMENT1, GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE, return_values_dummy_storage);
+ m_fbo_valid, GL_COLOR_ATTACHMENT1, GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE, return_values_unused_storage);
is_ok &= ExpectError(GL_INVALID_OPERATION, "GetNamedFramebufferAttachmentParameteriv",
"the value of FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE is GL_NONE and pname is not "
DEPTH_STENCIL_ATTACHMENT and pname is FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE. */
gl.getNamedFramebufferAttachmentParameteriv(m_fbo_valid, GL_DEPTH_STENCIL_ATTACHMENT,
GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE,
- return_values_dummy_storage);
+ return_values_unused_storage);
is_ok &=
ExpectError(GL_INVALID_OPERATION, "GetNamedFramebufferAttachmentParameteriv",
queried and attachment is not one the values FRONT, FRONT_LEFT, FRONT_RIGHT,
BACK, BACK_LEFT, BACK_RIGHT, DEPTH, STENCIL. */
gl.getNamedFramebufferAttachmentParameteriv(
- 0, m_default_attachment_invalid, GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE, return_values_dummy_storage);
+ 0, m_default_attachment_invalid, GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE, return_values_unused_storage);
is_ok &= ExpectError(GL_INVALID_ENUM, "GetNamedFramebufferAttachmentParameteriv",
"the default framebuffer is queried and attachment is not one the values FRONT, "
equal to the value of MAX_COLOR_ATTACHMENTS. */
gl.getNamedFramebufferAttachmentParameteriv(m_fbo_valid, GL_COLOR_ATTACHMENT0 + m_max_color_attachments,
GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE,
- return_values_dummy_storage);
+ return_values_unused_storage);
is_ok &= ExpectError(GL_INVALID_OPERATION, "GetNamedFramebufferAttachmentParameteriv",
"a framebuffer object is bound to target and attachment is COLOR_ATTACHMENTm where m is "
gl.getNamedFramebufferAttachmentParameteriv(m_fbo_valid, GL_COLOR_ATTACHMENT0 + m_max_color_attachments + 1,
GL_FRAMEBUFFER_ATTACHMENT_COMPONENT_TYPE,
- return_values_dummy_storage);
+ return_values_unused_storage);
is_ok &= ExpectError(GL_INVALID_OPERATION, "GetNamedFramebufferAttachmentParameteriv",
"a framebuffer object is bound to target and attachment is COLOR_ATTACHMENTm where m is "
/* Prepare objects. */
PrepareObjects();
- glw::GLint return_value_dummy_storage;
+ glw::GLint return_value_unused_storage;
/* Check that INVALID_OPERATION is generated by
GetNamedRenderbufferParameteriv if renderbuffer is not the name of an
existing renderbuffer object. */
- gl.getNamedRenderbufferParameteriv(m_rbo_invalid, GL_RENDERBUFFER_WIDTH, &return_value_dummy_storage);
+ gl.getNamedRenderbufferParameteriv(m_rbo_invalid, GL_RENDERBUFFER_WIDTH, &return_value_unused_storage);
is_ok &= ExpectError(GL_INVALID_OPERATION, "GetNamedRenderbufferParameteriv",
"renderbuffer is not the name of an existing renderbuffer object.");
/* Check that INVALID_ENUM is generated by GetNamedRenderbufferParameteriv
if parameter name is not one of the accepted parameter names described
in specification. */
- gl.getNamedRenderbufferParameteriv(m_rbo_valid, m_parameter_invalid, &return_value_dummy_storage);
+ gl.getNamedRenderbufferParameteriv(m_rbo_valid, m_parameter_invalid, &return_value_unused_storage);
is_ok &= ExpectError(GL_INVALID_ENUM, "GetNamedRenderbufferParameteriv",
"parameter name is not one of the accepted parameter names described in specification.");
glw::GLuint vao = 0;
glw::GLuint not_a_vao = 0;
- /* Dummy storage. */
+ /* Unused storage. */
glw::GLint storage = 0;
glw::GLint64 storage64 = 0;
while (GL_TRUE == gl.isTransformFeedback(++invalid_name))
;
- /* Dummy storage. */
+ /* Unused storage. */
glw::GLint buffer = 314159;
glw::GLint64 buffer64 = 314159;
/* Generating invalid parameter name. */
glw::GLuint invalid_parameter_name = 0;
- /* Dummy storage. */
+ /* Unused storage. */
glw::GLint buffer = 314159;
/* Error variable. */
/* Generating invalid parameter name. */
glw::GLuint invalid_parameter_name = 0;
- /* Dummy storage. */
+ /* Unused storage. */
glw::GLint buffer = 314159;
/* Error variable. */
/* Generating invalid parameter name. */
glw::GLuint invalid_parameter_name = 0;
- /* Dummy storage. */
+ /* Unused storage. */
glw::GLint64 buffer = 314159;
/* Error variable. */
gl.createTransformFeedbacks(1, &xfb);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateTransformFeedbacks have failed");
- /* Dummy storage. */
+ /* Unused storage. */
glw::GLint buffer = 314159;
glw::GLint64 buffer64 = 314159;
/* Shortcut for GL functionality */
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
- /* Dummy storage. */
+ /* Unused storage. */
glw::GLint value = 314159;
/* Test of GetTransformFeedbackiv. */
const glw::GLchar* gl4cts::es31compatibility::ShaderFunctionalCompatibilityTest::s_vertex_shader_body =
"\n"
- "out highp float dummy;\n"
+ "out highp float vout;\n"
"\n"
"void main()\n"
"{\n"
" break;\n"
" }\n"
"\n"
- " dummy = float(gl_VertexID % 4);\n /* Always less than 4. */"
+ " vout = float(gl_VertexID % 4);\n /* Always less than 4. */"
"}\n";
const glw::GLchar* gl4cts::es31compatibility::ShaderFunctionalCompatibilityTest::s_fragment_shader_body =
"\n"
- "in highp float dummy;\n"
+ "in highp float vout;\n"
"\n"
"out highp vec4 result;\n"
"\n"
"{\n"
" TTYPE a = LEFT;\n"
" TTYPE b = RIGHT;\n"
- " BTYPE c = BDATA && BTYPE(dummy < 4.0);\n /* Making sure that expression is not compile time constant. */"
+ " BTYPE c = BDATA && BTYPE(vout < 4.0);\n /* Making sure that expression is not compile time constant. */"
"\n"
" TTYPE mixed = mix(a, b, c);\n"
"\n"
"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
"\n"
"LAYOUTuniform Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"writeonly uniform image2D uni_image;\n"
"\n"
"void main()\n"
"{\n"
- " vec4 result = uni_block.boy + uni_block.man;\n"
+ " vec4 result = uni_block.b + uni_block.a;\n"
"\n"
" imageStore(uni_image, ivec2(gl_GlobalInvocationID.xy), result);\n"
"}\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"LAYOUTuniform Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 gs_fs;\n"
"\n"
"void main()\n"
"{\n"
- " fs_out = gs_fs + uni_block.boy + uni_block.man;\n"
+ " fs_out = gs_fs + uni_block.b + uni_block.a;\n"
"}\n"
"\n";
static const GLchar* gs = "#version 430 core\n"
"layout(triangle_strip, max_vertices = 4) out;\n"
"\n"
"LAYOUTuniform Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 tes_gs[];\n"
"\n"
"void main()\n"
"{\n"
- " gs_fs = tes_gs[0] + uni_block.boy + uni_block.man;\n"
+ " gs_fs = tes_gs[0] + uni_block.b + uni_block.a;\n"
" gl_Position = vec4(-1, -1, 0, 1);\n"
" EmitVertex();\n"
- " gs_fs = tes_gs[0] + uni_block.boy + uni_block.man;\n"
+ " gs_fs = tes_gs[0] + uni_block.b + uni_block.a;\n"
" gl_Position = vec4(-1, 1, 0, 1);\n"
" EmitVertex();\n"
- " gs_fs = tes_gs[0] + uni_block.boy + uni_block.man;\n"
+ " gs_fs = tes_gs[0] + uni_block.b + uni_block.a;\n"
" gl_Position = vec4(1, -1, 0, 1);\n"
" EmitVertex();\n"
- " gs_fs = tes_gs[0] + uni_block.boy + uni_block.man;\n"
+ " gs_fs = tes_gs[0] + uni_block.b + uni_block.a;\n"
" gl_Position = vec4(1, 1, 0, 1);\n"
" EmitVertex();\n"
"}\n"
"layout(vertices = 1) out;\n"
"\n"
"LAYOUTuniform Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 vs_tcs[];\n"
"void main()\n"
"{\n"
"\n"
- " tcs_tes[gl_InvocationID] = vs_tcs[gl_InvocationID] + uni_block.boy + uni_block.man;\n"
+ " tcs_tes[gl_InvocationID] = vs_tcs[gl_InvocationID] + uni_block.b + uni_block.a;\n"
"\n"
" gl_TessLevelOuter[0] = 1.0;\n"
" gl_TessLevelOuter[1] = 1.0;\n"
"layout(isolines, point_mode) in;\n"
"\n"
"LAYOUTuniform Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 tcs_tes[];\n"
"\n"
"void main()\n"
"{\n"
- " tes_gs = tcs_tes[0] + uni_block.boy + uni_block.man;\n"
+ " tes_gs = tcs_tes[0] + uni_block.b + uni_block.a;\n"
"}\n"
"\n";
static const GLchar* vs = "#version 430 core\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"LAYOUTuniform Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 in_vs;\n"
"\n"
"void main()\n"
"{\n"
- " vs_tcs = in_vs + uni_block.boy + uni_block.man;\n"
+ " vs_tcs = in_vs + uni_block.b + uni_block.a;\n"
"}\n"
"\n";
"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
"\n"
"layout (std140) uniform Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"writeonly uniform image2D uni_image;\n"
"{\n"
" vec4 result = vec4(1, 0, 0.5, 1);\n"
"\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" result = vec4(1, 1, 1, 1);\n"
" }\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (std140) uniform Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 gs_fs;\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" fs_out = vec4(1, 1, 1, 1);\n"
" }\n"
"layout(triangle_strip, max_vertices = 4) out;\n"
"\n"
"layout (std140) uniform Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 tes_gs[];\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" gs_fs = vec4(1, 1, 1, 1);\n"
" }\n"
"layout(vertices = 1) out;\n"
"\n"
"layout (std140) uniform Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 vs_tcs[];\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" tcs_tes[gl_InvocationID] = vec4(1, 1, 1, 1);\n"
" }\n"
"layout(isolines, point_mode) in;\n"
"\n"
"layout (std140) uniform Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 tcs_tes[];\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" tes_gs = vec4(1, 1, 1, 1);\n"
" }\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (std140) uniform Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 in_vs;\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" vs_tcs = vec4(1, 1, 1, 1);\n"
" }\n"
if (test_case.m_stage == stage)
{
GLchar buffer[16];
- const GLuint boy_offset = test_case.m_boy_offset;
- const Utils::Type& boy_type = test_case.m_boy_type;
- const GLchar* boy_type_name = boy_type.GetGLSLTypeName();
- const GLuint man_offset = test_case.m_man_offset;
- const Utils::Type& man_type = test_case.m_man_type;
- const GLchar* man_type_name = man_type.GetGLSLTypeName();
+ const GLuint b_offset = test_case.m_b_offset;
+ const Utils::Type& b_type = test_case.m_b_type;
+ const GLchar* b_type_name = b_type.GetGLSLTypeName();
+ const GLuint a_offset = test_case.m_a_offset;
+ const Utils::Type& a_type = test_case.m_a_type;
+ const GLchar* a_type_name = a_type.GetGLSLTypeName();
size_t position = 0;
switch (stage)
TCU_FAIL("Invalid enum");
}
- sprintf(buffer, "%d", boy_offset);
- Utils::replaceToken("BOY_OFFSET", position, buffer, source);
- Utils::replaceToken("BOY_TYPE", position, boy_type_name, source);
- sprintf(buffer, "%d", man_offset);
- Utils::replaceToken("MAN_OFFSET", position, buffer, source);
- Utils::replaceToken("MAN_TYPE", position, man_type_name, source);
- Utils::replaceToken("BOY_TYPE", position, boy_type_name, source);
- Utils::replaceToken("MAN_TYPE", position, man_type_name, source);
+ sprintf(buffer, "%d", b_offset);
+ Utils::replaceToken("B_OFFSET", position, buffer, source);
+ Utils::replaceToken("B_TYPE", position, b_type_name, source);
+ sprintf(buffer, "%d", a_offset);
+ Utils::replaceToken("A_OFFSET", position, buffer, source);
+ Utils::replaceToken("A_TYPE", position, a_type_name, source);
+ Utils::replaceToken("B_TYPE", position, b_type_name, source);
+ Utils::replaceToken("A_TYPE", position, a_type_name, source);
}
else
{
std::stringstream stream;
testCase& test_case = m_test_cases[test_case_index];
- stream << "Type: " << test_case.m_boy_type.GetGLSLTypeName() << ", offset: " << test_case.m_boy_offset
- << ". Type: " << test_case.m_man_type.GetGLSLTypeName() << ", offset: " << test_case.m_man_offset;
+ stream << "Type: " << test_case.m_b_type.GetGLSLTypeName() << ", offset: " << test_case.m_b_offset
+ << ". Type: " << test_case.m_a_type.GetGLSLTypeName() << ", offset: " << test_case.m_a_offset;
return stream.str();
}
for (GLuint i = 0; i < n_types; ++i)
{
- const Utils::Type& boy_type = getType(i);
- const GLuint boy_size = boy_type.GetActualAlignment(1 /* align */, false /* is_array*/);
+ const Utils::Type& b_type = getType(i);
+ const GLuint b_size = b_type.GetActualAlignment(1 /* align */, false /* is_array*/);
for (GLuint j = 0; j < n_types; ++j)
{
- const Utils::Type& man_type = getType(j);
- const GLuint man_align = man_type.GetBaseAlignment(false);
- const GLuint man_size = man_type.GetActualAlignment(1 /* align */, false /* is_array*/);
+ const Utils::Type& a_type = getType(j);
+ const GLuint a_align = a_type.GetBaseAlignment(false);
+ const GLuint a_size = a_type.GetActualAlignment(1 /* align */, false /* is_array*/);
- const GLuint boy_offset = lcm(boy_size, man_size);
- const GLuint man_after_start = boy_offset + 1;
- const GLuint man_after_off = man_type.GetActualOffset(man_after_start, man_size);
- const GLuint man_before_start = boy_offset - man_align;
- const GLuint man_before_off = man_type.GetActualOffset(man_before_start, man_size);
+ const GLuint b_offset = lcm(b_size, a_size);
+ const GLuint a_after_start = b_offset + 1;
+ const GLuint a_after_off = a_type.GetActualOffset(a_after_start, a_size);
+ const GLuint a_before_start = b_offset - a_align;
+ const GLuint a_before_off = a_type.GetActualOffset(a_before_start, a_size);
for (GLuint stage = 0; stage < Utils::Shader::STAGE_MAX; ++stage)
{
continue;
}
- if ((boy_offset > man_before_off) && (boy_offset < man_before_off + man_size))
+ if ((b_offset > a_before_off) && (b_offset < a_before_off + a_size))
{
- testCase test_case = { boy_offset, boy_type, man_before_off, man_type,
+ testCase test_case = { b_offset, b_type, a_before_off, a_type,
(Utils::Shader::STAGES)stage };
m_test_cases.push_back(test_case);
}
- if ((boy_offset < man_after_off) && (boy_offset + boy_size > man_after_off))
+ if ((b_offset < a_after_off) && (b_offset + b_size > a_after_off))
{
- testCase test_case = { boy_offset, boy_type, man_after_off, man_type,
+ testCase test_case = { b_offset, b_type, a_after_off, a_type,
(Utils::Shader::STAGES)stage };
m_test_cases.push_back(test_case);
}
- /* Boy offset, should be fine for both types */
- testCase test_case = { boy_offset, boy_type, boy_offset, man_type, (Utils::Shader::STAGES)stage };
+ /* b offset, should be fine for both types */
+ testCase test_case = { b_offset, b_type, b_offset, a_type, (Utils::Shader::STAGES)stage };
m_test_cases.push_back(test_case);
}
"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
"\n"
"layout (std140) uniform Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"writeonly uniform image2D uni_image;\n"
"{\n"
" vec4 result = vec4(1, 0, 0.5, 1);\n"
"\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " result = vec4(1, 1, 1, 1) - block.boy;\n"
+ " result = vec4(1, 1, 1, 1) - block.b;\n"
" }\n"
"\n"
" imageStore(uni_image, ivec2(gl_GlobalInvocationID.xy), result);\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (std140) uniform Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 gs_fs;\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " fs_out = block.boy;\n"
+ " fs_out = block.b;\n"
" }\n"
"\n"
" fs_out += gs_fs;\n"
"layout(triangle_strip, max_vertices = 4) out;\n"
"\n"
"layout (std140) uniform Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 tes_gs[];\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " gs_fs = block.boy;\n"
+ " gs_fs = block.b;\n"
" }\n"
"\n"
" gs_fs += tes_gs[0];\n"
"layout(vertices = 1) out;\n"
"\n"
"layout (std140) uniform Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 vs_tcs[];\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " tcs_tes[gl_InvocationID] = block.boy;\n"
+ " tcs_tes[gl_InvocationID] = block.b;\n"
" }\n"
"\n"
"\n"
"layout(isolines, point_mode) in;\n"
"\n"
"layout (std140) uniform Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 tcs_tes[];\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " tes_gs = block.boy;\n"
+ " tes_gs = block.b;\n"
" }\n"
"\n"
" tes_gs += tcs_tes[0];\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (std140) uniform Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 in_vs;\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " vs_tcs = block.boy;\n"
+ " vs_tcs = block.b;\n"
" }\n"
"\n"
" vs_tcs += in_vs;\n"
"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
"\n"
"layout (QUALIFIERbinding = BINDING) buffer cs_Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"writeonly uniform image2D uni_image;\n"
"\n"
"void main()\n"
"{\n"
- " vec4 result = uni_block.boy + uni_block.man;\n"
+ " vec4 result = uni_block.b + uni_block.a;\n"
"\n"
" imageStore(uni_image, ivec2(gl_GlobalInvocationID.xy), result);\n"
"}\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (QUALIFIERbinding = BINDING) buffer Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 gs_fs;\n"
"\n"
"void main()\n"
"{\n"
- " fs_out = gs_fs + uni_block.boy + uni_block.man;\n"
+ " fs_out = gs_fs + uni_block.b + uni_block.a;\n"
"}\n"
"\n";
static const GLchar* gs = "#version 430 core\n"
"layout(triangle_strip, max_vertices = 4) out;\n"
"\n"
"layout (QUALIFIERbinding = BINDING) buffer gs_Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 tes_gs[];\n"
"\n"
"void main()\n"
"{\n"
- " gs_fs = tes_gs[0] + uni_block.boy + uni_block.man;\n"
+ " gs_fs = tes_gs[0] + uni_block.b + uni_block.a;\n"
" gl_Position = vec4(-1, -1, 0, 1);\n"
" EmitVertex();\n"
- " gs_fs = tes_gs[0] + uni_block.boy + uni_block.man;\n"
+ " gs_fs = tes_gs[0] + uni_block.b + uni_block.a;\n"
" gl_Position = vec4(-1, 1, 0, 1);\n"
" EmitVertex();\n"
- " gs_fs = tes_gs[0] + uni_block.boy + uni_block.man;\n"
+ " gs_fs = tes_gs[0] + uni_block.b + uni_block.a;\n"
" gl_Position = vec4(1, -1, 0, 1);\n"
" EmitVertex();\n"
- " gs_fs = tes_gs[0] + uni_block.boy + uni_block.man;\n"
+ " gs_fs = tes_gs[0] + uni_block.b + uni_block.a;\n"
" gl_Position = vec4(1, 1, 0, 1);\n"
" EmitVertex();\n"
"}\n"
"layout(vertices = 1) out;\n"
"\n"
"layout (QUALIFIERbinding = BINDING) buffer tcs_Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 vs_tcs[];\n"
"void main()\n"
"{\n"
"\n"
- " tcs_tes[gl_InvocationID] = vs_tcs[gl_InvocationID] + uni_block.boy + uni_block.man;\n"
+ " tcs_tes[gl_InvocationID] = vs_tcs[gl_InvocationID] + uni_block.b + uni_block.a;\n"
"\n"
" gl_TessLevelOuter[0] = 1.0;\n"
" gl_TessLevelOuter[1] = 1.0;\n"
"layout(isolines, point_mode) in;\n"
"\n"
"layout (QUALIFIERbinding = BINDING) buffer tes_Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 tcs_tes[];\n"
"\n"
"void main()\n"
"{\n"
- " tes_gs = tcs_tes[0] + uni_block.boy + uni_block.man;\n"
+ " tes_gs = tcs_tes[0] + uni_block.b + uni_block.a;\n"
"}\n"
"\n";
static const GLchar* vs = "#version 430 core\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (QUALIFIERbinding = BINDING) buffer vs_Block {\n"
- " layout(offset = 16) vec4 boy;\n"
- " layout(align = 64) vec4 man;\n"
+ " layout(offset = 16) vec4 b;\n"
+ " layout(align = 64) vec4 a;\n"
"} uni_block;\n"
"\n"
"in vec4 in_vs;\n"
"\n"
"void main()\n"
"{\n"
- " vs_tcs = in_vs + uni_block.boy + uni_block.man;\n"
+ " vs_tcs = in_vs + uni_block.b + uni_block.a;\n"
"}\n"
"\n";
"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
"\n"
"layout (std140) buffer Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"writeonly uniform image2D uni_image;\n"
"{\n"
" vec4 result = vec4(1, 0, 0.5, 1);\n"
"\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" result = vec4(1, 1, 1, 1);\n"
" }\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (std140) buffer Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 gs_fs;\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" fs_out = vec4(1, 1, 1, 1);\n"
" }\n"
"layout(triangle_strip, max_vertices = 4) out;\n"
"\n"
"layout (std140) buffer Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 tes_gs[];\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" gs_fs = vec4(1, 1, 1, 1);\n"
" }\n"
"layout(vertices = 1) out;\n"
"\n"
"layout (std140) buffer Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 vs_tcs[];\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" tcs_tes[gl_InvocationID] = vec4(1, 1, 1, 1);\n"
" }\n"
"layout(isolines, point_mode) in;\n"
"\n"
"layout (std140) buffer Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 tcs_tes[];\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" tes_gs = vec4(1, 1, 1, 1);\n"
" }\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (std140) buffer Block {\n"
- " layout (offset = BOY_OFFSET) BOY_TYPE boy;\n"
- " layout (offset = MAN_OFFSET) MAN_TYPE man;\n"
+ " layout (offset = B_OFFSET) B_TYPE b;\n"
+ " layout (offset = A_OFFSET) A_TYPE a;\n"
"} block;\n"
"\n"
"in vec4 in_vs;\n"
"\n"
"void main()\n"
"{\n"
- " if ((BOY_TYPE(1) == block.boy) ||\n"
- " (MAN_TYPE(0) == block.man) )\n"
+ " if ((B_TYPE(1) == block.b) ||\n"
+ " (A_TYPE(0) == block.a) )\n"
" {\n"
" vs_tcs = vec4(1, 1, 1, 1);\n"
" }\n"
if (test_case.m_stage == stage)
{
GLchar buffer[16];
- const GLuint boy_offset = test_case.m_boy_offset;
- const Utils::Type& boy_type = test_case.m_boy_type;
- const GLchar* boy_type_name = boy_type.GetGLSLTypeName();
- const GLuint man_offset = test_case.m_man_offset;
- const Utils::Type& man_type = test_case.m_man_type;
- const GLchar* man_type_name = man_type.GetGLSLTypeName();
- size_t position = 0;
+ const GLuint b_offset = test_case.m_b_offset;
+ const Utils::Type& b_type = test_case.m_b_type;
+ const GLchar* b_type_name = b_type.GetGLSLTypeName();
+ const GLuint a_offset = test_case.m_a_offset;
+ const Utils::Type& a_type = test_case.m_a_type;
+ const GLchar* a_type_name = a_type.GetGLSLTypeName();
+ size_t position = 0;
switch (stage)
{
TCU_FAIL("Invalid enum");
}
- sprintf(buffer, "%d", boy_offset);
- Utils::replaceToken("BOY_OFFSET", position, buffer, source);
- Utils::replaceToken("BOY_TYPE", position, boy_type_name, source);
- sprintf(buffer, "%d", man_offset);
- Utils::replaceToken("MAN_OFFSET", position, buffer, source);
- Utils::replaceToken("MAN_TYPE", position, man_type_name, source);
- Utils::replaceToken("BOY_TYPE", position, boy_type_name, source);
- Utils::replaceToken("MAN_TYPE", position, man_type_name, source);
+ sprintf(buffer, "%d", b_offset);
+ Utils::replaceToken("B_OFFSET", position, buffer, source);
+ Utils::replaceToken("B_TYPE", position, b_type_name, source);
+ sprintf(buffer, "%d", a_offset);
+ Utils::replaceToken("A_OFFSET", position, buffer, source);
+ Utils::replaceToken("A_TYPE", position, a_type_name, source);
+ Utils::replaceToken("B_TYPE", position, b_type_name, source);
+ Utils::replaceToken("A_TYPE", position, a_type_name, source);
}
else
{
"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
"\n"
"layout (std140) buffer Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"writeonly uniform image2D uni_image;\n"
"{\n"
" vec4 result = vec4(1, 0, 0.5, 1);\n"
"\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " result = vec4(1, 1, 1, 1) - block.boy;\n"
+ " result = vec4(1, 1, 1, 1) - block.b;\n"
" }\n"
"\n"
" imageStore(uni_image, ivec2(gl_GlobalInvocationID.xy), result);\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (std140) buffer Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 gs_fs;\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " fs_out = block.boy;\n"
+ " fs_out = block.b;\n"
" }\n"
"\n"
" fs_out += gs_fs;\n"
"layout(triangle_strip, max_vertices = 4) out;\n"
"\n"
"layout (std140) buffer Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 tes_gs[];\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " gs_fs = block.boy;\n"
+ " gs_fs = block.b;\n"
" }\n"
"\n"
" gs_fs += tes_gs[0];\n"
"layout(vertices = 1) out;\n"
"\n"
"layout (std140) buffer Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 vs_tcs[];\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " tcs_tes[gl_InvocationID] = block.boy;\n"
+ " tcs_tes[gl_InvocationID] = block.b;\n"
" }\n"
"\n"
"\n"
"layout(isolines, point_mode) in;\n"
"\n"
"layout (std140) buffer Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 tcs_tes[];\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " tes_gs = block.boy;\n"
+ " tes_gs = block.b;\n"
" }\n"
"\n"
" tes_gs += tcs_tes[0];\n"
"#extension GL_ARB_enhanced_layouts : require\n"
"\n"
"layout (std140) buffer Block {\n"
- " vec4 boy;\n"
- " layout (align = ALIGN) TYPE man;\n"
+ " vec4 b;\n"
+ " layout (align = ALIGN) TYPE a;\n"
"} block;\n"
"\n"
"in vec4 in_vs;\n"
"\n"
"void main()\n"
"{\n"
- " if (TYPE(0) == block.man)\n"
+ " if (TYPE(0) == block.a)\n"
" {\n"
- " vs_tcs = block.boy;\n"
+ " vs_tcs = block.b;\n"
" }\n"
"\n"
" vs_tcs += in_vs;\n"
* Test following code snippet with all shader stages:
*
* layout(QUALIFIER) uniform Block {
- * layout(offset = 16) vec4 boy;
- * layout(align = 48) vec4 man;
+ * layout(offset = 16) vec4 b;
+ * layout(align = 48) vec4 a;
* };
*
* Test following block qualifiers and all types:
* Use following code snippet:
*
* layout (std140) uniform Block {
- * layout (offset = boy_offset) boy_type boy;
- * layout (offset = man_offset) man_type man;
+ * layout (offset = b_offset) b_type b;
+ * layout (offset = a_offset) a_type a;
* };
*
* It is expected that overlapping members will cause compilation failure.
/* Protected types */
struct testCase
{
- glw::GLuint m_boy_offset;
- Utils::Type m_boy_type;
- glw::GLuint m_man_offset;
- Utils::Type m_man_type;
+ glw::GLuint m_b_offset;
+ Utils::Type m_b_type;
+ glw::GLuint m_a_offset;
+ Utils::Type m_a_type;
Utils::Shader::STAGES m_stage;
};
* Test following code snippet:
*
* layout (std140, offset = 8) uniform Block {
- * vec4 boy;
- * layout (align = man_alignment) type man;
+ * vec4 b;
+ * layout (align = a_alignment) type a;
* };
*
- * It is expected that compilation will fail whenever man_alignment is not
+ * It is expected that compilation will fail whenever a_alignment is not
* a power of 2.
*
* Test all alignment in range <0, sizeof(dmat4)>. Test all shader stages.
/* Uniform block declaration with std140 offsets calculated
* | align | loc_req | begins | ends | offset in bytes | imp |
- * ivec3 dummy1[3] | 4 | 12 | 0 | 12 | 0 | |
+ * ivec3 unused1[3] | 4 | 12 | 0 | 12 | 0 | |
* double double_value | 2 | 2 | 12 | 14 | 48 | XXX |
- * bool dummy2 | 1 | 1 | 14 | 15 | 56 | |
+ * bool unused2 | 1 | 1 | 14 | 15 | 56 | |
* dvec2 dvec2_value | 4 | 4 | 16 | 20 | 64 | XXX |
- * bvec3 dummy3 | 4 | 4 | 20 | 24 | 80 | |
+ * bvec3 unused3 | 4 | 4 | 20 | 24 | 80 | |
* dvec3 dvec3_value | 8 | 8 | 24 | 32 | 96 | XXX |
- * int dummy4[3] | 4 | 12 | 32 | 44 | 128 | |
+ * int unused4[3] | 4 | 12 | 32 | 44 | 128 | |
* dvec4 dvec4_value | 8 | 8 | 48 | 56 | 192 | XXX |
- * bool dummy5 | 1 | 1 | 56 | 57 | 224 | |
- * bool dummy6[2] | 4 | 8 | 60 | 68 | 240 | |
+ * bool unused5 | 1 | 1 | 56 | 57 | 224 | |
+ * bool unused6[2] | 4 | 8 | 60 | 68 | 240 | |
* dmat2 dmat2_value | 4 | 8 | 68 | 76 | 272 | XXX |
* dmat3 dmat3_value | 8 | 24 | 80 | 104 | 320 | XXX |
- * bool dummy7 | 1 | 1 | 104 | 105 | 416 | |
+ * bool unused7 | 1 | 1 | 104 | 105 | 416 | |
* dmat4 dmat4_value | 8 | 32 | 112 | 144 | 448 | XXX |
* dmat2x3 dmat2x3_value | 8 | 16 | 144 | 160 | 576 | XXX |
- * uvec3 dummy8 | 4 | 4 | 160 | 164 | 640 | |
+ * uvec3 unused8 | 4 | 4 | 160 | 164 | 640 | |
* dmat2x4 dmat2x4_value | 8 | 16 | 168 | 184 | 672 | XXX |
* dmat3x2 dmat3x2_value | 4 | 12 | 184 | 196 | 736 | XXX |
- * bool dummy9 | 1 | 1 | 196 | 197 | 784 | |
+ * bool unused9 | 1 | 1 | 196 | 197 | 784 | |
* dmat3x4 dmat3x4_value | 8 | 24 | 200 | 224 | 800 | XXX |
- * int dummy10 | 1 | 1 | 224 | 225 | 896 | |
+ * int unused10 | 1 | 1 | 224 | 225 | 896 | |
* dmat4x2 dmat4x2_value | 4 | 16 | 228 | 244 | 912 | XXX |
* dmat4x3 dmat4x3_value | 8 | 32 | 248 | 280 | 992 | XXX |
*/
stream << "layout(" << layout << ") uniform " << m_uniform_block_name << "\n"
"{\n"
- " ivec3 dummy1[3];\n"
+ " ivec3 unused1[3];\n"
" double double_value;\n"
- " bool dummy2;\n"
+ " bool unused2;\n"
" dvec2 dvec2_value;\n"
- " bvec3 dummy3;\n"
+ " bvec3 unused3;\n"
" dvec3 dvec3_value;\n"
- " int dummy4[3];\n"
+ " int unused4[3];\n"
" dvec4 dvec4_value;\n"
- " bool dummy5;\n"
- " bool dummy6[2];\n"
+ " bool unused5;\n"
+ " bool unused6[2];\n"
" dmat2 dmat2_value;\n"
" dmat3 dmat3_value;\n"
- " bool dummy7;\n"
+ " bool unused7;\n"
" dmat4 dmat4_value;\n"
" dmat2x3 dmat2x3_value;\n"
- " uvec3 dummy8;\n"
+ " uvec3 unused8;\n"
" dmat2x4 dmat2x4_value;\n"
" dmat3x2 dmat3x2_value;\n"
- " bool dummy9;\n"
+ " bool unused9;\n"
" dmat3x4 dmat3x4_value;\n"
- " int dummy10;\n"
+ " int unused10;\n"
" dmat4x2 dmat4x2_value;\n"
" dmat4x3 dmat4x3_value;\n"
"} "
* in a named uniform block.
* The following members should be defined in the block:
*
- * ivec3 dummy1[3];
+ * ivec3 unused1[3];
* double double_value;
- * bool dummy2;
+ * bool unused2;
* dvec2 dvec2_value;
- * bvec3 dummy3;
+ * bvec3 unused3;
* dvec3 dvec3_value;
- * int dummy4[3];
+ * int unused4[3];
* dvec4 dvec4_value;
- * bool dummy5;
- * bool dummy6[2];
+ * bool unused5;
+ * bool unused6[2];
* dmat2 dmat2_value;
* dmat3 dmat3_value;
- * bool dummy7;
+ * bool unused7;
* dmat4 dmat4_value;
* dmat2x3 dmat2x3_value;
- * uvec3 dummy8;
+ * uvec3 unused8;
* dmat2x4 dmat2x4_value;
* dmat3x2 dmat3x2_value;
- * bool dummy9;
+ * bool unused9;
* dmat3x4 dmat3x4_value;
- * int dummy10;
+ * int unused10;
* dmat4x2 dmat4x2_value;
* dmat4x3 dmat4x3_value;
*
namespace glcts
{
-const char* PipelineStatisticsQueryUtilities::dummy_cs_code =
+const char* PipelineStatisticsQueryUtilities::minimal_cs_code =
"#version 430\n"
"\n"
"layout(local_size_x=1, local_size_y = 1, local_size_z = 1) in;\n"
"{\n"
" atomicCounterIncrement(test_counter);\n"
"}\n";
-const char* PipelineStatisticsQueryUtilities::dummy_fs_code = "#version 130\n"
+
+const char* PipelineStatisticsQueryUtilities::minimal_cs_code_arb =
+ "#version 330\n"
+ "#extension GL_ARB_compute_shader : require\n"
+ "#extension GL_ARB_shader_atomic_counters : require\n"
+ "\n"
+ "layout(local_size_x=1, local_size_y = 1, local_size_z = 1) in;\n"
+ "\n"
+ "layout(binding = 0) uniform atomic_uint test_counter;\n"
+ "\n"
+ "void main()\n"
+ "{\n"
+ " atomicCounterIncrement(test_counter);\n"
+ "}\n";
+const char* PipelineStatisticsQueryUtilities::minimal_fs_code = "#version 130\n"
"\n"
"out vec4 result;\n"
"\n"
"{\n"
" result = gl_FragCoord;\n"
"}\n";
-const char* PipelineStatisticsQueryUtilities::dummy_tc_code =
+const char* PipelineStatisticsQueryUtilities::minimal_tc_code =
"#version 400\n"
"\n"
"layout(vertices = 3) out;\n"
" gl_TessLevelOuter[2] = 5.0;\n"
" gl_TessLevelOuter[3] = 6.0;\n"
"}\n";
-const char* PipelineStatisticsQueryUtilities::dummy_te_code =
+const char* PipelineStatisticsQueryUtilities::minimal_te_code =
"#version 400\n"
"\n"
"layout(triangles) in;\n"
"{\n"
" gl_Position = gl_TessCoord.xyxy * gl_in[gl_PrimitiveID].gl_Position;\n"
"}\n";
-const char* PipelineStatisticsQueryUtilities::dummy_vs_code = "#version 130\n"
+const char* PipelineStatisticsQueryUtilities::minimal_vs_code = "#version 130\n"
"\n"
"in vec4 position;\n"
"\n"
deinitObjects();
}
-/** Dummy method that should be overloaded by inheriting methods.
+/** Empty method that should be overloaded by inheriting methods.
*
* The method can be thought as of a placeholder for code that deinitializes
* test-specific GL objects.
GLU_EXPECT_NO_ERROR(gl.getError(), "glViewport() call failed.");
}
-/** A dummy method, which can be thought of as a placeholder to initialize
+/** An empty method, which can be thought of as a placeholder to initialize
* test-specific GL objects.
**/
void PipelineStatisticsQueryTestFunctionalBase::initObjects()
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
buildProgram(DE_NULL, /* cs_body */
- PipelineStatisticsQueryUtilities::dummy_fs_code, DE_NULL, /* gs_body */
+ PipelineStatisticsQueryUtilities::minimal_fs_code, DE_NULL, /* gs_body */
DE_NULL, /* tc_body */
DE_NULL, /* te_body */
- PipelineStatisticsQueryUtilities::dummy_vs_code);
+ PipelineStatisticsQueryUtilities::minimal_vs_code);
gl.useProgram(m_po_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glUseProgram() call failed.");
DE_NULL, /* gs_body */
DE_NULL, /* tc_body */
DE_NULL, /* te_body */
- PipelineStatisticsQueryUtilities::dummy_vs_code);
+ PipelineStatisticsQueryUtilities::minimal_vs_code);
gl.useProgram(m_po_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glUseProgram() call failed.");
}
buildProgram(DE_NULL, /* cs_body */
- PipelineStatisticsQueryUtilities::dummy_fs_code, DE_NULL, /* gs_body */
- PipelineStatisticsQueryUtilities::dummy_tc_code, PipelineStatisticsQueryUtilities::dummy_te_code,
- PipelineStatisticsQueryUtilities::dummy_vs_code);
+ PipelineStatisticsQueryUtilities::minimal_fs_code, DE_NULL, /* gs_body */
+ PipelineStatisticsQueryUtilities::minimal_tc_code, PipelineStatisticsQueryUtilities::minimal_te_code,
+ PipelineStatisticsQueryUtilities::minimal_vs_code);
gl.useProgram(m_po_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glUseProgram() call failed.");
}
buildProgram(DE_NULL, /* cs_body */
- PipelineStatisticsQueryUtilities::dummy_fs_code, gs_body.c_str(), DE_NULL, /* tc_body */
+ PipelineStatisticsQueryUtilities::minimal_fs_code, gs_body.c_str(), DE_NULL, /* tc_body */
DE_NULL, /* te_body */
- PipelineStatisticsQueryUtilities::dummy_vs_code);
+ PipelineStatisticsQueryUtilities::minimal_vs_code);
gl.useProgram(m_po_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glUseProgram() call failed.");
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
buildProgram(DE_NULL, /* cs_body */
- PipelineStatisticsQueryUtilities::dummy_fs_code, DE_NULL, /* gs_body */
+ PipelineStatisticsQueryUtilities::minimal_fs_code, DE_NULL, /* gs_body */
DE_NULL, /* tc_body */
DE_NULL, /* te_body */
- PipelineStatisticsQueryUtilities::dummy_vs_code);
+ PipelineStatisticsQueryUtilities::minimal_vs_code);
gl.useProgram(m_po_id);
GLU_EXPECT_NO_ERROR(gl.getError(), "glUseProgram() call failed.");
/** Initializes all GL objects used by the test */
void PipelineStatisticsQueryTestFunctional8::initObjects()
{
- const glw::Functions& gl = m_context.getRenderContext().getFunctions();
-
+ const glw::Functions& gl = m_context.getRenderContext().getFunctions();
+ const char* cs_code = 0;
/* This test should not execute if we don't have compute shaders */
- if (!glu::contextSupports(m_context.getRenderContext().getType(), glu::ApiType::core(4, 3)) &&
- !m_context.getContextInfo().isExtensionSupported("GL_ARB_compute_shader"))
+ if (glu::contextSupports(m_context.getRenderContext().getType(), glu::ApiType::core(4, 3)))
+ {
+ cs_code = PipelineStatisticsQueryUtilities::minimal_cs_code;
+ }
+ else if (m_context.getContextInfo().isExtensionSupported("GL_ARB_compute_shader") &&
+ m_context.getContextInfo().isExtensionSupported("GL_ARB_shader_atomic_counters"))
+ {
+ cs_code = PipelineStatisticsQueryUtilities::minimal_cs_code_arb;
+ }
+ else
{
throw tcu::NotSupportedError("OpenGL 4.3+ / compute shaders required to run this test.");
}
- buildProgram(PipelineStatisticsQueryUtilities::dummy_cs_code, DE_NULL, /* fs_body */
+ buildProgram(cs_code, DE_NULL, /* fs_body */
DE_NULL, /* gs_body */
DE_NULL, /* tc_body */
DE_NULL, /* te_body */
/* Code of a compute shader used by a functional test that verifies
* GL_COMPUTE_SHADER_INVOCATIONS_ARB query works correctly.
*/
- static const char* dummy_cs_code;
+ static const char* minimal_cs_code;
+ static const char* minimal_cs_code_arb;
/* Code of a fragment shader used by a number of functional tests */
- static const char* dummy_fs_code;
+ static const char* minimal_fs_code;
/* Code of a tessellation control shader used by a functional test that verifies
* GL_TESS_CONTROL_SHADER_PATCHES_ARB and GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB
* queries work correctly.
*/
- static const char* dummy_tc_code;
+ static const char* minimal_tc_code;
/* Code of a tessellation evaluation shader used by a functional test that verifies
* GL_TESS_CONTROL_SHADER_PATCHES_ARB and GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB
* queries work correctly.
*/
- static const char* dummy_te_code;
+ static const char* minimal_te_code;
/* Code of a vertex shader used by a number of functional tests */
- static const char* dummy_vs_code;
+ static const char* minimal_vs_code;
/* Tells how many query targets are stored in query_targets */
static const unsigned int n_query_targets;
*/
bool PixelPackBufferStorageTestCase::initTestCaseGlobal()
{
- /* Determine dummy vertex shader and fragment shader that will generate black-to-white gradient. */
+ /* Determine vertex shader and fragment shader that will generate black-to-white gradient. */
const char* gradient_fs_code = "#version 330 core\n"
"\n"
"out vec4 result;\n"
/* Only perform the check if the offsets refer to pages with physical backing.
*
- * Note that, on platforms, whose page size % 4 != 0, the values can land partially in the no-man's land,
+ * Note that, on platforms, whose page size % 4 != 0, the values can land partially out of bounds,
* and partially in the safe zone. In such cases, skip the verification. */
const bool result_instance_id_page_has_physical_backing =
(((((char*)result_instance_id_traveller_ptr - (char*)result_ptr) / m_page_size) % 2) ==
const char* GL::shader_version = "#version 430 core\n\n";
} /* namespace Interface */
-/* Dummy fragment shader source code.
+/* Minimal fragment shader source code.
* Used when testing the vertex shader. */
const std::string default_fragment_shader_source = "//default fragment shader\n"
"out vec4 color;\n"
" color = vec4(1.0);\n"
"}\n";
-/* Dummy vertex shader source code.
+/* Minimal vertex shader source code.
* Used when testing the fragment shader. */
const std::string default_vertex_shader_source = "//default vertex shader\n"
"\n"
" gl_Position = vec4(0.0,0.0,0.0,1.0);\n"
"}\n";
-/* Dummy geometry shader source code.
+/* Simple geometry shader source code.
* Used when testing the other shaders. */
const std::string default_geometry_shader_source = "//default geometry\n"
"\n"
" EmitVertex();\n"
"}\n";
-/* Dummy tesselation control shader source code.
+/* Simple tessellation control shader source code.
* Used when testing the other shaders. */
const std::string default_tc_shader_source = "//default tcs\n"
"\n"
" gl_TessLevelInner[1] = 1.0;\n"
"}\n";
-/* Dummy tesselation evaluation shader source code.
+/* Minimal tessellation evaluation shader source code.
* Used when testing the other shaders. */
const std::string default_te_shader_source = "//default tes\n"
"\n"
namespace glcts
{
-static const char* dummy_fs_code = "${VERSION}\n"
+static const char* minimal_fs_code =
+ "${VERSION}\n"
"\n"
"precision highp float;\n"
"\n"
" result = vec4(1.0);\n"
"}\n";
-static const char* dummy_gs_code = "${VERSION}\n"
+static const char* minimal_gs_code =
+ "${VERSION}\n"
"${GEOMETRY_SHADER_REQUIRE}\n"
"\n"
"layout (points) in;\n"
" EmitVertex();\n"
"}\n";
-static const char* dummy_vs_code = "${VERSION}\n"
+static const char* minimal_vs_code =
+ "${VERSION}\n"
"\n"
"${OUT_PER_VERTEX_DECL}"
"\n"
}
/* Initialize the program object */
- std::string specialized_dummy_fs = specializeShader(1,
- /* parts */ &dummy_fs_code);
- const char* specialized_dummy_fs_raw = specialized_dummy_fs.c_str();
- std::string specialized_dummy_vs = specializeShader(1,
- /* parts */ &dummy_vs_code);
- const char* specialized_dummy_vs_raw = specialized_dummy_vs.c_str();
+ std::string specialized_minimal_fs = specializeShader(1,
+ /* parts */ &minimal_fs_code);
+ const char* specialized_minimal_fs_raw = specialized_minimal_fs.c_str();
+ std::string specialized_minimal_vs = specializeShader(1,
+ /* parts */ &minimal_vs_code);
+ const char* specialized_minimal_vs_raw = specialized_minimal_vs.c_str();
m_fs_id = gl.createShader(GL_FRAGMENT_SHADER);
m_vs_id = gl.createShader(GL_VERTEX_SHADER);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateProgram() call failed.");
- if (!TestCaseBase::buildProgram(m_po_id, m_fs_id, 1, &specialized_dummy_fs_raw, m_vs_id, 1,
- &specialized_dummy_vs_raw))
+ if (!TestCaseBase::buildProgram(m_po_id, m_fs_id, 1, &specialized_minimal_fs_raw, m_vs_id, 1,
+ &specialized_minimal_vs_raw))
{
- m_testCtx.getLog() << tcu::TestLog::Message << "Failed to build a dummy test program object"
+ m_testCtx.getLog() << tcu::TestLog::Message << "Failed to build a minimal test program object"
<< tcu::TestLog::EndMessage;
result = false;
throw tcu::NotSupportedError(GEOMETRY_SHADER_EXTENSION_NOT_SUPPORTED, "", __FILE__, __LINE__);
}
- /* Prepare specialized versions of dummy fragment & vertex shaders */
- std::string dummy_fs_specialized = specializeShader(1,
- /* parts */ &dummy_fs_code);
- const char* dummy_fs_specialized_raw = dummy_fs_specialized.c_str();
- std::string dummy_vs_specialized = specializeShader(1, &dummy_vs_code);
- const char* dummy_vs_specialized_raw = dummy_vs_specialized.c_str();
+ /* Prepare specialized versions of minimal fragment & vertex shaders */
+ std::string minimal_fs_specialized = specializeShader(1,
+ /* parts */ &minimal_fs_code);
+ const char* minimal_fs_specialized_raw = minimal_fs_specialized.c_str();
+ std::string minimal_vs_specialized = specializeShader(1, &minimal_vs_code);
+ const char* minimal_vs_specialized_raw = minimal_vs_specialized.c_str();
/* Set up the fragment & the vertex shaders */
m_fs_id = gl.createShader(GL_FRAGMENT_SHADER);
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
- if (!buildShader(m_fs_id, dummy_fs_specialized_raw) || !buildShader(m_vs_id, dummy_vs_specialized_raw))
+ if (!buildShader(m_fs_id, minimal_fs_specialized_raw) || !buildShader(m_vs_id, minimal_vs_specialized_raw))
{
m_testCtx.getLog() << tcu::TestLog::Message << "Either FS or VS failed to build." << tcu::TestLog::EndMessage;
GLU_EXPECT_NO_ERROR(gl.getError(), "glAttachShader() call(s) failed.");
/* Set up the fragment & the vertex shader programs */
- if (!buildShaderProgram(&m_fs_po_id, GL_FRAGMENT_SHADER, dummy_fs_specialized_raw) ||
- !buildShaderProgram(&m_vs_po_id, GL_VERTEX_SHADER, dummy_vs_specialized_raw))
+ if (!buildShaderProgram(&m_fs_po_id, GL_FRAGMENT_SHADER, minimal_fs_specialized_raw) ||
+ !buildShaderProgram(&m_vs_po_id, GL_VERTEX_SHADER, minimal_vs_specialized_raw))
{
m_testCtx.getLog() << tcu::TestLog::Message << "Either FS or VS SPOs failed to build."
<< tcu::TestLog::EndMessage;
GLU_EXPECT_NO_ERROR(gl.getError(), "glBindVertexArray() call failed.");
/* Create shader program objects */
- std::string code_fs_specialized = specializeShader(1, /* parts */
- &dummy_fs_code);
+ std::string code_fs_specialized = specializeShader(1, /* parts */
+ &minimal_fs_code);
const char* code_fs_specialized_raw = code_fs_specialized.c_str();
std::string code_gs_specialized = specializeShader(1, /* parts */
- &dummy_gs_code);
+ &minimal_gs_code);
const char* code_gs_specialized_raw = code_gs_specialized.c_str();
glw::GLint link_status = GL_FALSE;
if (link_status != GL_TRUE)
{
- m_testCtx.getLog() << tcu::TestLog::Message << "Dummy fragment shader program failed to link."
+ m_testCtx.getLog() << tcu::TestLog::Message << "Minimal fragment shader program failed to link."
<< tcu::TestLog::EndMessage;
result = false;
if (link_status != GL_TRUE)
{
- m_testCtx.getLog() << tcu::TestLog::Message << "Dummy geometry shader program failed to link."
+ m_testCtx.getLog() << tcu::TestLog::Message << "Minimal geometry shader program failed to link."
<< tcu::TestLog::EndMessage;
result = false;
GLU_EXPECT_NO_ERROR(gl.getError(), "glTransformFeedbackVaryings() call failed.");
/* Try to link the test program object */
- fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ fs_code_specialized = specializeShader(1, &minimal_fs_code);
fs_code_specialized_raw = fs_code_specialized.c_str();
gs_code_specialized = getGSCode();
gs_code_specialized_raw = gs_code_specialized.c_str();
- vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ vs_code_specialized = specializeShader(1, &minimal_vs_code);
vs_code_specialized_raw = vs_code_specialized.c_str();
if (!TestCaseBase::buildProgram(m_po_id, m_gs_id, 1, /* n_sh1_body_parts */
GLU_EXPECT_NO_ERROR(gl.getError(), "glTransformFeedbackVaryings() call failed.");
/* Try to link the test program object */
- fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ fs_code_specialized = specializeShader(1, &minimal_fs_code);
fs_code_specialized_raw = fs_code_specialized.c_str();
gs_code_specialized = getGSCode();
gs_code_specialized_raw = gs_code_specialized.c_str();
- vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ vs_code_specialized = specializeShader(1, &minimal_vs_code);
vs_code_specialized_raw = vs_code_specialized.c_str();
if (!TestCaseBase::buildProgram(m_po_id, m_gs_id, 1, /* n_sh1_body_parts */
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ fs_code_specialized = specializeShader(1, &minimal_fs_code);
fs_code_specialized_raw = fs_code_specialized.c_str();
gs_code_specialized = getGSCode();
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ fs_code_specialized = specializeShader(1, &minimal_fs_code);
fs_code_specialized_raw = fs_code_specialized.c_str();
gs_code_specialized = getGSCode();
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
- std::string gs_code_specialized = specializeShader(1, &dummy_gs_code);
+ std::string gs_code_specialized = specializeShader(1, &minimal_gs_code);
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
if (!TestCaseBase::buildProgram(m_fs_po_id, m_fs_id, 1, /* n_sh1_body_parts */
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_codes_specialized[] = { specializeShader(1, &gs_code_points), specializeShader(1, &gs_code_lines),
const char* gs_codes_specialized_raw[] = { gs_codes_specialized[0].c_str(), gs_codes_specialized[1].c_str(),
gs_codes_specialized[2].c_str(), gs_codes_specialized[3].c_str(),
gs_codes_specialized[4].c_str() };
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
for (glw::GLuint i = 0; i < m_number_of_gs; ++i)
const char* gs_codes_specialized_raw[] = { gs_codes_specialized[0].c_str(), gs_codes_specialized[1].c_str() };
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
for (glw::GLuint i = 0; i < m_number_of_gs; ++i)
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = specializeShader(1, &gs_code);
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
if (!TestCaseBase::buildProgram(m_po_id, m_fs_id, 1, /* n_sh1_body_parts */
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
for (int i = 0; i < number_of_combinations; ++i)
namespace glcts
{
-static const char* dummy_fs_code = "${VERSION}\n"
+static const char* minimal_fs_code = "${VERSION}\n"
"\n"
"precision highp float;\n"
"\n"
" result = vec4(1.0);\n"
"}\n";
-static const char* dummy_gs_code = "${VERSION}\n"
+static const char* minimal_gs_code = "${VERSION}\n"
"${GEOMETRY_SHADER_REQUIRE}\n"
"\n"
"layout (points) in;\n"
" EmitVertex();\n"
"}\n";
-static const char* dummy_vs_code = "${VERSION}\n"
+static const char* minimal_vs_code = "${VERSION}\n"
"\n"
"${OUT_PER_VERTEX_DECL}"
"\n"
void GeometryShaderIncompleteProgramObjectsTest::initShaderObjects()
{
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
- std::string specialized_fs_code = specializeShader(1, &dummy_fs_code);
+ std::string specialized_fs_code = specializeShader(1, &minimal_fs_code);
const char* specialized_fs_code_raw = specialized_fs_code.c_str();
- std::string specialized_gs_code = specializeShader(1, &dummy_gs_code);
+ std::string specialized_gs_code = specializeShader(1, &minimal_gs_code);
const char* specialized_gs_code_raw = specialized_gs_code.c_str();
m_fs_id = gl.createShader(GL_FRAGMENT_SHADER);
{
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
- std::string specialized_fs_code = specializeShader(1, &dummy_fs_code);
+ std::string specialized_fs_code = specializeShader(1, &minimal_fs_code);
std::string gs_code = getGeometryShaderCode(current_run);
const char* gs_code_raw = gs_code.c_str();
std::string specialized_gs_code = specializeShader(1, &gs_code_raw);
- std::string specialized_vs_code = specializeShader(1, &dummy_vs_code);
+ std::string specialized_vs_code = specializeShader(1, &minimal_vs_code);
const char* specialized_fs_code_raw = specialized_fs_code.c_str();
const char* specialized_gs_code_raw = specialized_gs_code.c_str();
if (!has_fs_compiled || !has_vs_compiled)
{
- m_testCtx.getLog() << tcu::TestLog::Message << "Dummy FS and/or dummy VS failed to compile"
+ m_testCtx.getLog() << tcu::TestLog::Message << "Minimal FS and/or minimal VS failed to compile"
<< tcu::TestLog::EndMessage;
result = false;
bool* out_has_vs_compiled_successfully)
{
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
- std::string specialized_fs_code = specializeShader(1, &dummy_fs_code);
+ std::string specialized_fs_code = specializeShader(1, &minimal_fs_code);
const char* specialized_fs_code_raw = specialized_fs_code.c_str();
std::string gs_code = getGSCode(gs_input_primitive_type);
const char* gs_code_raw = gs_code.c_str();
" test = vec3(gl_VertexID);\n"
"}\n";
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = specializeShader(1, &gs_code_raw);
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
" test = vec4(gl_VertexID);\n"
"}\n";
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = specializeShader(1, &gs_code_raw);
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
" Color3 = vec4(0.0, 0.0, gl_VertexID, 0.0);\n"
"}\n";
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = specializeShader(1, &gs_code_raw);
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
" EmitVertex();\n"
"}\n";
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = specializeShader(1, &gs_code_raw);
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
if (TestCaseBase::buildProgram(m_po_id, m_gs_id, 1, /* n_sh1_body_parts */
" EmitVertex();\n"
"}\n";
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = specializeShader(1, &gs_code_raw);
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
has_program_link_succeeded = TestCaseBase::buildProgram(
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = getGSCode();
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
if (TestCaseBase::buildProgram(m_po_id, m_gs_id, 1, /* n_sh1_body_parts */
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = getGSCode();
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
if (TestCaseBase::buildProgram(m_po_id, m_gs_id, 1, /* n_sh1_body_parts */
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = specializeShader(1, &gs_code);
const char* gs_code_specialized_raw = gs_code_specialized.c_str();
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
if (TestCaseBase::buildProgram(m_po_id, m_gs_id, 1, /* n_sh1_body_parts */
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_codes_specialized[] = { specializeShader(1, &gs_code_points), specializeShader(1, &gs_code_lines),
gs_codes_specialized[2].c_str(), gs_codes_specialized[3].c_str(),
gs_codes_specialized[4].c_str() };
- std::string vs_code_specialized = specializeShader(1, &dummy_vs_code);
+ std::string vs_code_specialized = specializeShader(1, &minimal_vs_code);
const char* vs_code_specialized_raw = vs_code_specialized.c_str();
for (glw::GLuint i = 0; i < m_number_of_gs; ++i)
GLU_EXPECT_NO_ERROR(gl.getError(), "glCreateShader() call(s) failed.");
/* Try to link the test program object */
- std::string fs_code_specialized = specializeShader(1, &dummy_fs_code);
+ std::string fs_code_specialized = specializeShader(1, &minimal_fs_code);
const char* fs_code_specialized_raw = fs_code_specialized.c_str();
std::string gs_code_specialized = specializeShader(1, &gs_code);
shaderSourceSpecialized(run.vs_id, 1 /* count */, &vs_body);
GLU_EXPECT_NO_ERROR(gl.getError(), "glShaderSource() call failed for vertex shader");
- /* Set dummy fragment shader's body */
+ /* Set minimal fragment shader's body */
const char* fs_body = "${VERSION}\n"
"\n"
"void main()\n"
}
}
-/** Retrieves a dummy fragment shader code to be used for forming program objects
+/** Retrieves a minimal fragment shader code to be used for forming program objects
* used by the test.
*
* @return As per description.
return result;
}
-/** Retrieves a dummy vertex shader code to be used for forming program objects
+/** Retrieves a minimal vertex shader code to be used for forming program objects
* used by the test.
*
* @return As per description.
// Translate to argc, argv
vector<const char*> argv;
- argv.push_back("cts-runner"); // Dummy binary name
+ argv.push_back("cts-runner"); // Placeholder binary name (argv[0])
for (vector<string>::const_iterator i = args.begin(); i != args.end(); i++)
argv.push_back(i->c_str());
*
*//*!
* \file
- * \brief Null (dummy) Vulkan implementation.
+ * \brief Null (do-nothing) Vulkan implementation.
*//*--------------------------------------------------------------------*/
#include "vkNullDriver.hpp"
*
*//*!
* \file
- * \brief Null (dummy) Vulkan implementation.
+ * \brief Null (do-nothing) Vulkan implementation.
*//*--------------------------------------------------------------------*/
#include "vkDefs.hpp"
TCU_THROW(NotSupportedError, "External handle type requires dedicated allocation");
}
-void submitDummySignal (const vk::DeviceInterface& vkd,
+void submitEmptySignal (const vk::DeviceInterface& vkd,
vk::VkQueue queue,
vk::VkSemaphore semaphore)
{
VK_CHECK(vkd.queueSubmit(queue, 1, &submit, (vk::VkFence)0u));
}
-void submitDummySignalAndGetSemaphoreNative ( const vk::DeviceInterface& vk,
+void submitEmptySignalAndGetSemaphoreNative ( const vk::DeviceInterface& vk,
vk::VkDevice device,
vk::VkQueue queue,
deUint32 queueFamilyIndex,
VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
/*
- The submitDummySignal function calls vkQueueSubmit with an empty VkSubmitInfo structure and a
+ The submitEmptySignal function calls vkQueueSubmit with an empty VkSubmitInfo structure and a
VkSemaphore to be signalled when the work is finished. Because there is no work in the submission, vkQueueSubmit
may signal the semaphore immediately. When a semaphore's file descriptor is obtained using vkGetFenceFdKHR, if the
handle type is VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR, vkGetFenceFdKHR is allowed to return -1 if the fence
VK_CHECK(vk.queueWaitIdle(queue));
}
-void submitDummyWait (const vk::DeviceInterface& vkd,
+void submitEmptyWait (const vk::DeviceInterface& vkd,
vk::VkQueue queue,
vk::VkSemaphore semaphore)
{
VK_CHECK(vkd.queueSubmit(queue, 1, &submit, (vk::VkFence)0u));
}
-void submitDummySignal (const vk::DeviceInterface& vkd,
+void submitEmptySignal (const vk::DeviceInterface& vkd,
vk::VkQueue queue,
vk::VkFence fence)
{
VK_CHECK(vkd.queueSubmit(queue, 1, &submit, fence));
}
-void submitDummySignalAndGetFenceNative ( const vk::DeviceInterface& vk,
+void submitEmptySignalAndGetFenceNative ( const vk::DeviceInterface& vk,
vk::VkDevice device,
vk::VkQueue queue,
deUint32 queueFamilyIndex,
VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &cmdBufferBeginInfo));
/*
- The submitDummySignal function calls vkQueueSubmit with an empty VkSubmitInfo structure and a
+ The submitEmptySignal function calls vkQueueSubmit with an empty VkSubmitInfo structure and a
VkFence to be signalled when the work is finished. Because there is no work in the submission, vkQueueSubmit
could signal the fence immediately. When a fence's file descriptor is obtained using vkGetFenceFdKHR, if the
handle type is VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR, vkGetFenceFdKHR is allowed to return -1 instead of a
const vk::Unique<vk::VkSemaphore> semaphore (vk::createSemaphore(vkd, *device, &createInfo));
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *semaphore);
+ submitEmptySignal(vkd, queue, *semaphore);
NativeHandle handleA;
getSemaphoreNative(vkd, *device, *semaphore, config.externalType, handleA);
const vk::Unique<vk::VkSemaphore> semaphoreA (createAndImportSemaphore(vkd, *device, config.externalType, handleA, flags));
if (transference == TRANSFERENCE_COPY)
- submitDummyWait(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreA);
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *semaphore);
- submitDummyWait(vkd, queue, *semaphoreA);
+ submitEmptySignal(vkd, queue, *semaphore);
+ submitEmptyWait(vkd, queue, *semaphoreA);
}
else
DE_FATAL("Unknown transference.");
NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphore, config.externalType, handleA);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphore, config.externalType, handleA);
else
getSemaphoreNative(vkd, *device, *semaphore, config.externalType, handleA);
const vk::Unique<vk::VkSemaphore> semaphoreB (createAndImportSemaphore(vkd, *device, config.externalType, handleB, flags));
if (transference == TRANSFERENCE_COPY)
- submitDummyWait(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreA);
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
}
else
DE_FATAL("Unknown transference.");
NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handleA);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handleA);
else
getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handleA);
importSemaphore(vkd, *device, *semaphoreB, config.externalType, handleB, flags);
if (transference == TRANSFERENCE_COPY)
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreB);
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
}
else
DE_FATAL("Unknown transference.");
{
NativeHandle handle;
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
{
const vk::VkSemaphoreImportFlags flags = config.permanence == PERMANENCE_TEMPORARY ? vk::VK_SEMAPHORE_IMPORT_TEMPORARY_BIT : (vk::VkSemaphoreImportFlagBits)0u;
const vk::Unique<vk::VkSemaphore> semaphoreB (createAndImportSemaphore(vkd, *device, config.externalType, handle, flags));
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
}
getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
- submitDummySignal(vkd, queue, *semaphoreA);
+ submitEmptySignal(vkd, queue, *semaphoreA);
{
{
const vk::Unique<vk::VkSemaphore> semaphoreB (createAndImportSemaphore(vkd, *device, config.externalType, handle, flags));
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
}
}
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(vkd, *device, config.externalType));
NativeHandle handle;
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
const vk::Unique<vk::VkSemaphore> semaphoreB (createAndImportSemaphore(vkd, *device, config.externalType, handle, flags));
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
const vk::Unique<vk::VkSemaphore> semaphoreB (createSemaphore(vkd, *device));
NativeHandle handle;
- submitDummySignal(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
else
getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
importSemaphore(vkd, *device, *semaphoreB, config.externalType, handle, flags);
if (transference == TRANSFERENCE_COPY)
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreB);
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
}
else
DE_FATAL("Unknown transference.");
NativeHandle handle;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
else
getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handle);
- submitDummySignal(vkd, queue, *semaphoreB);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
importSemaphore(vkd, *device, *semaphoreB, config.externalType, handle, flags);
if (transference == TRANSFERENCE_COPY)
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreB);
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
}
else
DE_FATAL("Unknown transference.");
}
}
+tcu::TestStatus testSemaphoreImportSyncFdSignaled (Context& context,
+ const SemaphoreTestConfig config)
+{
+ const vk::PlatformInterface& vkp (context.getPlatformInterface());
+ const CustomInstance instance (createTestInstance(context, config.externalType, 0u, 0u));
+ const vk::InstanceDriver& vki (instance.getDriver());
+ const vk::VkPhysicalDevice physicalDevice (vk::chooseDevice(vki, instance, context.getTestContext().getCommandLine()));
+ const deUint32 queueFamilyIndex (chooseQueueFamilyIndex(vki, physicalDevice, 0u));
+ const vk::VkSemaphoreImportFlags flags = config.permanence == PERMANENCE_TEMPORARY ? vk::VK_SEMAPHORE_IMPORT_TEMPORARY_BIT : (vk::VkSemaphoreImportFlagBits)0u;
+
+ checkSemaphoreSupport(vki, physicalDevice, config.externalType);
+
+ {
+ const vk::Unique<vk::VkDevice> device (createTestDevice(context, vkp, instance, vki, physicalDevice, config.externalType, 0u, 0u, queueFamilyIndex));
+ const vk::DeviceDriver vkd (vkp, instance, *device);
+ const vk::VkQueue queue (getQueue(vkd, *device, queueFamilyIndex));
+ NativeHandle handle = -1; // a sync FD of -1 imports as an already-signaled payload
+ const vk::Unique<vk::VkSemaphore> semaphore (createAndImportSemaphore(vkd, *device, config.externalType, handle, flags));
+
+ submitEmptyWait(vkd, queue, *semaphore);
+
+ return tcu::TestStatus::pass("Pass");
+ }
+}
+
tcu::TestStatus testSemaphoreMultipleExports (Context& context,
const SemaphoreTestConfig config)
{
NativeHandle handle;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphore, config.externalType, handle);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphore, config.externalType, handle);
else
getSemaphoreNative(vkd, *device, *semaphore, config.externalType, handle);
}
- submitDummySignal(vkd, queue, *semaphore);
- submitDummyWait(vkd, queue, *semaphore);
+ submitEmptySignal(vkd, queue, *semaphore);
+ submitEmptyWait(vkd, queue, *semaphore);
VK_CHECK(vkd.queueWaitIdle(queue));
}
NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handleA);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handleA);
else
getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, handleA);
if (transference == TRANSFERENCE_COPY)
{
importSemaphore(vkd, *device, *semaphoreA, config.externalType, handleA, flags);
- submitDummyWait(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreA);
}
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreA);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreA);
}
else
DE_FATAL("Unknown transference.");
const vk::Unique<vk::VkSemaphore> semaphoreA (createExportableSemaphore(vkd, *device, config.externalType));
NativeHandle handle;
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, handle);
{
const vk::Unique<vk::VkSemaphore> semaphoreB (createAndImportSemaphore(vkd, *device, config.externalType, handle, flags));
{
if (transference == TRANSFERENCE_COPY)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
- submitDummySignal(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreB);
- submitDummyWait(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
}
else if (transference== TRANSFERENCE_REFERENCE)
{
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
- submitDummySignal(vkd, queue, *semaphoreB);
- submitDummyWait(vkd, queue, *semaphoreA);
+ submitEmptySignal(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreA);
VK_CHECK(vkd.queueWaitIdle(queue));
}
else
{
if (transference == TRANSFERENCE_COPY)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
- submitDummySignal(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreB);
- submitDummyWait(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
}
else if (transference== TRANSFERENCE_REFERENCE)
{
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreB);
VK_CHECK(vkd.queueWaitIdle(queue));
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummySignal(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptySignal(vkd, queue, *semaphoreB);
- submitDummyWait(vkd, queue, *semaphoreB);
- submitDummyWait(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreA);
VK_CHECK(vkd.queueWaitIdle(queue));
}
else
NativeHandle fd;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, fd);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, fd);
else
getSemaphoreNative(vkd, *device, *semaphoreA, config.externalType, fd);
const vk::Unique<vk::VkSemaphore> semaphoreB (createAndImportSemaphore(vkd, *device, config.externalType, newFd, flags));
if (transference == TRANSFERENCE_COPY)
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptyWait(vkd, queue, *semaphoreB);
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreB);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreB);
}
else
DE_FATAL("Unknown permanence.");
if (transference == TRANSFERENCE_COPY)
{
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, fd);
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreB, config.externalType, secondFd);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, fd);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreB, config.externalType, secondFd);
}
else
{
const vk::Unique<vk::VkSemaphore> semaphoreC (createAndImportSemaphore(vkd, *device, config.externalType, secondFd, flags));
if (transference == TRANSFERENCE_COPY)
- submitDummyWait(vkd, queue, *semaphoreC);
+ submitEmptyWait(vkd, queue, *semaphoreC);
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreC);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreC);
}
else
DE_FATAL("Unknown permanence.");
if (transference == TRANSFERENCE_COPY)
{
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, fd);
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreB, config.externalType, secondFd);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreA, config.externalType, fd);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphoreB, config.externalType, secondFd);
}
else
{
const vk::Unique<vk::VkSemaphore> semaphoreC (createAndImportSemaphore(vkd, *device, config.externalType, secondFd, flags));
if (transference == TRANSFERENCE_COPY)
- submitDummyWait(vkd, queue, *semaphoreC);
+ submitEmptyWait(vkd, queue, *semaphoreC);
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *semaphoreA);
- submitDummyWait(vkd, queue, *semaphoreC);
+ submitEmptySignal(vkd, queue, *semaphoreA);
+ submitEmptyWait(vkd, queue, *semaphoreC);
}
else
DE_FATAL("Unknown permanence.");
NativeHandle fd;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphore, config.externalType, fd);
+ submitEmptySignalAndGetSemaphoreNative(vkd, *device, queue, queueFamilyIndex, *semaphore, config.externalType, fd);
else
getSemaphoreNative(vkd, *device, *semaphore, config.externalType, fd);
const vk::Unique<vk::VkSemaphore> newSemaphore (createAndImportSemaphore(vkd, *device, config.externalType, newFd, flags));
if (transference == TRANSFERENCE_COPY)
- submitDummyWait(vkd, queue, *newSemaphore);
+ submitEmptyWait(vkd, queue, *newSemaphore);
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *newSemaphore);
- submitDummyWait(vkd, queue, *newSemaphore);
+ submitEmptySignal(vkd, queue, *newSemaphore);
+ submitEmptyWait(vkd, queue, *newSemaphore);
}
else
DE_FATAL("Unknown permanence.");
const vk::Unique<vk::VkFence> fence (vk::createFence(vkd, *device, &createInfo));
if (transference == TRANSFERENCE_COPY)
- submitDummySignal(vkd, queue, *fence);
+ submitEmptySignal(vkd, queue, *fence);
NativeHandle handleA;
getFenceNative(vkd, *device, *fence, config.externalType, handleA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceA, VK_TRUE, ~0ull));
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *fence);
+ submitEmptySignal(vkd, queue, *fence);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceA, VK_TRUE, ~0ull));
}
else
NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fence, config.externalType, handleA);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fence, config.externalType, handleA);
else
getFenceNative(vkd, *device, *fence, config.externalType, handleA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceA, VK_TRUE, ~0ull));
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
}
else
NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handleA);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handleA);
else
getFenceNative(vkd, *device, *fenceA, config.externalType, handleA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
}
else
{
NativeHandle handle;
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
{
const vk::VkFenceImportFlags flags = config.permanence == PERMANENCE_TEMPORARY ? vk::VK_FENCE_IMPORT_TEMPORARY_BIT : (vk::VkFenceImportFlagBits)0u;
getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
{
{
const vk::Unique<vk::VkFence> fenceB (createAndImportFence(vkd, *device, config.externalType, handle, flags));
const vk::Unique<vk::VkFence> fenceB (createAndImportFence(vkd, *device, config.externalType, handle, flags));
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
VK_CHECK(vkd.queueWaitIdle(queue));
const vk::Unique<vk::VkFence> fenceB (createFence(vkd, *device));
NativeHandle handle;
- submitDummySignal(vkd, queue, *fenceB);
+ submitEmptySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.queueWaitIdle(queue));
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
else
getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
}
else
const vk::Unique<vk::VkFence> fenceC (createFence(vkd, *device));
NativeHandle handle;
- submitDummySignal(vkd, queue, *fenceB);
+ submitEmptySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.queueWaitIdle(queue));
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
NativeHandle handleB (handle);
importFence(vkd, *device, *fenceB, config.externalType, handleB, flags);
importFence(vkd, *device, *fenceC, config.externalType, handle, flags);
// vkResetFences() should have restored fenceB's prior state, and it should now be reset,
// or fenceB should have its own separate payload
- submitDummySignal(vkd, queue, *fenceB);
+ submitEmptySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
}
else if (config.permanence == PERMANENCE_PERMANENT)
DE_ASSERT(transference == TRANSFERENCE_REFERENCE);
// Reset fences should have reset all of the fences
- submitDummySignal(vkd, queue, *fenceC);
+ submitEmptySignal(vkd, queue, *fenceC);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceA, VK_TRUE, ~0ull));
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
NativeHandle handle;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
else
getFenceNative(vkd, *device, *fenceA, config.externalType, handle);
- submitDummySignal(vkd, queue, *fenceB);
+ submitEmptySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
VK_CHECK(vkd.queueWaitIdle(queue));
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
}
else
NativeHandle handle;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fence, config.externalType, handle, exportNdx == 0 /* expect fence to be signaled after first pass */);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fence, config.externalType, handle, exportNdx == 0 /* expect fence to be signaled after first pass */);
else
getFenceNative(vkd, *device, *fence, config.externalType, handle, exportNdx == 0 /* expect fence to be signaled after first pass */);
}
- submitDummySignal(vkd, queue, *fence);
+ submitEmptySignal(vkd, queue, *fence);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fence, VK_TRUE, ~0ull));
VK_CHECK(vkd.queueWaitIdle(queue));
NativeHandle handleA;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handleA);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handleA);
else
getFenceNative(vkd, *device, *fenceA, config.externalType, handleA);
}
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceA, VK_TRUE, ~0ull));
}
else
const vk::Unique<vk::VkFence> fenceA (createExportableFence(vkd, *device, config.externalType));
NativeHandle handle;
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, handle);
{
const vk::Unique<vk::VkFence> fenceB (createAndImportFence(vkd, *device, config.externalType, handle, flags));
{
if (transference == TRANSFERENCE_COPY)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
VK_CHECK(vkd.queueWaitIdle(queue));
VK_CHECK(vkd.resetFences(*device, 1u, &*fenceB));
- submitDummySignal(vkd, queue, *fenceB);
+ submitEmptySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceA, VK_TRUE, ~0ull));
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
VK_CHECK(vkd.queueWaitIdle(queue));
VK_CHECK(vkd.resetFences(*device, 1u, &*fenceB));
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
VK_CHECK(vkd.resetFences(*device, 1u, &*fenceA));
- submitDummySignal(vkd, queue, *fenceB);
+ submitEmptySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceA, VK_TRUE, ~0ull));
VK_CHECK(vkd.queueWaitIdle(queue));
}
{
if (transference == TRANSFERENCE_COPY)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
VK_CHECK(vkd.queueWaitIdle(queue));
VK_CHECK(vkd.resetFences(*device, 1u, &*fenceB));
- submitDummySignal(vkd, queue, *fenceB);
+ submitEmptySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceA, VK_TRUE, ~0ull));
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
VK_CHECK(vkd.resetFences(*device, 1u, &*fenceA));
VK_CHECK(vkd.resetFences(*device, 1u, &*fenceB));
- submitDummySignal(vkd, queue, *fenceA);
- submitDummySignal(vkd, queue, *fenceB);
+ submitEmptySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceB);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceA, VK_TRUE, ~0ull));
NativeHandle fd;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, fd);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, fd);
else
getFenceNative(vkd, *device, *fenceA, config.externalType, fd);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceB, VK_TRUE, ~0ull));
}
else
if (transference == TRANSFERENCE_COPY)
{
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, fd);
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceB, config.externalType, secondFd);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, fd);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceB, config.externalType, secondFd);
}
else
{
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceC, VK_TRUE, ~0ull));
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceC, VK_TRUE, ~0ull));
}
else
if (transference == TRANSFERENCE_COPY)
{
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, fd);
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceB, config.externalType, secondFd);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceA, config.externalType, fd);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fenceB, config.externalType, secondFd);
}
else
{
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceC, VK_TRUE, ~0ull));
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *fenceA);
+ submitEmptySignal(vkd, queue, *fenceA);
VK_CHECK(vkd.waitForFences(*device, 1u, &*fenceC, VK_TRUE, ~0ull));
}
else
NativeHandle fd;
if (transference == TRANSFERENCE_COPY)
- submitDummySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fence, config.externalType, fd);
+ submitEmptySignalAndGetFenceNative(vkd, *device, queue, queueFamilyIndex, *fence, config.externalType, fd);
else
getFenceNative(vkd, *device, *fence, config.externalType, fd);
VK_CHECK(vkd.waitForFences(*device, 1u, &*newFence, VK_TRUE, ~0ull));
else if (transference == TRANSFERENCE_REFERENCE)
{
- submitDummySignal(vkd, queue, *newFence);
+ submitEmptySignal(vkd, queue, *newFence);
VK_CHECK(vkd.waitForFences(*device, 1u, &*newFence, VK_TRUE, ~0ull));
}
else
VkPhysicalDeviceBufferDeviceAddressFeatures bufferDeviceAddressFeatures = initVulkanStructure();
VkPhysicalDeviceVulkanMemoryModelFeatures vulkanMemoryModelFeatures = initVulkanStructure();
- struct DummyExtensionFeatures
+ struct UnusedExtensionFeatures
{
VkStructureType sType;
void* pNext;
VkBool32 descriptorIndexing;
VkBool32 samplerFilterMinmax;
- } dummyExtensionFeatures;
+ } unusedExtensionFeatures;
struct FeatureTable
{
FEATURE_TABLE_ITEM(vulkan12Features, shaderAtomicInt64Features, shaderSharedInt64Atomics, "VK_KHR_shader_atomic_int64"),
FEATURE_TABLE_ITEM(vulkan12Features, shaderFloat16Int8Features, shaderFloat16, "VK_KHR_shader_float16_int8"),
FEATURE_TABLE_ITEM(vulkan12Features, shaderFloat16Int8Features, shaderInt8, "VK_KHR_shader_float16_int8"),
- FEATURE_TABLE_ITEM(vulkan12Features, dummyExtensionFeatures, descriptorIndexing, DE_NULL),
+ FEATURE_TABLE_ITEM(vulkan12Features, unusedExtensionFeatures, descriptorIndexing, DE_NULL),
FEATURE_TABLE_ITEM(vulkan12Features, descriptorIndexingFeatures, shaderInputAttachmentArrayDynamicIndexing, "VK_EXT_descriptor_indexing"),
FEATURE_TABLE_ITEM(vulkan12Features, descriptorIndexingFeatures, shaderUniformTexelBufferArrayDynamicIndexing, "VK_EXT_descriptor_indexing"),
FEATURE_TABLE_ITEM(vulkan12Features, descriptorIndexingFeatures, shaderStorageTexelBufferArrayDynamicIndexing, "VK_EXT_descriptor_indexing"),
FEATURE_TABLE_ITEM(vulkan12Features, descriptorIndexingFeatures, descriptorBindingPartiallyBound, "VK_EXT_descriptor_indexing"),
FEATURE_TABLE_ITEM(vulkan12Features, descriptorIndexingFeatures, descriptorBindingVariableDescriptorCount, "VK_EXT_descriptor_indexing"),
FEATURE_TABLE_ITEM(vulkan12Features, descriptorIndexingFeatures, runtimeDescriptorArray, "VK_EXT_descriptor_indexing"),
- FEATURE_TABLE_ITEM(vulkan12Features, dummyExtensionFeatures, samplerFilterMinmax, "VK_EXT_sampler_filter_minmax"),
+ FEATURE_TABLE_ITEM(vulkan12Features, unusedExtensionFeatures, samplerFilterMinmax, "VK_EXT_sampler_filter_minmax"),
FEATURE_TABLE_ITEM(vulkan12Features, scalarBlockLayoutFeatures, scalarBlockLayout, "VK_EXT_scalar_block_layout"),
FEATURE_TABLE_ITEM(vulkan12Features, imagelessFramebufferFeatures, imagelessFramebuffer, "VK_KHR_imageless_framebuffer"),
FEATURE_TABLE_ITEM(vulkan12Features, uniformBufferStandardLayoutFeatures, uniformBufferStandardLayout, "VK_KHR_uniform_buffer_standard_layout"),
DEPENDENCY_DUAL_ITEM (vulkan12Features, vulkanMemoryModelFeatures, vulkanMemoryModelAvailabilityVisibilityChains, vulkanMemoryModel),
};
- deMemset(&dummyExtensionFeatures, 0, sizeof(dummyExtensionFeatures));
+ deMemset(&unusedExtensionFeatures, 0, sizeof(unusedExtensionFeatures));
for (size_t featureTableNdx = 0; featureTableNdx < DE_LENGTH_OF_ARRAY(featureTable); ++featureTableNdx)
{
size_t structSize = testedFeature.coreStructSize;
VkBool32* featurePtr = testedFeature.coreFieldPtr;
- if (structPtr != &dummyExtensionFeatures)
+ if (structPtr != &unusedExtensionFeatures)
features2.pNext = structPtr;
vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);
VkBool32* featurePtr = testedFeature.extFieldPtr;
const char* extStringPtr = testedFeature.extString;
- if (structPtr != &dummyExtensionFeatures)
+ if (structPtr != &unusedExtensionFeatures)
features2.pNext = structPtr;
if (extStringPtr == DE_NULL || isExtensionSupported(deviceExtensionProperties, RequiredExtension(extStringPtr)))
const void* pNext;
};
-tcu::TestStatus renderTriangleDummyExtStructTest (Context& context)
+tcu::TestStatus renderTriangleUnusedExtStructTest (Context& context)
{
const VkDevice vkDevice = context.getDevice();
const DeviceInterface& vk = context.getDeviceInterface();
const tcu::Vec4 clearColor (0.125f, 0.25f, 0.75f, 1.0f);
// This structure will stand in as an unknown extension structure that must be ignored by implementations.
- VoidVulkanStruct dummyExtStruct =
+ VoidVulkanStruct unusedExtStruct =
{
VK_STRUCTURE_TYPE_MAX_ENUM, // sType
DE_NULL // pNext
const VkBufferCreateInfo vertexBufferParams =
{
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
0u, // flags
(VkDeviceSize)sizeof(vertices), // size
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, // usage
const VkBufferCreateInfo readImageBufferParams =
{
VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
(VkBufferCreateFlags)0u, // flags
imageSizeBytes, // size
VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
const VkImageCreateInfo imageParams =
{
VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
0u, // flags
VK_IMAGE_TYPE_2D, // imageType
VK_FORMAT_R8G8B8A8_UNORM, // format
const VkRenderPassCreateInfo renderPassInfo =
{
VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
(VkRenderPassCreateFlags)0, // VkRenderPassCreateFlags flags
1u, // deUint32 attachmentCount
&colorAttachmentDescription, // const VkAttachmentDescription* pAttachments
const VkImageViewCreateInfo colorAttViewParams =
{
VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
0u, // flags
*image, // image
VK_IMAGE_VIEW_TYPE_2D, // viewType
const VkPipelineLayoutCreateInfo pipelineLayoutParams =
{
VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
(vk::VkPipelineLayoutCreateFlags)0,
0u, // setLayoutCount
DE_NULL, // pSetLayouts
const struct VkShaderModuleCreateInfo vertModuleInfo =
{
VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
- &dummyExtStruct,
+ &unusedExtStruct,
0,
(deUintptr)vertBin.getSize(),
(const deUint32*)vertBin.getBinary(),
const struct VkShaderModuleCreateInfo fragModuleInfo =
{
VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
- &dummyExtStruct,
+ &unusedExtStruct,
0,
(deUintptr)fragBin.getSize(),
(const deUint32*)fragBin.getBinary(),
VkPipelineShaderStageCreateInfo stageCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
0u, // VkPipelineShaderStageCreateFlags flags
VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage
DE_NULL, // VkShaderModule module
const VkPipelineVertexInputStateCreateInfo vertexInputStateCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
(VkPipelineVertexInputStateCreateFlags)0, // VkPipelineVertexInputStateCreateFlags flags
1u, // deUint32 vertexBindingDescriptionCount
&vertexInputBindingDescription, // const VkVertexInputBindingDescription* pVertexBindingDescriptions
const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
0u, // VkPipelineInputAssemblyStateCreateFlags flags
VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // VkPrimitiveTopology topology
VK_FALSE // VkBool32 primitiveRestartEnable
const VkPipelineViewportStateCreateInfo viewportStateCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
(VkPipelineViewportStateCreateFlags)0, // VkPipelineViewportStateCreateFlags flags
(deUint32)viewports.size(), // deUint32 viewportCount
viewports.data(), // const VkViewport* pViewports
const VkPipelineRasterizationStateCreateInfo rasterizationStateCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
0u, // VkPipelineRasterizationStateCreateFlags flags
VK_FALSE, // VkBool32 depthClampEnable
VK_FALSE, // VkBool32 rasterizerDiscardEnable
const VkPipelineMultisampleStateCreateInfo multisampleStateCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
0u, // VkPipelineMultisampleStateCreateFlags flags
VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples
VK_FALSE, // VkBool32 sampleShadingEnable
const VkPipelineDepthStencilStateCreateInfo depthStencilStateCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
0u, // VkPipelineDepthStencilStateCreateFlags flags
VK_FALSE, // VkBool32 depthTestEnable
VK_FALSE, // VkBool32 depthWriteEnable
const VkPipelineColorBlendStateCreateInfo colorBlendStateCreateInfo =
{
VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
0u, // VkPipelineColorBlendStateCreateFlags flags
VK_FALSE, // VkBool32 logicOpEnable
VK_LOGIC_OP_CLEAR, // VkLogicOp logicOp
const VkGraphicsPipelineCreateInfo pipelineCreateInfo =
{
VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType
- &dummyExtStruct, // const void* pNext
+ &unusedExtStruct, // const void* pNext
0u, // VkPipelineCreateFlags flags
(deUint32)pipelineShaderStageParams.size(), // deUint32 stageCount
&pipelineShaderStageParams[0], // const VkPipelineShaderStageCreateInfo* pStages
const VkFramebufferCreateInfo framebufferParams =
{
VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
0u, // flags
*renderPass, // renderPass
1u, // attachmentCount
const VkCommandPoolCreateInfo cmdPoolParams =
{
VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, // flags
queueFamilyIndex, // queueFamilyIndex
};
const VkCommandBufferAllocateInfo cmdBufParams =
{
VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
*cmdPool, // pool
VK_COMMAND_BUFFER_LEVEL_PRIMARY, // level
1u, // bufferCount
const VkCommandBufferBeginInfo commandBufBeginParams =
{
VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, // VkStructureType sType;
- &dummyExtStruct, // const void* pNext;
+ &unusedExtStruct, // const void* pNext;
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, // VkCommandBufferUsageFlags flags;
(const VkCommandBufferInheritanceInfo*)DE_NULL,
};
const VkMemoryBarrier vertFlushBarrier =
{
VK_STRUCTURE_TYPE_MEMORY_BARRIER, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
VK_ACCESS_HOST_WRITE_BIT, // srcAccessMask
VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT, // dstAccessMask
};
const VkImageMemoryBarrier colorAttBarrier =
{
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
- &dummyExtStruct, // pNext
+ &unusedExtStruct, // pNext
0u, // srcAccessMask
(VK_ACCESS_COLOR_ATTACHMENT_READ_BIT|
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT), // dstAccessMask
const VkRenderPassBeginInfo renderPassBeginInfo =
{
VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, // VkStructureType sType;
- &dummyExtStruct, // const void* pNext;
+ &unusedExtStruct, // const void* pNext;
*renderPass, // VkRenderPass renderPass;
*framebuffer, // VkFramebuffer framebuffer;
makeRect2D(0, 0, renderSize.x(), renderSize.y()), // VkRect2D renderArea;
const VkImageMemoryBarrier imageBarrier =
{
VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
- &dummyExtStruct, // const void* pNext;
+ &unusedExtStruct, // const void* pNext;
VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // VkAccessFlags srcAccessMask;
VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout oldLayout;
const VkBufferMemoryBarrier bufferBarrier =
{
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // VkStructureType sType;
- &dummyExtStruct, // const void* pNext;
+ &unusedExtStruct, // const void* pNext;
VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
VK_ACCESS_HOST_READ_BIT, // VkAccessFlags dstAccessMask;
VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
const VkMappedMemoryRange flushRange =
{
VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
- &dummyExtStruct,
+ &unusedExtStruct,
vertexBufferMemory->getMemory(),
vertexBufferMemory->getOffset(),
VK_WHOLE_SIZE
const VkFenceCreateInfo createInfo =
{
VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
- &dummyExtStruct,
+ &unusedExtStruct,
0u
};
const VkSubmitInfo submitInfo =
{
VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
- &dummyExtStruct, // const void* pNext;
+ &unusedExtStruct, // const void* pNext;
0u, // deUint32 waitSemaphoreCount;
DE_NULL, // const VkSemaphore* pWaitSemaphores;
(const VkPipelineStageFlags*)DE_NULL, // const VkPipelineStageFlags* pWaitDstStageMask;
addFunctionCase (smokeTests.get(), "create_sampler", "", createSamplerTest);
addFunctionCaseWithPrograms (smokeTests.get(), "create_shader", "", createShaderProgs, createShaderModuleTest);
addFunctionCaseWithPrograms (smokeTests.get(), "triangle", "", createTriangleProgs, renderTriangleTest);
- addFunctionCaseWithPrograms (smokeTests.get(), "triangle_ext_structs", "", createTriangleProgs, renderTriangleDummyExtStructTest);
+ addFunctionCaseWithPrograms (smokeTests.get(), "triangle_ext_structs", "", createTriangleProgs, renderTriangleUnusedExtStructTest);
addFunctionCaseWithPrograms (smokeTests.get(), "asm_triangle", "", createTriangleAsmProgs, renderTriangleTest);
addFunctionCaseWithPrograms (smokeTests.get(), "asm_triangle_no_opname", "", createProgsNoOpName, renderTriangleTest);
addFunctionCaseWithPrograms (smokeTests.get(), "unused_resolve_attachment", "", createTriangleProgs, renderTriangleUnusedResolveAttachmentTest);
extFunctions.push_back(FunctionInfo("vkGetImageMemoryRequirements2KHR", FUNCTIONORIGIN_DEVICE));
log << tcu::TestLog::Message << "Disabled extensions check - tries to get functions of disabled extensions from proper vkGet*ProcAddr." << tcu::TestLog::EndMessage;
- const char * const result = specialCasesCheck(ctx, log, failsQuantity, extFunctions) ? "Passed" : "Failed";
+ const char * const result = specialCasesCheck(ctx, log, failsQuantity, extFunctions) ? "Passed" : "Failed";
log << tcu::TestLog::Message << result << tcu::TestLog::EndMessage;
}
// Check special cases
{
- FunctionInfosList dummyFunctions = FunctionInfosList();
+ FunctionInfosList nonexistingFunctions = FunctionInfosList();
for (deUint32 i = 0; i <= FUNCTIONORIGIN_DEVICE; ++i)
{
const FunctionOrigin origin = static_cast<FunctionOrigin>(i);
- dummyFunctions.push_back(FunctionInfo("vkSomeName", origin));
- dummyFunctions.push_back(FunctionInfo("vkNonexistingKHR", origin));
- dummyFunctions.push_back(FunctionInfo("", origin));
+ nonexistingFunctions.push_back(FunctionInfo("vkSomeName", origin));
+ nonexistingFunctions.push_back(FunctionInfo("vkNonexistingKHR", origin));
+ nonexistingFunctions.push_back(FunctionInfo("", origin));
}
- log << tcu::TestLog::Message << "Special check - tries to get some dummy functions from various vkGet*ProcAddr." << tcu::TestLog::EndMessage;
- const char * const result = specialCasesCheck(ctx, log, failsQuantity, dummyFunctions) ? "Passed" : "Failed";
+ log << tcu::TestLog::Message << "Special check - tries to get some nonexisting functions from various vkGet*ProcAddr." << tcu::TestLog::EndMessage;
+ const char * const result = specialCasesCheck(ctx, log, failsQuantity, nonexistingFunctions) ? "Passed" : "Failed";
log << tcu::TestLog::Message << result << tcu::TestLog::EndMessage;
}
}
{
if (numInlineUniformBlocks < caseDef.maxInlineUniformBlocks)
{
- arraySizes[b] = randRange(&rnd, 1, (caseDef.maxInlineUniformBlockSize - 16) / 16); // subtract 16 for "ivec4 dummy"
+ arraySizes[b] = randRange(&rnd, 1, (caseDef.maxInlineUniformBlockSize - 16) / 16); // subtract 16 for "ivec4 unused"
arraySizes[b] = de::min(maxArray, arraySizes[b]);
- binding.descriptorCount = (arraySizes[b] ? arraySizes[b] : 1) * 16 + 16; // add 16 for "ivec4 dummy"
+ binding.descriptorCount = (arraySizes[b] ? arraySizes[b] : 1) * 16 + 16; // add 16 for "ivec4 unused"
numInlineUniformBlocks++;
}
}
else
{
- // Plug in a dummy descriptor type, so validation layers that don't
+ // Plug in an unused descriptor type, so validation layers that don't
// support inline_uniform_block don't crash.
binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
}
switch (binding.descriptorType)
{
case VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT:
- decls << "layout(set = " << s << ", binding = " << b << ") uniform inlineubodef" << s << "_" << b << " { ivec4 dummy; int val" << array.str() << "; } inlineubo" << s << "_" << b << ";\n";
+ decls << "layout(set = " << s << ", binding = " << b << ") uniform inlineubodef" << s << "_" << b << " { ivec4 unused; int val" << array.str() << "; } inlineubo" << s << "_" << b << ";\n";
break;
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
{
if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
{
- // Convert to bytes and add 16 for "ivec4 dummy" in case of inline uniform block
+ // Convert to bytes and add 16 for "ivec4 unused" in case of inline uniform block
const deUint32 uboRange = ai*16 + 16;
if (uboRange >= variableDescriptorSizes[s])
continue;
}
else if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
{
- // subtract 16 for "ivec4 dummy"
+ // subtract 16 for "ivec4 unused"
DE_ASSERT(binding.descriptorCount >= 16);
descriptor += binding.descriptorCount - 16;
}
{
if (binding.descriptorType == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK_EXT)
{
- // Convert to bytes and add 16 for "ivec4 dummy" in case of inline uniform block
+ // Convert to bytes and add 16 for "ivec4 unused" in case of inline uniform block
const deUint32 uboRange = ai*16 + 16;
if (uboRange >= variableDescriptorSizes[s])
continue;
};
inlineInfoVec[vecIndex] = iuBlock;
- w.dstArrayElement = ai*16 + 16; // add 16 to skip "ivec4 dummy"
+ w.dstArrayElement = ai*16 + 16; // add 16 to skip "ivec4 unused"
w.pNext = &inlineInfoVec[vecIndex];
w.descriptorCount = sizeof(deUint32);
}
for (size_t scissorIdx = 0; scissorIdx < m_scissors.size(); scissorIdx++)
{
while (scissors.size() <= m_firstScissor + scissorIdx)
- scissors.push_back(makeRect2D(0, 0)); // Add dummy scissor
+ scissors.push_back(makeRect2D(0, 0)); // Add empty scissor
scissors[m_firstScissor + scissorIdx] = m_scissors[scissorIdx];
}
.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
const Move<VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
- const VkExtent2D renderSizeDummy (makeExtent2D(1u, 1u));
+ const VkExtent2D renderSizeUnused (makeExtent2D(1u, 1u));
const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
- const Unique<VkPipeline> pipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule, *fragShaderModule, renderSizeDummy, 1u, true));
+ const Unique<VkPipeline> pipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule, *fragShaderModule, renderSizeUnused, 1u, true));
const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
const Move<VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
- const VkExtent2D renderSizeDummy (makeExtent2D(1u, 1u));
+ const VkExtent2D renderSizeUnused (makeExtent2D(1u, 1u));
const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
- const Unique<VkPipeline> pipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule, *fragShaderModule, renderSizeDummy, 1u, true));
+ const Unique<VkPipeline> pipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule, *fragShaderModule, renderSizeUnused, 1u, true));
const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
const Move<VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
- const VkExtent2D renderSizeDummy (makeExtent2D(1u, 1u));
+ const VkExtent2D renderSizeUnused (makeExtent2D(1u, 1u));
const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
- const Unique<VkPipeline> pipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule, *fragShaderModule, renderSizeDummy, 0u, true));
+ const Unique<VkPipeline> pipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule, *fragShaderModule, renderSizeUnused, 0u, true));
const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));
const Move<VkDescriptorSet> descriptorSet (makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout));
- const VkExtent2D renderSizeDummy (makeExtent2D(1u, 1u));
+ const VkExtent2D renderSizeUnused (makeExtent2D(1u, 1u));
const Unique<VkPipelineLayout> pipelineLayout (makePipelineLayout(vk, device, *descriptorSetLayout));
- const Unique<VkPipeline> pipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule, *fragShaderModule, renderSizeDummy, 0u, true));
+ const Unique<VkPipeline> pipeline (makeGraphicsPipeline(vk, device, *pipelineLayout, *renderPass, *vertShaderModule, *fragShaderModule, renderSizeUnused, 0u, true));
const Unique<VkCommandPool> cmdPool (createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queueFamilyIndex));
const Unique<VkCommandBuffer> cmdBuffer (allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
if (TEST_TYPE_POINT_SIZE == m_parameters.viewIndex)
{
const deUint32 vertexPerPrimitive = 1u;
- const deUint32 dummyQuarterNdx = 0u;
+ const deUint32 unusedQuarterNdx = 0u;
const int pointSize = static_cast<int>(layerNdx == 0u ? TEST_POINT_SIZE_WIDE : TEST_POINT_SIZE_SMALL);
if (subpassCount == 1)
for (deUint32 drawNdx = 0u; drawNdx < m_squareCount; ++drawNdx)
- setPoint(referenceFrame->getLevel(0), getQuarterRefColor(dummyQuarterNdx, vertexPerPrimitive * drawNdx, layerNdx, false), pointSize, layerNdx, drawNdx);
+ setPoint(referenceFrame->getLevel(0), getQuarterRefColor(unusedQuarterNdx, vertexPerPrimitive * drawNdx, layerNdx, false), pointSize, layerNdx, drawNdx);
else
- setPoint(referenceFrame->getLevel(0), getQuarterRefColor(dummyQuarterNdx, vertexPerPrimitive * subpassQuarterNdx, layerNdx, false), pointSize, layerNdx, subpassQuarterNdx);
+ setPoint(referenceFrame->getLevel(0), getQuarterRefColor(unusedQuarterNdx, vertexPerPrimitive * subpassQuarterNdx, layerNdx, false), pointSize, layerNdx, subpassQuarterNdx);
}
if (TEST_TYPE_MULTISAMPLE == m_parameters.viewIndex)
{
const deUint32 vertexPerPrimitive = 3u;
- const deUint32 dummyQuarterNdx = 0u;
+ const deUint32 unusedQuarterNdx = 0u;
if (subpassCount == 1)
for (deUint32 drawNdx = 0u; drawNdx < m_squareCount; ++drawNdx)
- fillTriangle(referenceFrame->getLevel(0), getQuarterRefColor(dummyQuarterNdx, vertexPerPrimitive * drawNdx, layerNdx, false), layerNdx, drawNdx);
+ fillTriangle(referenceFrame->getLevel(0), getQuarterRefColor(unusedQuarterNdx, vertexPerPrimitive * drawNdx, layerNdx, false), layerNdx, drawNdx);
else
- fillTriangle(referenceFrame->getLevel(0), getQuarterRefColor(dummyQuarterNdx, vertexPerPrimitive * subpassQuarterNdx, layerNdx, false), layerNdx, subpassQuarterNdx);
+ fillTriangle(referenceFrame->getLevel(0), getQuarterRefColor(unusedQuarterNdx, vertexPerPrimitive * subpassQuarterNdx, layerNdx, false), layerNdx, subpassQuarterNdx);
}
}
// maxMultiviewViewCount case
{
const VkExtent3D incompleteExtent3D = { 16u, 16u, 0u };
- const vector<deUint32> dummyMasks;
- const TestParameters parameters = { incompleteExtent3D, dummyMasks, testType, sampleCountFlags, colorFormat, renderPassType };
+ const vector<deUint32> unusedMasks;
+ const TestParameters parameters = { incompleteExtent3D, unusedMasks, testType, sampleCountFlags, colorFormat, renderPassType };
groupShader->addChild(new MultiViewRenderTestsCase(testCtx, "max_multi_view_view_count", "", parameters));
}
break;
case VK_SHADER_STAGE_GEOMETRY_BIT:
- programCollection.glslSources.add("dummy_geo" + missSuffix) << glu::GeometrySource(
+ programCollection.glslSources.add("unused_geo" + missSuffix) << glu::GeometrySource(
"#version 450 \n"
"layout(triangles) in;\n"
"layout(triangle_strip, max_vertices = 3) out;\n"
}
break;
case VK_SHADER_STAGE_GEOMETRY_BIT:
- m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "dummy_geo", "main");
+ m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "unused_geo", "main");
if (m_param->getCompileMissShaders())
{
- m_missPipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "dummy_geo_miss", "main");
+ m_missPipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "unused_geo_miss", "main");
}
break;
case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
break;
case VK_SHADER_STAGE_GEOMETRY_BIT:
- programCollection.glslSources.add("dummy_geo") << glu::GeometrySource(
+ programCollection.glslSources.add("unused_geo") << glu::GeometrySource(
"#version 450 \n"
"layout(triangles) in;\n"
"layout(triangle_strip, max_vertices = 3) out;\n"
m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_FRAGMENT_BIT, "color_frag", "main");
break;
case VK_SHADER_STAGE_GEOMETRY_BIT:
- m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "dummy_geo", "main");
+ m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "unused_geo", "main");
break;
case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT, "basic_tcs", "main");
break;
case VK_SHADER_STAGE_GEOMETRY_BIT:
- programCollection.glslSources.add("dummy_geo") << glu::GeometrySource(
+ programCollection.glslSources.add("unused_geo") << glu::GeometrySource(
"#version 450 \n"
"layout(triangles) in;\n"
"layout(triangle_strip, max_vertices = 3) out;\n"
}
else
{
- m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "dummy_geo", "main");
+ m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "unused_geo", "main");
}
break;
case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
}
};
-//! Make a dummy sampler.
+//! Make an (unused) sampler.
Move<VkSampler> makeSampler (const DeviceInterface& vk, const VkDevice device)
{
const VkSamplerCreateInfo samplerParams =
}
};
-//! Make a dummy sampler.
+//! Make an (unused) sampler.
Move<VkSampler> makeSampler (const DeviceInterface& vk, const VkDevice device)
{
const VkSamplerCreateInfo samplerParams =
Move<VkBuffer> colorBuffer; //!< Buffer used to copy image data
MovePtr<Allocation> colorBufferAlloc;
VkDeviceSize colorBufferSize;
- Move<VkSampler> defaultSampler; //!< Dummy sampler, we are using texel fetches
+ Move<VkSampler> defaultSampler; //!< Unused sampler, we are using texel fetches
WorkingData (void)
: numVertices ()
<< "} matInst;\n";
break;
case SIZE_CASE_48:
- vertexSrc << "int dummy1;\n"
- << "vec4 dummy2;\n"
+ vertexSrc << "int unused1;\n"
+ << "vec4 unused2;\n"
<< "vec4 color;\n"
<< "} matInst;\n";
break;
src << " layout(offset = " << m_pushConstantRange[rangeNdx].range.offset << ") vec4 color[2];\n";
break;
case SIZE_CASE_36:
- src << " layout(offset = " << m_pushConstantRange[rangeNdx].range.offset << ") int dummy1;\n"
- << " layout(offset = " << (m_pushConstantRange[rangeNdx].range.offset + 4) << ") vec4 dummy2;\n"
+ src << " layout(offset = " << m_pushConstantRange[rangeNdx].range.offset << ") int unused1;\n"
+ << " layout(offset = " << (m_pushConstantRange[rangeNdx].range.offset + 4) << ") vec4 unused2;\n"
<< " layout(offset = " << (m_pushConstantRange[rangeNdx].range.offset + 20) << ") vec4 color;\n";
break;
case SIZE_CASE_128:
{
BasicGraphicsTest::initPrograms(programCollection);
- programCollection.glslSources.add("dummy_geo") << glu::GeometrySource(
+ programCollection.glslSources.add("unused_geo") << glu::GeometrySource(
"#version 310 es\n"
"#extension GL_EXT_geometry_shader : enable\n"
"layout(triangles) in;\n"
if(m_features.geometryShader == VK_TRUE)
{
- m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "dummy_geo");
+ m_pipelineBuilder.bindShaderStage(VK_SHADER_STAGE_GEOMETRY_BIT, "unused_geo");
}
if(m_features.tessellationShader == VK_TRUE)
"layout(set=0, binding=1) buffer ProtectedHelper\n"
"{\n"
" highp uint zero; // set to 0\n"
- " highp uint dummyOut;\n"
+ " highp uint unusedOut;\n"
"} helper;\n"
"\n"
"layout(set=0, binding=2) uniform Data\n"
"void error ()\n"
"{\n"
" for (uint x = 0; x < 10; x += helper.zero)\n"
- " atomicAdd(helper.dummyOut, 1u);\n"
+ " atomicAdd(helper.unusedOut, 1u);\n"
"}\n"
"\n"
"bool compare (${VAR_TYPE} a, ${VAR_TYPE} b, float threshold)\n"
"layout(set=0, binding=1) buffer ProtectedHelper\n"
"{\n"
" highp uint zero; // set to 0\n"
- " highp uint dummyOut;\n"
+ " highp uint unusedOut;\n"
"} helper;\n"
"\n"
"layout(set=0, binding=2) uniform Data\n"
"void error ()\n"
"{\n"
" for (uint x = 0; x < 10; x += helper.zero)\n"
- " atomicAdd(helper.dummyOut, 1u);\n"
+ " atomicAdd(helper.unusedOut, 1u);\n"
"}\n"
"\n"
"bool compare (${VAR_TYPE} a, ${VAR_TYPE} b, float threshold)\n"
"layout(set=0, binding=1) buffer ProtectedHelper\n"
"{\n"
" highp uint zero; // set to 0\n"
- " highp uint dummyOut;\n"
+ " highp uint unusedOut;\n"
"} helper;\n"
"\n"
"void main (void)\n"
"layout(set=0, binding=1) buffer ProtectedHelper\n"
"{\n"
" highp uint zero; // set to 0\n"
- " highp uint dummyOut;\n"
+ " highp uint unusedOut;\n"
"} helper;\n"
"\n"
"layout(set=0, binding=2) uniform Data\n"
"void error ()\n"
"{\n"
" for (uint x = 0; x < 10; x += helper.zero)\n"
- " atomicAdd(helper.dummyOut, 1u);\n"
+ " atomicAdd(helper.unusedOut, 1u);\n"
"}\n"
"\n"
"bool compare (vec4 a, vec4 b, float threshold)\n"
"layout(set=0, binding=1) buffer ProtectedHelper\n"
"{\n"
" highp uint zero; // set to 0\n"
- " highp uint dummyOut;\n"
+ " highp uint unusedOut;\n"
"} helper;\n"
"\n"
"void main (void)\n"
"layout(std140, set = 0, binding = 2) buffer ProtectedHelper\n"
"{\n"
" highp uint zero;\n"
- " highp uint dummyOut;\n"
+ " highp uint unusedOut;\n"
"} helper;\n"
"\n"
"void error()\n"
"{\n"
" for (uint x = 0u; x < 10u; x += helper.zero)\n"
- " atomicAdd(helper.dummyOut, 1u);\n"
+ " atomicAdd(helper.unusedOut, 1u);\n"
"}\n"
"\n"
"${COMPARE_FUNCTION}"
"layout(std140, set=0, binding=2) buffer ProtectedHelper\n"
"{\n"
" highp uint zero; // set to 0\n"
- " highp uint dummyOut;\n"
+ " highp uint unusedOut;\n"
"} helper;\n"
"\n"
"void main (void)\n"
"{\n"
" helper.zero = 0;\n"
- " helper.dummyOut = 0;\n"
+ " helper.unusedOut = 0;\n"
"}\n";
dst.glslSources.add("ResetSSBO") << glu::ComputeSource(resetSSBOShader);
// Code of FragmentDensityMapTestInstance is also used to test subsampledLoads, subsampledCoarseReconstructionEarlyAccess,
// maxDescriptorSetSubsampledSamplers properties.
-// set value of DRY_RUN_WITHOUT_FDM_EXTENSION to 1 for dummy run hat checks the correctness of the code without using VK_EXT_fragment_density_map extension
+// set value of DRY_RUN_WITHOUT_FDM_EXTENSION to 1 for a dry run that checks the correctness of the code without using VK_EXT_fragment_density_map extension
#define DRY_RUN_WITHOUT_FDM_EXTENSION 0
namespace vkt
VkDeviceSize vertexBufferOffset = 0u;
- // Command Buffer A will set his own event but wait for the B's event before continuing to the next subpass.
+ // Command Buffer A will set its own event but wait for B's event before continuing to the next subpass.
beginCommandBuffer(vk, *m_cmdBufferA, 0u);
beginCommandBuffer(vk, *m_cmdBufferB, 0u);
vk.cmdBeginRenderPass(*m_cmdBufferA, &renderPassBeginInfoA, VK_SUBPASS_CONTENTS_INLINE);
// An adapter function matching FillBufferProcPtr interface. Fills a buffer with 0xBABABABABABA... pattern. Used to fill up output buffers.
// Since this pattern cannot show up in generated test data it should not show up in the valid output.
-void populateBufferWithDummy (void* buffer,
- VkDeviceSize size,
- const void* const blob)
+void populateBufferWithFiller (void* buffer,
+ VkDeviceSize size,
+ const void* const blob)
{
DE_UNREF(blob);
deMemset(buffer, 0xBA, static_cast<size_t>(size));
const bool m_accessOutOfBackingMemory;
};
-// In case I detect that some prerequisites are not fullfilled I am creating this lightweight dummy test instance instead of AccessInstance. Should be bit faster that way.
+// In case I detect that some prerequisites are not fulfilled I am creating this lightweight empty test instance instead of AccessInstance. Should be a bit faster that way.
class NotSupportedInstance : public vkt::TestInstance
{
public:
};
// A routing generating SPIRV code for all test cases in this group
-std::string MakeShader(VkShaderStageFlags shaderStage, ShaderType shaderType, VkFormat bufferFormat, bool reads, bool dummy)
+std::string MakeShader(VkShaderStageFlags shaderStage, ShaderType shaderType, VkFormat bufferFormat, bool reads, bool unused)
{
// faster to write
const char is = '=';
}
// If we are testing vertex shader or fragment shader we need to provide the other one for the pipeline too.
- // So the not tested one is 'dummy'. It is then a minimal/simplest possible pass-through shader.
- // If we are testing compute shader we dont need dummy shader at all.
- if (dummy)
+ // So the not tested one is 'unused'. It is then a minimal/simplest possible pass-through shader.
+ // If we are testing compute shader we don't need unused shader at all.
+ if (unused)
{
if (shaderStage == VK_SHADER_STAGE_FRAGMENT_BIT)
{
}
}
- // This is common for test shaders and dummy ones
+ // This is common for test shaders and unused ones
// We need to fill stage ouput from shader properly
// output vertices positions in vertex shader
if (shaderStage == VK_SHADER_STAGE_VERTEX_BIT)
}
createTestBuffer(vk, *m_device, inBufferAccessRange, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, memAlloc, m_inBuffer, m_inBufferAlloc, m_inBufferAccess, &populateBufferWithValues, &m_bufferFormat);
- createTestBuffer(vk, *m_device, outBufferAccessRange, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, memAlloc, m_outBuffer, m_outBufferAlloc, m_outBufferAccess, &populateBufferWithDummy, DE_NULL);
+ createTestBuffer(vk, *m_device, outBufferAccessRange, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT, memAlloc, m_outBuffer, m_outBufferAlloc, m_outBufferAccess, &populateBufferWithFiller, DE_NULL);
deInt32 indices[] = {
(m_accessOutOfBackingMemory && (m_bufferAccessType == BUFFER_ACCESS_TYPE_READ_FROM_STORAGE)) ? static_cast<deInt32>(RobustAccessWithPointersTest::s_testArraySize) - 1 : 0,
}
/*--------------------------------------------------------------------*//*!
- * \brief Dummy placeholder type for unused template parameters.
+ * \brief Empty placeholder type for unused template parameters.
*
* In the precision tests we are dealing with functions of different arities.
* To minimize code duplication, we only define templates with the maximum
template <typename T>
class ExprP : public ExprPBase<T> {};
-// We treat Voids as containers since the dummy parameters in generalized
+// We treat Voids as containers since the unused parameters in generalized
// vector functions are represented as Voids.
template <>
class ExprP<Void> : public ContainerExprPBase<Void> {};
m_executor->execute(int(numValues), inputArr, outputArr);
- // Initialize environment with dummy values so we don't need to bind in inner loop.
+ // Initialize environment with unused values so we don't need to bind in inner loop.
{
const typename Traits<In0>::IVal in0;
const typename Traits<In1>::IVal in1;
return createDescriptorSetLayout(vkd, device, &createInfo);
}
-static Move<VkDescriptorPool> createDummyDescriptorPool (const DeviceInterface& vkd, VkDevice device)
+static Move<VkDescriptorPool> createEmptyDescriptorPool (const DeviceInterface& vkd, VkDevice device)
{
- const VkDescriptorPoolSize dummySize =
+ const VkDescriptorPoolSize emptySize =
{
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1u,
(VkDescriptorPoolCreateFlags)VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
1u,
1u,
- &dummySize
+ &emptySize
};
return createDescriptorPool(vkd, device, &createInfo);
}
Move<VkCommandBuffer> cmdBuffer;
Unique<VkDescriptorSetLayout> emptyDescriptorSetLayout (createEmptyDescriptorSetLayout(vk, vkDevice));
- Unique<VkDescriptorPool> dummyDescriptorPool (createDummyDescriptorPool(vk, vkDevice));
- Unique<VkDescriptorSet> emptyDescriptorSet (allocateSingleDescriptorSet(vk, vkDevice, *dummyDescriptorPool, *emptyDescriptorSetLayout));
+ Unique<VkDescriptorPool> emptyDescriptorPool (createEmptyDescriptorPool(vk, vkDevice));
+ Unique<VkDescriptorSet> emptyDescriptorSet (allocateSingleDescriptorSet(vk, vkDevice, *emptyDescriptorPool, *emptyDescriptorSetLayout));
clearRenderData();
}
else
{
- // a dummy submission, won't be used in a call to vkQueueBindSparse
+ // an unused submission, won't be used in a call to vkQueueBindSparse
queueSubmissions.push_back(makeSubmissionSparse(sparseQueue, 0u, DE_NULL, 0u, DE_NULL));
}
de::Random rnd (deStringHash(group->getName()));
const int numElements = 128;
const vector<deFloat16> float16Data = getFloat16s(rnd, numElements);
- const vector<deFloat16> float16DummyData (numElements, 0);
+ const vector<deFloat16> float16UnusedData (numElements, 0);
ComputeShaderSpec spec;
std::ostringstream shaderTemplate;
spec.verifyIO = computeCheckBuffersFloats;
spec.coherentMemory = true;
spec.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16Data))));
- spec.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16DummyData))));
+ spec.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16UnusedData))));
spec.extensions.push_back("VK_KHR_16bit_storage");
spec.requestedVulkanFeatures = get16BitStorageFeatures("uniform_buffer_block");
}
};
- vector<deFloat16> float16DummyData (numElements, 0);
+ vector<deFloat16> float16UnusedData (numElements, 0);
for (deUint32 capIdx = 0; capIdx < DE_LENGTH_OF_ARRAY(CAPABILITIES); ++capIdx)
for (deUint32 tyIdx = 0; tyIdx < DE_LENGTH_OF_ARRAY(cTypes[capIdx]); ++tyIdx)
spec.inputs.push_back(Resource(BufferSp(new Float32Buffer(float32Data)), CAPABILITIES[capIdx].dtype));
// We provided a custom verifyIO in the above in which inputs will be used for checking.
- // So put dummy data in the expected values.
- spec.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16DummyData))));
+ // So put unused data in the expected values.
+ spec.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16UnusedData))));
spec.extensions.push_back("VK_KHR_16bit_storage");
spec.requestedVulkanFeatures = get16BitStorageFeatures(CAPABILITIES[capIdx].name);
RGBA defaultColors[4];
const vector<float> float32Data = getFloat32s(rnd, numDataPoints);
vector<float> float32DataPadded;
- vector<deFloat16> float16DummyData (numDataPoints, 0);
+ vector<deFloat16> float16UnusedData (numDataPoints, 0);
const StringTemplate capabilities ("OpCapability ${cap}\n");
for (size_t dataIdx = 0; dataIdx < float32Data.size(); ++dataIdx)
resources.inputs.push_back(Resource(BufferSp(new Float32Buffer(arrayStrides[capIdx] == 4 ? float32Data : float32DataPadded)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
// We use a custom verifyIO to check the result via computing directly from inputs; the contents in outputs do not matter.
- resources.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16DummyData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ resources.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16UnusedData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specs["cap"] = CAPABILITIES[capIdx].cap;
specs["indecor"] = CAPABILITIES[capIdx].decor;
GraphicsResources resources;
resources.inputs.push_back(Resource(BufferSp(new Float32Buffer(float32Data)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
// We use a custom verifyIO to check the result via computing directly from inputs; the contents in outputs do not matter.
- resources.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16DummyData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ resources.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16UnusedData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
{ // vector cases
fragments["pre_main"] =
};
vector<double> float64Data = getFloat64s(rnd, numElements);
- vector<deFloat16> float16DummyData (numElements, 0);
+ vector<deFloat16> float16UnusedData (numElements, 0);
for (deUint32 capIdx = 0; capIdx < DE_LENGTH_OF_ARRAY(CAPABILITIES); ++capIdx)
for (deUint32 tyIdx = 0; tyIdx < DE_LENGTH_OF_ARRAY(cTypes); ++tyIdx)
spec.inputs.push_back(Resource(BufferSp(new Float64Buffer(float64Data, padding)), CAPABILITIES[capIdx].dtype));
// We provided a custom verifyIO in the above in which inputs will be used for checking.
- // So put dummy data in the expected values.
- spec.outputs.push_back(BufferSp(new Float16Buffer(float16DummyData)));
+ // So put unused data in the expected values.
+ spec.outputs.push_back(BufferSp(new Float16Buffer(float16UnusedData)));
spec.extensions.push_back("VK_KHR_16bit_storage");
const deUint32 numDataPoints = 256;
RGBA defaultColors[4];
vector<double> float64Data = getFloat64s(rnd, numDataPoints);
- vector<deFloat16> float16DummyData (numDataPoints, 0);
+ vector<deFloat16> float16UnusedData (numDataPoints, 0);
const StringTemplate capabilities ("OpCapability Float64\n"
"OpCapability ${cap}\n");
// We use a custom verifyIO to check the result via computing directly from inputs; the contents in outputs do not matter.
- resources.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16DummyData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ resources.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16UnusedData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
extensions.push_back("VK_KHR_16bit_storage");
de::Random rnd (deStringHash(group->getName()));
const int numElements = 128;
const vector<deInt8> int8Data = getInt8s(rnd, numElements);
- const vector<deInt8> int8DummyData (numElements, 0);
+ const vector<deInt8> int8UnusedData (numElements, 0);
ComputeShaderSpec spec;
std::ostringstream shaderTemplate;
shaderTemplate<<"OpCapability Shader\n"
spec.verifyIO = computeCheckBuffers;
spec.coherentMemory = true;
spec.inputs.push_back(BufferSp(new Int8Buffer(int8Data)));
- spec.outputs.push_back(BufferSp(new Int8Buffer(int8DummyData)));
+ spec.outputs.push_back(BufferSp(new Int8Buffer(int8UnusedData)));
spec.extensions.push_back("VK_KHR_storage_buffer_storage_class");
spec.extensions.push_back("VK_KHR_8bit_storage");
spec.requestedVulkanFeatures.ext8BitStorage = EXT8BITSTORAGEFEATURES_STORAGE_BUFFER;
" %_ptr_Uniform_Output = OpTypePointer Uniform %Output\n"
" %dataOutput = OpVariable %_ptr_Uniform_Output Uniform\n"
" %_ptr_Uniform_uint = OpTypePointer Uniform %uint\n"
- " %uint_dummy = OpConstant %uint 2863311530\n"
+ " %uint_unused = OpConstant %uint 2863311530\n"
" %main = OpFunction %void None %3\n"
" %5 = OpLabel\n"
" %i = OpVariable %_ptr_Function_uint Function\n"
" OpBranch %merge\n"
" %dead = OpLabel\n"
" %35 = OpAccessChain %_ptr_Uniform_uint %dataOutput %uint_0 %uint_i\n"
- " OpStore %35 %uint_dummy\n"
+ " OpStore %35 %uint_unused\n"
" OpBranch %merge\n"
" %merge = OpLabel\n"
" OpReturn\n"
" %dataOutput = OpVariable %_ptr_Uniform_Output Uniform\n"
" %_ptr_Uniform_uint = OpTypePointer Uniform %u32\n"
" %fp_u32 = OpTypePointer Function %u32\n"
- " %uint_dummy = OpConstant %u32 2863311530\n";
+ " %uint_unused = OpConstant %u32 2863311530\n";
fragments["decoration"] =
" OpDecorate %_arr_uint_uint_128 ArrayStride 4\n"
" OpBranch %condmerge\n"
" %dead = OpLabel\n"
" %35 = OpAccessChain %_ptr_Uniform_uint %dataOutput %c_u32_0 %uint_i\n"
- " OpStore %35 %uint_dummy\n"
+ " OpStore %35 %uint_unused\n"
" OpBranch %condmerge\n"
" %condmerge = OpLabel\n"
" OpBranch %inc\n"
}
// varying is not used but it needs to be specified so lets use type_i32 for it
- string dummyVertVarying = "%BP_vertex_result = OpVariable %type_i32_optr Output\n";
- string dummyFragVarying = "%BP_vertex_result = OpVariable %type_i32_iptr Input\n";
+ string unusedVertVarying = "%BP_vertex_result = OpVariable %type_i32_optr Output\n";
+ string unusedFragVarying = "%BP_vertex_result = OpVariable %type_i32_iptr Input\n";
vertCapabilities = "";
vertExtensions = "";
vertExecutionMode = "";
fragExecutionMode = behaviorExecutionMode;
- vertIODefinitions = dummyVertVarying;
- fragIODefinitions = dummyFragVarying;
+ vertIODefinitions = unusedVertVarying;
+ fragIODefinitions = unusedFragVarying;
vertArguments = "";
fragArguments = specOpData.arguments;
// Separate sampler for sampled images
if ((DescriptorType)descNdx == DESCRIPTOR_TYPE_SAMPLED_IMAGE)
{
- vector<tcu::Vec4> dummyData;
- spec.inputs.push_back(Resource(BufferSp(new Vec4Buffer(dummyData))));
+ vector<tcu::Vec4> unusedData;
+ spec.inputs.push_back(Resource(BufferSp(new Vec4Buffer(unusedData))));
spec.inputs[1].setDescriptorType(VK_DESCRIPTOR_TYPE_SAMPLER);
}
// Separate sampler for sampled images
if ((DescriptorType)descNdx == DESCRIPTOR_TYPE_SAMPLED_IMAGE)
{
- vector<tcu::Vec4> dummyData;
- resources.inputs.push_back(Resource(BufferSp(new Vec4Buffer(dummyData)), VK_DESCRIPTOR_TYPE_SAMPLER));
+ vector<tcu::Vec4> unusedData;
+ resources.inputs.push_back(Resource(BufferSp(new Vec4Buffer(unusedData)), VK_DESCRIPTOR_TYPE_SAMPLER));
}
// Second combined image sampler with different image data
// Separate sampler for sampled images
if ((DescriptorType)descNdx == DESCRIPTOR_TYPE_SAMPLED_IMAGE)
{
- vector<Vec4> dummyData;
- resources.inputs.push_back(Resource(BufferSp(new Vec4Buffer(dummyData)), VK_DESCRIPTOR_TYPE_SAMPLER));
+ vector<Vec4> unusedData;
+ resources.inputs.push_back(Resource(BufferSp(new Vec4Buffer(unusedData)), VK_DESCRIPTOR_TYPE_SAMPLER));
}
// Second combined image sampler with different image data
const vector<deFloat16> float16Data2 = squarize(float16DataScalar, 1);
const vector<deFloat16> float16DataVec1 = squarizeVector(float16DataVector, 0); // Total Size: 2 * (square(square(sizeof(float16DataVector))))
const vector<deFloat16> float16DataVec2 = squarizeVector(float16DataVector, 1);
- const vector<deFloat16> float16OutDummy (float16Data1.size(), 0);
- const vector<deFloat16> float16OutVecDummy (float16DataVec1.size(), 0);
+ const vector<deFloat16> float16OutUnused (float16Data1.size(), 0);
+ const vector<deFloat16> float16OutVecUnused (float16DataVec1.size(), 0);
struct TestOp
{
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16Data1)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16Data2)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
- specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutDummy)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutUnused)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.verifyIO = nanSupported ? testOp.verifyFuncNan : testOp.verifyFuncNonNan;
extensions.push_back("VK_KHR_shader_float16_int8");
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16DataVec1)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16DataVec2)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
- specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutVecDummy)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutVecUnused)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.verifyIO = nanSupported ? testOp.verifyFuncNan : testOp.verifyFuncNonNan;
extensions.push_back("VK_KHR_shader_float16_int8");
const StringTemplate capabilities ("OpCapability Float16\n");
const deUint32 numDataPoints = 256;
const vector<deFloat16> float16InputData = getFloat16s(rnd, numDataPoints);
- const vector<deFloat16> float16OutputDummy (float16InputData.size(), 0);
+ const vector<deFloat16> float16OutputUnused (float16InputData.size(), 0);
map<string, string> fragments;
struct TestType
fragments["testfun"] += StringTemplate(testType.storeFunc).specialize({{"var", "ssbo_dst"}});
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16InputData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
- specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutputDummy)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutputUnused)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.verifyIO = compareFP16FunctionSetFunc;
extensions.push_back("VK_KHR_shader_float16_int8");
de::Random rnd (deStringHash(testGroup->getName()));
const deUint32 numDataPoints = 256;
const vector<deFloat16> float16InputData = getFloat16s(rnd, numDataPoints);
- const vector<deFloat16> float16OutputDummy (float16InputData.size(), 0);
+ const vector<deFloat16> float16OutputUnused (float16InputData.size(), 0);
struct TestType
{
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16InputData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.inputs.push_back(Resource(BufferSp(new Uint32Buffer(inputDataNdx)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
- specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutputDummy)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutputUnused)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.verifyIO = compareFP16VectorExtractFunc;
extensions.push_back("VK_KHR_shader_float16_int8");
const deUint32 replacement = 42;
const deUint32 numDataPoints = 256;
const vector<deFloat16> float16InputData = getFloat16s(rnd, numDataPoints);
- const vector<deFloat16> float16OutputDummy (float16InputData.size(), 0);
+ const vector<deFloat16> float16OutputUnused (float16InputData.size(), 0);
struct TestType
{
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16InputData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.inputs.push_back(Resource(BufferSp(new Uint32Buffer(inputDataNdx)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
- specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutputDummy)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutputUnused)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.verifyIO = testType.verifyIOFunc;
extensions.push_back("VK_KHR_shader_float16_int8");
const deUint32 outputStride = (dstType.typeComponents == 3) ? 4 : dstType.typeComponents;
const vector<deFloat16> float16Input0Data = getFloat16s(rnd, input0Stride * numDataPoints);
const vector<deFloat16> float16Input1Data = getFloat16s(rnd, input1Stride * numDataPoints);
- const vector<deFloat16> float16OutputDummy (outputStride * numDataPoints, 0);
+ const vector<deFloat16> float16OutputUnused (outputStride * numDataPoints, 0);
const string testName = de::toString(dstType.typeComponents) + de::toString(src0Type.typeComponents) + de::toString(src1Type.typeComponents);
deUint32 caseCount = 0;
SpecResource specResource;
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16Input0Data)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(float16Input1Data)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
- specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutputDummy)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16OutputUnused)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.verifyIO = getFloat16VectorShuffleVerifyIOFunc(dstType.typeComponents, src0Type.typeComponents, src1Type.typeComponents);
extensions.push_back("VK_KHR_shader_float16_int8");
map<string, string> fragments;
vector<string> extensions;
vector<deFloat16> inputFP16;
- vector<deFloat16> dummyFP16Output;
+ vector<deFloat16> unusedFP16Output;
// Generate values for input
inputFP16.reserve(structItemsCount);
for (deUint32 structItemNdx = 0; structItemNdx < structItemsCount; ++structItemNdx)
inputFP16.push_back((accessPath[structItemNdx] == DE_NULL) ? exceptionValue : tcu::Float16(float(structItemNdx)).bits());
- dummyFP16Output.resize(structItemsCount);
+ unusedFP16Output.resize(structItemsCount);
// Generate cases for OpSwitch
{
}
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(inputFP16)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
- specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(dummyFP16Output)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(unusedFP16Output)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.verifyIO = compareFP16CompositeFunc;
extensions.push_back("VK_KHR_shader_float16_int8");
const fp16type x (*in[0]);
const double d (x.asDouble());
double i (0.0);
- const double dummy (deModf(d, &i));
+ const double unused (deModf(d, &i));
const double result (i);
- DE_UNREF(dummy);
+ DE_UNREF(unused);
if (x.isInf() || x.isNaN())
return false;
const fp16type x (*in[0]);
const double d (x.asDouble());
int e (0);
- const double dummy (deFrExp(d, &e));
+ const double unused (deFrExp(d, &e));
const double result (static_cast<double>(e));
- DE_UNREF(dummy);
+ DE_UNREF(unused);
if (x.isNaN() || x.isInf())
return false;
const Math16ArgFragments argFragmentModfInt =
{
- " %val_src0 = OpFunctionCall %${t0} %ld_arg_ssbo_src0 %ndx\n"
- "%val_dummy = ${op} %${tr} ${ext_inst} %val_src0 %tmp\n"
- " %tmp0 = OpAccessChain %fp_tmp %tmp\n"
- " %val_dst = OpLoad %${tr} %tmp0\n"
- " %dst = OpFunctionCall %void %st_fn_ssbo_dst %val_dst %ndx\n",
+ " %val_src0 = OpFunctionCall %${t0} %ld_arg_ssbo_src0 %ndx\n"
+ "%val_unused = ${op} %${tr} ${ext_inst} %val_src0 %tmp\n"
+ " %tmp0 = OpAccessChain %fp_tmp %tmp\n"
+ " %val_dst = OpLoad %${tr} %tmp0\n"
+ " %dst = OpFunctionCall %void %st_fn_ssbo_dst %val_dst %ndx\n",
- " %fp_tmp = OpTypePointer Function %${tr}\n",
+ " %fp_tmp = OpTypePointer Function %${tr}\n",
"",
- " %tmp = OpVariable %fp_tmp Function\n",
+ " %tmp = OpVariable %fp_tmp Function\n",
};
const Math16ArgFragments argFragmentModfStruct =
const Math16ArgFragments argFragmentFrexpE =
{
- " %val_src0 = OpFunctionCall %${t0} %ld_arg_ssbo_src0 %ndx\n"
- " %out_exp = OpAccessChain %fp_${dr}i32 %tmp\n"
- "%val_dummy = ${op} %${tr} ${ext_inst} %val_src0 %out_exp\n"
- "%val_dst_i = OpLoad %${dr}i32 %out_exp\n"
- " %val_dst = OpConvertSToF %${tr} %val_dst_i\n"
- " %dst = OpFunctionCall %void %st_fn_ssbo_dst %val_dst %ndx\n",
+ " %val_src0 = OpFunctionCall %${t0} %ld_arg_ssbo_src0 %ndx\n"
+ " %out_exp = OpAccessChain %fp_${dr}i32 %tmp\n"
+ "%val_unused = ${op} %${tr} ${ext_inst} %val_src0 %out_exp\n"
+ "%val_dst_i = OpLoad %${dr}i32 %out_exp\n"
+ " %val_dst = OpConvertSToF %${tr} %val_dst_i\n"
+ " %dst = OpFunctionCall %void %st_fn_ssbo_dst %val_dst %ndx\n",
"",
"",
- " %tmp = OpVariable %fp_${dr}i32 Function\n",
+ " %tmp = OpVariable %fp_${dr}i32 Function\n",
};
string load_funcs[MATH16_TYPE_LAST];
const size_t numFloatsPerArg0Type = testTypes[testFunc.typeArg0].typeArrayStride / sizeof(deFloat16);
const size_t iterations = numDataPoints / numFloatsPerArg0Type;
const size_t numFloatsPerResultType = testTypes[testFunc.typeResult].typeArrayStride / sizeof(deFloat16);
- const vector<deFloat16> float16DummyOutput (iterations * numFloatsPerResultType, 0);
+ const vector<deFloat16> float16UnusedOutput (iterations * numFloatsPerResultType, 0);
VulkanFeatures features;
SpecResource specResource;
map<string, string> specs;
specResource.inputs.push_back(Resource(BufferSp(new Float16Buffer(inputData)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
}
- specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16DummyOutput)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
+ specResource.outputs.push_back(Resource(BufferSp(new Float16Buffer(float16UnusedOutput)), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER));
specResource.verifyIO = testFunc.verifyFunc;
extensions.push_back("VK_KHR_shader_float16_int8");
enum TestType
{
TT_BASIC = 0,
- TT_DUMMY_INSTRUCTION_SET,
+ TT_NONEXISTING_INSTRUCTION_SET,
TT_LARGE_INSTRUCTION_NUMBER,
TT_MANY_PARAMETERS,
TT_ANY_CONSTANT_TYPE,
"%tmp = OpExtInst %void %extInstSet 1 %main %fileStr\n";
break;
- case TT_DUMMY_INSTRUCTION_SET:
+ case TT_NONEXISTING_INSTRUCTION_SET:
// Testing non existing instruction set
extendedInstructions =
- "%extInstSet = OpExtInstImport \"NonSemantic.P.B.DummySet\"\n";
+ "%extInstSet = OpExtInstImport \"NonSemantic.P.B.NonexistingSet\"\n";
additionalPreamble +=
"%testStrA = OpString \"this.is.test\"\n"
"%testStrB = OpString \"yet another test\"\n";
std::vector<TestData> testList =
{
{ "basic", TT_BASIC },
- { "dummy_instruction_set", TT_DUMMY_INSTRUCTION_SET },
+ { "dummy_instruction_set", TT_NONEXISTING_INSTRUCTION_SET },
{ "large_instruction_number", TT_LARGE_INSTRUCTION_NUMBER },
{ "many_parameters", TT_MANY_PARAMETERS },
{ "any_constant_type", TT_ANY_CONSTANT_TYPE },
? de::max(31u, properties.minImageExtent.height)
: de::min(deSmallestGreaterOrEquallPowerOfTwoU32(currentHeight+1), properties.maxImageExtent.height));
const vk::VkExtent2D imageSize = { imageWidth, imageHeight };
- const vk::VkExtent2D dummySize = { de::max(31u, properties.minImageExtent.width), de::max(31u, properties.minImageExtent.height) };
+ const vk::VkExtent2D unusedSize = { de::max(31u, properties.minImageExtent.width), de::max(31u, properties.minImageExtent.height) };
{
size_t presentModeNdx;
createInfos.push_back(createInfo);
- // add an extra dummy swapchain
- const vk::VkSwapchainCreateInfoKHR dummyInfo =
+ // add an extra unused swapchain
+ const vk::VkSwapchainCreateInfoKHR unusedInfo =
{
vk::VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
DE_NULL,
properties.minImageCount,
imageFormat,
imageColorSpace,
- dummySize,
+ unusedSize,
imageLayers,
imageUsage,
vk::VK_SHARING_MODE_EXCLUSIVE,
(vk::VkSwapchainKHR)0
};
- createInfos.push_back(dummyInfo);
+ createInfos.push_back(unusedInfo);
}
return createInfos;
const deUint32 numAcquirableImages = numImages - minImageCount + 1;
const auto fences = createFences(devHelper.vkd, *devHelper.device, numAcquirableImages + 1);
- deUint32 dummy;
+ deUint32 unused;
for (deUint32 i = 0; i < numAcquirableImages; ++i) {
- VK_CHECK_WSI(devHelper.vkd.acquireNextImageKHR(*devHelper.device, *swapchain, std::numeric_limits<deUint64>::max(), (VkSemaphore)0, **fences[i], &dummy));
+ VK_CHECK_WSI(devHelper.vkd.acquireNextImageKHR(*devHelper.device, *swapchain, std::numeric_limits<deUint64>::max(), (VkSemaphore)0, **fences[i], &unused));
}
- const auto result = devHelper.vkd.acquireNextImageKHR(*devHelper.device, *swapchain, 0, (VkSemaphore)0, **fences[numAcquirableImages], &dummy);
+ const auto result = devHelper.vkd.acquireNextImageKHR(*devHelper.device, *swapchain, 0, (VkSemaphore)0, **fences[numAcquirableImages], &unused);
if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR && result != VK_NOT_READY ){
return tcu::TestStatus::fail("Implementation failed to respond well acquiring too many images with 0 timeout");
const deUint32 numAcquirableImages = numImages - minImageCount + 1;
const auto fences = createFences(devHelper.vkd, *devHelper.device, numAcquirableImages + 1);
- deUint32 dummy;
+ deUint32 unused;
for (deUint32 i = 0; i < numAcquirableImages; ++i) {
- VK_CHECK_WSI(devHelper.vkd.acquireNextImageKHR(*devHelper.device, *swapchain, std::numeric_limits<deUint64>::max(), (VkSemaphore)0, **fences[i], &dummy));
+ VK_CHECK_WSI(devHelper.vkd.acquireNextImageKHR(*devHelper.device, *swapchain, std::numeric_limits<deUint64>::max(), (VkSemaphore)0, **fences[i], &unused));
}
const deUint64 millisecond = 1000000;
const deUint64 timeout = 50 * millisecond; // arbitrary realistic non-0 non-infinite timeout
- const auto result = devHelper.vkd.acquireNextImageKHR(*devHelper.device, *swapchain, timeout, (VkSemaphore)0, **fences[numAcquirableImages], &dummy);
+ const auto result = devHelper.vkd.acquireNextImageKHR(*devHelper.device, *swapchain, timeout, (VkSemaphore)0, **fences[numAcquirableImages], &unused);
if (result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR && result != VK_TIMEOUT ){
return tcu::TestStatus::fail("Implementation failed to respond well acquiring too many images with timeout");
if (de::inRange(blockParams.weightGridWidth, widthMin, widthMax) &&
de::inRange(blockParams.weightGridHeight, heightMin, heightMax))
{
- deUint32 dummy = 0;
- deUint32& widthVariable = layout.gridWidthVariableTerm == A ? a : layout.gridWidthVariableTerm == B ? b : dummy;
- deUint32& heightVariable = layout.gridHeightVariableTerm == A ? a : layout.gridHeightVariableTerm == B ? b : dummy;
+ deUint32 defaultValue = 0;
+ deUint32& widthVariable = layout.gridWidthVariableTerm == A ? a : layout.gridWidthVariableTerm == B ? b : defaultValue;
+ deUint32& heightVariable = layout.gridHeightVariableTerm == A ? a : layout.gridHeightVariableTerm == B ? b : defaultValue;
widthVariable = blockParams.weightGridWidth - layout.gridWidthConstantTerm;
heightVariable = blockParams.weightGridHeight - layout.gridHeightConstantTerm;
}
}
-// Generate a number of trivial dummy blocks to fill unneeded space in a texture.
-void generateDummyVoidExtentBlocks (deUint8* dst, size_t numBlocks)
+// Generate a number of trivial blocks to fill unneeded space in a texture.
+void generateDefaultVoidExtentBlocks (deUint8* dst, size_t numBlocks)
{
AssignBlock128 block = generateVoidExtentBlock(VoidExtentParams(false, 0, 0, 0, 0));
for (size_t ndx = 0; ndx < numBlocks; ndx++)
block.assignToMemory(&dst[ndx * BLOCK_SIZE_BYTES]);
}
-void generateDummyNormalBlocks (deUint8* dst, size_t numBlocks, int blockWidth, int blockHeight)
+void generateDefaultNormalBlocks (deUint8* dst, size_t numBlocks, int blockWidth, int blockHeight)
{
NormalBlockParams blockParams;
void generateRandomBlocks (deUint8* dst, size_t numBlocks, CompressedTexFormat format, deUint32 seed);
void generateRandomValidBlocks (deUint8* dst, size_t numBlocks, CompressedTexFormat format, TexDecompressionParams::AstcMode mode, deUint32 seed);
-void generateDummyVoidExtentBlocks (deUint8* dst, size_t numBlocks);
-void generateDummyNormalBlocks (deUint8* dst, size_t numBlocks, int blockWidth, int blockHeight);
+void generateDefaultVoidExtentBlocks (deUint8* dst, size_t numBlocks);
+void generateDefaultNormalBlocks (deUint8* dst, size_t numBlocks, int blockWidth, int blockHeight);
bool isValidBlock (const deUint8* data, CompressedTexFormat format, TexDecompressionParams::AstcMode mode);
namespace warmupCPUInternal
{
-volatile Dummy g_dummy;
+volatile Unused g_unused;
};
return floatMedian(absoluteDeviations) / median;
}
-static inline float dummyComputation (float initial, int numIterations)
+static inline float unusedComputation (float initial, int numIterations)
{
float a = initial;
int b = 123;
void warmupCPU (void)
{
- float dummy = *warmupCPUInternal::g_dummy.m_v;
+ float unused = *warmupCPUInternal::g_unused.m_v;
int computationSize = 1;
- // Do a rough calibration for computationSize to get dummyComputation's running time above a certain threshold.
+ // Do a rough calibration for computationSize to get unusedComputation's running time above a certain threshold.
while (computationSize < 1<<30) // \note This condition is unlikely to be met. The "real" loop exit is the break below.
{
const float singleMeasurementThreshold = 10000.0f;
for (int i = 0; i < numMeasurements; i++)
{
const deUint64 startTime = deGetMicroseconds();
- dummy = dummyComputation(dummy, computationSize);
+ unused = unusedComputation(unused, computationSize);
times[i] = (deInt64)(deGetMicroseconds() - startTime);
}
computationSize *= 2;
}
- // Do dummyComputations until running time seems stable enough.
+ // Do unusedComputations until running time seems stable enough.
{
const int maxNumMeasurements = 50;
const int numConsecutiveMeasurementsRequired = 5;
measurementNdx++)
{
const deUint64 startTime = deGetMicroseconds();
- dummy = dummyComputation(dummy, computationSize);
+ unused = unusedComputation(unused, computationSize);
latestTimes[measurementNdx % numConsecutiveMeasurementsRequired] = (deInt64)(deGetMicroseconds() - startTime);
}
}
- *warmupCPUInternal::g_dummy.m_v = dummy;
+ *warmupCPUInternal::g_unused.m_v = unused;
}
} // tcu
namespace tcu
{
-//! Does some dummy calculations to try and get the CPU working at full speed.
+//! Does some calculations, whose results are unused, to try and get the CPU working at full speed.
void warmupCPU (void);
namespace warmupCPUInternal
// \note Used in an attempt to prevent compiler from doing optimizations. Not supposed to be touched elsewhere.
-class Dummy
+class Unused
{
public:
- Dummy (void) : m_v(new float) {}
- ~Dummy (void) { delete m_v; }
+ Unused (void) : m_v(new float) {}
+ ~Unused (void) { delete m_v; }
volatile float* volatile m_v;
};
-extern volatile Dummy g_dummy;
+extern volatile Unused g_unused;
};
m_operations.clear();
}
-deUint8* Thread::getDummyData (size_t size)
+deUint8* Thread::getUnusedData (size_t size)
{
- if (m_dummyData.size() < size)
+ if (m_unusedData.size() < size)
{
- m_dummyData.resize(size);
+ m_unusedData.resize(size);
}
- return &(m_dummyData[0]);
+ return &(m_unusedData[0]);
}
void Thread::addOperation (Operation* operation)
void exec (void);
- deUint8* getDummyData (size_t size); //!< Return data pointer that contains at least size bytes. Valid until next call
+ deUint8* getUnusedData (size_t size); //!< Return data pointer that contains at least size bytes. Valid until next call
ThreadStatus getStatus (void) const { de::ScopedLock lock(m_statusLock); return m_status; }
void setStatus (ThreadStatus status) { de::ScopedLock lock(m_statusLock); m_status = status; }
std::vector<Message> m_messages;
mutable de::Mutex m_statusLock;
ThreadStatus m_status;
- std::vector<deUint8> m_dummyData;
+ std::vector<deUint8> m_unusedData;
// Disabled
Thread (const Thread&);
/* Test pushBack(). */
for (i = 0; i < 5000; i++)
{
- /* Dummy alloc to try to break alignments. */
+ /* Unused alloc to try to break alignments. */
pool.alloc(1);
arr.pushBack(i);
/* Test pushBack(). */
for (i = 0; i < 5000; i++)
{
- /* Dummy alloc to try to break alignments. */
+ /* Unused alloc to try to break alignments. */
pool.alloc(1);
arr.pushBack(i);
/* Test pushBack(). */
for (i = 0; i < 5000; i++)
{
- /* Dummy alloc to try to break alignments. */
+ /* Unused alloc to try to break alignments. */
deMemPool_alloc(pool, 1);
dePoolIntArray_pushBack(arr, i);
TYPENAME##_set(arr, bNdx, tmp); \
} \
\
-struct TYPENAME##Dummy_s { int dummy; }
+struct TYPENAME##Unused_s { int unused; }
/*--------------------------------------------------------------------*//*!
* \brief Declare a sort function for an array.
} \
} \
\
-struct TYPENAME##SORTNAME##Dummy_s { int dummy; }
+struct TYPENAME##SORTNAME##Unused_s { int unused; }
/* Basic array types. */
return iter->curSlot->values[iter->curElemIndex]; \
} \
\
-struct TYPENAME##Dummy_s { int dummy; }
+struct TYPENAME##Unused_s { int unused; }
/*--------------------------------------------------------------------*//*!
* \brief Implement a template pool hash class.
DE_ASSERT(slot); \
} \
} \
-struct TYPENAME##Dummy2_s { int dummy; }
+struct TYPENAME##Unused2_s { int unused; }
/* Copy-to-array templates. */
#define DE_DECLARE_POOL_HASH_TO_ARRAY(HASHTYPENAME, KEYARRAYTYPENAME, VALUEARRAYTYPENAME) \
deBool HASHTYPENAME##_copyToArray(const HASHTYPENAME* set, DE_PTR_TYPE(KEYARRAYTYPENAME) keyArray, DE_PTR_TYPE(VALUEARRAYTYPENAME) valueArray); \
- struct HASHTYPENAME##_##KEYARRAYTYPENAME##_##VALUEARRAYTYPENAME##_declare_dummy { int dummy; }
+ struct HASHTYPENAME##_##KEYARRAYTYPENAME##_##VALUEARRAYTYPENAME##_declare_unused { int unused; }
#define DE_IMPLEMENT_POOL_HASH_TO_ARRAY(HASHTYPENAME, KEYARRAYTYPENAME, VALUEARRAYTYPENAME) \
deBool HASHTYPENAME##_copyToArray(const HASHTYPENAME* hash, DE_PTR_TYPE(KEYARRAYTYPENAME) keyArray, DE_PTR_TYPE(VALUEARRAYTYPENAME) valueArray) \
DE_ASSERT(arrayNdx == numElements); \
return DE_TRUE; \
} \
-struct HASHTYPENAME##_##KEYARRAYTYPENAME##_##VALUEARRAYTYPENAME##_implement_dummy { int dummy; }
+struct HASHTYPENAME##_##KEYARRAYTYPENAME##_##VALUEARRAYTYPENAME##_implement_unused { int unused; }
#endif /* _DEPOOLHASH_H */
TYPENAME##Array_reset(hashArray->array); \
} \
\
-struct TYPENAME##Dummy_s { int dummy; }
+struct TYPENAME##Unused_s { int unused; }
/*--------------------------------------------------------------------*//*!
* \brief Implement a template pool hash-array class.
return DE_TRUE; \
} \
\
-struct TYPENAME##Dummy2_s { int dummy; }
+struct TYPENAME##Unused2_s { int unused; }
#endif /* _DEPOOLHASHARRAY_H */
return DE_FALSE; \
} \
\
-struct TYPENAME##Dummy_s { int dummy; }
+struct TYPENAME##Unused_s { int unused; }
/*--------------------------------------------------------------------*//*!
* \brief Implement a template pool hash-set class.
#define DE_IMPLEMENT_POOL_HASH_SET(TYPENAME, KEYTYPE, VALUETYPE, KEYHASHFUNC, KEYCMPFUNC, VALUEHASHFUNC, VALUECMPFUNC) \
DE_IMPLEMENT_POOL_SET(TYPENAME##Set, VALUETYPE, VALUEHASHFUNC, VALUECMPFUNC); \
DE_IMPLEMENT_POOL_HASH(TYPENAME##Hash, KEYTYPE, TYPENAME##Set*, KEYHASHFUNC, KEYCMPFUNC); \
-struct TYPENAME##Dummy2_s { int dummy; }
+struct TYPENAME##Unused2_s { int unused; }
/* Copy-to-array templates. */
#define DE_DECLARE_POOL_HASH_TO_ARRAY(HASHTYPENAME, KEYARRAYTYPENAME, VALUEARRAYTYPENAME) \
deBool HASHTYPENAME##_copyToArray(const HASHTYPENAME* set, KEYARRAYTYPENAME* keyArray, VALUEARRAYTYPENAME* valueArray); \
- struct HASHTYPENAME##_##KEYARRAYTYPENAME##_##VALUEARRAYTYPENAME##_declare_dummy { int dummy; }
+ struct HASHTYPENAME##_##KEYARRAYTYPENAME##_##VALUEARRAYTYPENAME##_declare_unused { int unused; }
#define DE_IMPLEMENT_POOL_HASH_TO_ARRAY(HASHTYPENAME, KEYARRAYTYPENAME, VALUEARRAYTYPENAME) \
deBool HASHTYPENAME##_copyToArray(const HASHTYPENAME* hash, KEYARRAYTYPENAME* keyArray, VALUEARRAYTYPENAME* valueArray) \
DE_ASSERT(arrayNdx == numElements); \
return DE_TRUE; \
} \
-struct HASHTYPENAME##_##KEYARRAYTYPENAME##_##VALUEARRAYTYPENAME##_implement_dummy { int dummy; }
+struct HASHTYPENAME##_##KEYARRAYTYPENAME##_##VALUEARRAYTYPENAME##_implement_unused { int unused; }
#endif
/* Push items -1000..1000 into heap. */
for (i = -1000; i < 1000; i++)
{
- /* Dummy alloc to try to break alignments. */
+ /* Unused alloc to try to break alignments. */
deMemPool_alloc(pool, 1);
TestHeap_push(heap, HeapItem_create(i, -i));
}
return tmp; \
} \
\
-struct TYPENAME##Dummy_s { int dummy; }
+struct TYPENAME##Unused_s { int unused; }
#endif /* _DEPOOLHEAP_H */
TYPENAME##_setKeyCount(set, key, oldCount - 1); \
} \
\
-struct TYPENAME##DeclareDummy_s { int dummy; }
+struct TYPENAME##DeclareUnused_s { int unused; }
/*--------------------------------------------------------------------*//*!
* \brief Implement a template pool multiset class.
return DE_TRUE; \
} \
\
-struct TYPENAME##ImplementDummy_s { int dummy; }
+struct TYPENAME##ImplementUnused_s { int unused; }
/*--------------------------------------------------------------------*//*!
* \brief Declare set-wise operations for a multiset template.
deBool TYPENAME##_sumInplace (DE_PTR_TYPE(TYPENAME) a, const TYPENAME* b); \
deBool TYPENAME##_difference (DE_PTR_TYPE(TYPENAME) to, const TYPENAME* a, const TYPENAME* b); \
void TYPENAME##_differenceInplace (DE_PTR_TYPE(TYPENAME) a, const TYPENAME* b); \
- struct TYPENAME##SetwiseDeclareDummy_s { int dummy; }
+ struct TYPENAME##SetwiseDeclareUnused_s { int unused; }
#define DE_IMPLEMENT_POOL_MULTISET_SETWISE_OPERATIONS(TYPENAME, KEYTYPE) \
deBool TYPENAME##_union (DE_PTR_TYPE(TYPENAME) to, const TYPENAME* a, const TYPENAME* b) \
DE_FATAL("Not implemented."); \
} \
\
-struct TYPENAME##SetwiseImplementDummy_s { int dummy; }
+struct TYPENAME##SetwiseImplementUnused_s { int unused; }
#endif /* _DEPOOLMULTISET_H */
TYPENAME##_delete(set, key); \
} \
\
-struct TYPENAME##Dummy_s { int dummy; }
+struct TYPENAME##Unused_s { int unused; }
/*--------------------------------------------------------------------*//*!
* \brief Implement a template pool set class.
} \
} \
\
-struct TYPENAME##Dummy2_s { int dummy; }
+struct TYPENAME##Unused2_s { int unused; }
/* Copy-to-array templates. */
#define DE_DECLARE_POOL_SET_TO_ARRAY(SETTYPENAME, ARRAYTYPENAME) \
deBool SETTYPENAME##_copyToArray(const SETTYPENAME* set, DE_PTR_TYPE(ARRAYTYPENAME) array); \
- struct SETTYPENAME##_##ARRAYTYPENAME##_declare_dummy { int dummy; }
+ struct SETTYPENAME##_##ARRAYTYPENAME##_declare_unused { int unused; }
#define DE_IMPLEMENT_POOL_SET_TO_ARRAY(SETTYPENAME, ARRAYTYPENAME) \
deBool SETTYPENAME##_copyToArray(const SETTYPENAME* set, DE_PTR_TYPE(ARRAYTYPENAME) array) \
DE_ASSERT(arrayNdx == numElements); \
return DE_TRUE; \
} \
- struct SETTYPENAME##_##ARRAYTYPENAME##_implement_dummy { int dummy; }
+ struct SETTYPENAME##_##ARRAYTYPENAME##_implement_unused { int unused; }
/*--------------------------------------------------------------------*//*!
* \brief Declare set-wise operations for a set template.
void TYPENAME##_intersectInplace (DE_PTR_TYPE(TYPENAME) a, const TYPENAME* b); \
deBool TYPENAME##_difference (DE_PTR_TYPE(TYPENAME) to, const TYPENAME* a, const TYPENAME* b); \
void TYPENAME##_differenceInplace (DE_PTR_TYPE(TYPENAME) a, const TYPENAME* b); \
- struct TYPENAME##SetwiseDeclareDummy_s { int dummy; }
+ struct TYPENAME##SetwiseDeclareUnused_s { int unused; }
#define DE_IMPLEMENT_POOL_SET_SETWISE_OPERATIONS(TYPENAME, KEYTYPE) \
deBool TYPENAME##_union (DE_PTR_TYPE(TYPENAME) to, const TYPENAME* a, const TYPENAME* b) \
} \
} \
\
-struct TYPENAME##UnionIntersectImplementDummy_s { int dummy; }
+struct TYPENAME##UnionIntersectImplementUnused_s { int unused; }
#endif /* _DEPOOLSET_H */
}
/* There are no sensible errors so status is always good */
-deStreamStatus dummy_getStatus (deStreamData* stream)
+deStreamStatus empty_getStatus (deStreamData* stream)
{
DE_UNREF(stream);
}
/* There are no sensible errors in ringbuffer */
-static const char* dummy_getError (deStreamData* stream)
+static const char* empty_getError (deStreamData* stream)
{
DE_ASSERT(stream);
DE_UNREF(stream);
static const deIOStreamVFTable producerStreamVFTable = {
DE_NULL,
producerStream_write,
- dummy_getError,
+ empty_getError,
producerStream_flush,
producerStream_deinit,
- dummy_getStatus
+ empty_getStatus
};
static const deIOStreamVFTable consumerStreamVFTable = {
consumerStream_read,
DE_NULL,
- dummy_getError,
+ empty_getError,
DE_NULL,
consumerStream_deinit,
- dummy_getStatus
+ empty_getStatus
};
void deProducerStream_init (deOutStream* stream, deRingbuffer* buffer)
//! Return native pointer that can be used with eglCreatePlatformWindowSurface(). Default implementation throws tcu::NotSupportedError().
virtual void* getPlatformNative (void);
- // Process window events. Defaults to dummy implementation, that does nothing.
+ // Process window events. Defaults to an empty implementation that does nothing.
virtual void processEvents (void) {}
// Get current size of window's logical surface. Default implementation throws tcu::NotSupportedError()
* currently depend on having access to the glw::Functions already in test
* case constructor; in such situations there may not be a proper render
* context available (like in test case list dumping mode). This is a
- * simple workaround for that: a dummy render context with a glw::Functions
+ * simple workaround for that: an empty render context with a glw::Functions
* containing just null pointers.
*//*--------------------------------------------------------------------*/
-class DummyRenderContext : public RenderContext
+class EmptyRenderContext : public RenderContext
{
public:
- explicit DummyRenderContext (ContextType ctxType = ContextType()) : m_ctxType(ctxType) {}
+ explicit EmptyRenderContext (ContextType ctxType = ContextType()) : m_ctxType(ctxType) {}
virtual ContextType getType (void) const { return m_ctxType; }
virtual const glw::Functions& getFunctions (void) const { return m_functions; }
}
else if (m_curToken == TOKEN_REQUIRE)
{
- vector<RequiredCapability> dummyCaps;
+ vector<RequiredCapability> unusedCaps;
size_t size = program.requiredExtensions.size();
- parseRequirement(dummyCaps, program.requiredExtensions);
+ parseRequirement(unusedCaps, program.requiredExtensions);
if (size == program.requiredExtensions.size())
parseError("only extension requirements are allowed inside pipeline program");
m_varManager.popVariableScope();
m_varManager.popValueScope();
- // Fill undefined (unused) components in inputs with dummy values
+ // Fill undefined (unused) components in inputs with arbitrary placeholder values
fillUndefinedShaderInputs(shader.getInputs());
fillUndefinedShaderInputs(shader.getUniforms());
void TexImage2D::exec (tcu::ThreadUtil::Thread& t)
{
EGLThread& thread = dynamic_cast<EGLThread&>(t);
- void* dummyData = thread.getDummyData(m_width*m_height*4);
+ void* unusedData = thread.getUnusedData(m_width*m_height*4);
thread.newMessage() << "Begin -- glBindTexture(GL_TEXTURE_2D, " << m_texture->texture << ")" << tcu::ThreadUtil::Message::End;
GLU_CHECK_GLW_CALL(thread.gl, bindTexture(GL_TEXTURE_2D, m_texture->texture));
thread.newMessage() << "End -- glBindTexture()" << tcu::ThreadUtil::Message::End;
thread.newMessage() << "Begin -- glTexImage2D(GL_TEXTURE_2D, " << m_level << ", " << m_internalFormat << ", " << m_width << ", " << m_height << ", 0, " << m_format << ", " << m_type << ", data)" << tcu::ThreadUtil::Message::End;
- GLU_CHECK_GLW_CALL(thread.gl, texImage2D(GL_TEXTURE_2D, m_level, m_internalFormat, m_width, m_height, 0, m_format, m_type, dummyData));
+ GLU_CHECK_GLW_CALL(thread.gl, texImage2D(GL_TEXTURE_2D, m_level, m_internalFormat, m_width, m_height, 0, m_format, m_type, unusedData));
thread.newMessage() << "End -- glTexImage2D()" << tcu::ThreadUtil::Message::End;
thread.newMessage() << "Begin -- glBindTexture(GL_TEXTURE_2D, 0)" << tcu::ThreadUtil::Message::End;
void TexSubImage2D::exec (tcu::ThreadUtil::Thread& t)
{
EGLThread& thread = dynamic_cast<EGLThread&>(t);
- void* dummyData = thread.getDummyData(m_width*m_height*4);
+ void* unusedData = thread.getUnusedData(m_width*m_height*4);
thread.newMessage() << "Begin -- glBindTexture(GL_TEXTURE_2D, " << m_texture->texture << ")" << tcu::ThreadUtil::Message::End;
GLU_CHECK_GLW_CALL(thread.gl, bindTexture(GL_TEXTURE_2D, m_texture->texture));
thread.newMessage() << "End -- glBindTexture()" << tcu::ThreadUtil::Message::End;
thread.newMessage() << "Begin -- glTexSubImage2D(GL_TEXTURE_2D, " << m_level << ", " << m_xoffset << ", " << m_yoffset << ", " << m_width << ", " << m_height << ", 0, " << m_format << ", " << m_type << ", <data>)" << tcu::ThreadUtil::Message::End;
- GLU_CHECK_GLW_CALL(thread.gl, texSubImage2D(GL_TEXTURE_2D, m_level, m_xoffset, m_yoffset, m_width, m_height, m_format, m_type, dummyData));
+ GLU_CHECK_GLW_CALL(thread.gl, texSubImage2D(GL_TEXTURE_2D, m_level, m_xoffset, m_yoffset, m_width, m_height, m_format, m_type, unusedData));
thread.newMessage() << "End -- glSubTexImage2D()" << tcu::ThreadUtil::Message::End;
thread.newMessage() << "Begin -- glBindTexture(GL_TEXTURE_2D, 0)" << tcu::ThreadUtil::Message::End;
void BufferData::exec (tcu::ThreadUtil::Thread& t)
{
EGLThread& thread = dynamic_cast<EGLThread&>(t);
- void* dummyData = thread.getDummyData(m_size);
+ void* unusedData = thread.getUnusedData(m_size);
thread.newMessage() << "Begin -- glBindBuffer(" << m_target << ", " << m_buffer->buffer << ")" << tcu::ThreadUtil::Message::End;
GLU_CHECK_GLW_CALL(thread.gl, bindBuffer(m_target, m_buffer->buffer));
thread.newMessage() << "End -- glBindBuffer()" << tcu::ThreadUtil::Message::End;
thread.newMessage() << "Begin -- glBufferData(" << m_target << ", " << m_size << ", <DATA>, " << m_usage << ")" << tcu::ThreadUtil::Message::End;
- GLU_CHECK_GLW_CALL(thread.gl, bufferData(m_target, m_size, dummyData, m_usage));
+ GLU_CHECK_GLW_CALL(thread.gl, bufferData(m_target, m_size, unusedData, m_usage));
thread.newMessage() << "End -- glBufferData()" << tcu::ThreadUtil::Message::End;
thread.newMessage() << "Begin -- glBindBuffer(" << m_target << ", 0)" << tcu::ThreadUtil::Message::End;
void BufferSubData::exec (tcu::ThreadUtil::Thread& t)
{
EGLThread& thread = dynamic_cast<EGLThread&>(t);
- void* dummyData = thread.getDummyData(m_size);
+ void* unusedData = thread.getUnusedData(m_size);
thread.newMessage() << "Begin -- glBindBuffer(" << m_target << ", " << m_buffer->buffer << ")" << tcu::ThreadUtil::Message::End;
GLU_CHECK_GLW_CALL(thread.gl, bindBuffer(m_target, m_buffer->buffer));
thread.newMessage() << "End -- glBindBuffer()" << tcu::ThreadUtil::Message::End;
thread.newMessage() << "Begin -- glBufferSubData(" << m_target << ", " << m_offset << ", " << m_size << ", <DATA>)" << tcu::ThreadUtil::Message::End;
- GLU_CHECK_GLW_CALL(thread.gl, bufferSubData(m_target, m_offset, m_size, dummyData));
+ GLU_CHECK_GLW_CALL(thread.gl, bufferSubData(m_target, m_offset, m_size, unusedData));
thread.newMessage() << "End -- glBufferSubData()" << tcu::ThreadUtil::Message::End;
thread.newMessage() << "Begin -- glBindBuffer(" << m_target << ", 0)" << tcu::ThreadUtil::Message::End;
EGL_HEIGHT, 64,
EGL_NONE
};
- const eglu::UniqueSurface dummyPbuffer (egl, m_eglDisplay, egl.createPbufferSurface(m_eglDisplay, m_eglConfig, attribList));
+ const eglu::UniqueSurface unusedPbuffer (egl, m_eglDisplay, egl.createPbufferSurface(m_eglDisplay, m_eglConfig, attribList));
TestLog& log = m_testCtx.getLog();
CallLogWrapper wrapper (egl, log);
EGLint damageRegion[] = { 10, 10, 10, 10 };
wrapper.enableLogging(true);
m_testCtx.setTestResult(QP_TEST_RESULT_PASS, "Pass");
- EGLU_CHECK_CALL(egl, makeCurrent(m_eglDisplay, *dummyPbuffer, *dummyPbuffer, m_eglContext));
+ EGLU_CHECK_CALL(egl, makeCurrent(m_eglDisplay, *unusedPbuffer, *unusedPbuffer, m_eglContext));
{
tcu::ScopedLogSection(log, "Test2.1", "If query buffer age on a surface that is not the current draw surface --> EGL_BAD_SURFACE");
EGLU_CHECK_CALL(egl, surfaceAttrib(m_eglDisplay, m_eglSurface, EGL_SWAP_BEHAVIOR, EGL_BUFFER_DESTROYED));
EGL_HEIGHT, 64,
EGL_NONE
};
- const eglu::UniqueSurface dummyPbuffer (egl, m_eglDisplay, egl.createPbufferSurface(m_eglDisplay, m_eglConfig, attribList));
+ const eglu::UniqueSurface unusedPbuffer (egl, m_eglDisplay, egl.createPbufferSurface(m_eglDisplay, m_eglConfig, attribList));
TestLog& log = m_testCtx.getLog();
CallLogWrapper wrapper (egl, log);
EGLint damageRegion[] = { 10, 10, 10, 10 };
{
tcu::ScopedLogSection(log, "Test7", "If call setDamageRegion() on a surface that is not the current draw surface --> EGL_BAD_MATCH");
- EGLU_CHECK_CALL(egl, makeCurrent(m_eglDisplay, *dummyPbuffer, *dummyPbuffer, m_eglContext));
+ EGLU_CHECK_CALL(egl, makeCurrent(m_eglDisplay, *unusedPbuffer, *unusedPbuffer, m_eglContext));
expectFalse(wrapper.eglSetDamageRegionKHR(m_eglDisplay, m_eglSurface, damageRegion, 1));
expectError(EGL_BAD_MATCH);
}
GLU_CHECK_GLW_CALL(m_gl, vertexAttribPointer(m_coordLocation, 2, GL_FLOAT, GL_FALSE, 0, DE_NULL));
}
- // Create dummy data for filling buffer objects
+ // Create unused data for filling buffer objects
const std::vector<tcu::Vec4> refValues(s_numBindings, tcu::Vec4(0.0f, 1.0f, 1.0f, 1.0f));
if (m_isLocalArray && m_shaderType == SHADERTYPE_COMPUTE)
};
const UniformWeightArray<DE_LENGTH_OF_ARRAY(usages)> usageWeights;
- static const deUint32 blacklistedCases[]=
+ static const deUint32 disallowedCases[]=
{
3153, //!< extremely narrow triangle, results depend on sample positions
};
hash = (hash << 2) ^ (deUint32)spec.attribs[attrNdx].hash();
if (insertedHashes.find(hash) == insertedHashes.end() &&
- !de::contains(DE_ARRAY_BEGIN(blacklistedCases), DE_ARRAY_END(blacklistedCases), hash))
+ !de::contains(DE_ARRAY_BEGIN(disallowedCases), DE_ARRAY_END(disallowedCases), hash))
{
// Only aligned cases
if (spec.isCompatibilityTest() != gls::DrawTestSpec::COMPATIBILITY_UNALIGNED_OFFSET &&
SETUNIFORM(loc, 1, vec.getPtr()); \
CHECK_SET_UNIFORM(name); \
} \
-struct SetUniform##VECTYPE##Dummy_s { int unused; }
+struct SetUniform##VECTYPE##Unused_s { int unused; }
#define MAKE_SET_VEC_UNIFORM_PTR(VECTYPE, SETUNIFORM) \
void setUniform (const glw::Functions& gl, deUint32 programID, const char* name, const tcu::VECTYPE* vec, int arraySize) \
SETUNIFORM(loc, arraySize, vec->getPtr()); \
CHECK_SET_UNIFORM(name); \
} \
-struct SetUniformPtr##VECTYPE##Dummy_s { int unused; }
+struct SetUniformPtr##VECTYPE##Unused_s { int unused; }
MAKE_SET_VEC_UNIFORM (Vec2, gl.uniform2fv);
MAKE_SET_VEC_UNIFORM (Vec3, gl.uniform3fv);
ShaderCompilerCase::IterateResult ShaderCompilerCase::iterate (void)
{
- // Before actual measurements, compile and draw with a dummy shader to avoid possible initial slowdowns in the actual test.
+ // Before actual measurements, compile and draw with a minimal shader to avoid possible initial slowdowns in the actual test.
{
deUint32 specID = getSpecializationID(0);
ProgramContext progCtx;
DE_ASSERT(shaderValidity != SHADER_VALIDITY_LAST);
- // Before actual measurements, compile a dummy shader to avoid possible initial slowdowns in the actual test.
+ // Before actual measurements, compile a minimal shader to avoid possible initial slowdowns in the actual test.
{
deUint32 specID = getSpecializationID(0);
ProgramContext progCtx;
};
const UniformWeightArray<DE_LENGTH_OF_ARRAY(usages)> usageWeights;
- const deUint32 blacklistedCases[]=
+ const deUint32 disallowedCases[]=
{
3153, //!< extremely narrow triangle, results depend on sample positions
};
hash = (hash << 2) ^ (deUint32)spec.attribs[attrNdx].hash();
if (insertedHashes.find(hash) == insertedHashes.end() &&
- std::find(DE_ARRAY_BEGIN(blacklistedCases), DE_ARRAY_END(blacklistedCases), hash) == DE_ARRAY_END(blacklistedCases))
+ std::find(DE_ARRAY_BEGIN(disallowedCases), DE_ARRAY_END(disallowedCases), hash) == DE_ARRAY_END(disallowedCases))
{
// Only unaligned cases
if (spec.isCompatibilityTest() == gls::DrawTestSpec::COMPATIBILITY_UNALIGNED_OFFSET ||
const Surface& result,
const tcu::RGBA& thresholdRGBA,
const IVec2& blockSize,
- int numNonDummyBlocks,
+ int numUsedBlocks,
IVec2& firstFailedBlockCoordDst,
Surface& errorMaskDst,
IVec4& maxDiffDst)
{
const IVec2 blockCoord = IVec2(x, y) / blockSize;
- if (blockCoord.y()*numXBlocks + blockCoord.x() < numNonDummyBlocks)
+ if (blockCoord.y()*numXBlocks + blockCoord.x() < numUsedBlocks)
{
const IVec4 refPix = reference.getPixel(x, y).toIVec();
const int imageWidth = numXBlocksPerImage * blockSize.x();
const int imageHeight = numYBlocksPerImage * blockSize.y();
const int numBlocksRemaining = totalNumBlocks - m_numBlocksTested;
- const int curNumNonDummyBlocks = de::min(numBlocksPerImage, numBlocksRemaining);
- const int curNumDummyBlocks = numBlocksPerImage - curNumNonDummyBlocks;
+ const int curNumUsedBlocks = de::min(numBlocksPerImage, numBlocksRemaining);
+ const int curNumUnusedBlocks = numBlocksPerImage - curNumUsedBlocks;
const glu::RenderContext& renderCtx = m_context.getRenderContext();
const tcu::RGBA threshold = renderCtx.getRenderTarget().getPixelFormat().getColorThreshold() + (tcu::isAstcSRGBFormat(m_format) ? tcu::RGBA(2,2,2,2) : tcu::RGBA(1,1,1,1));
tcu::CompressedTexture compressed (m_format, imageWidth, imageHeight);
}
DE_ASSERT(compressed.getDataSize() == numBlocksPerImage*tcu::astc::BLOCK_SIZE_BYTES);
- deMemcpy(compressed.getData(), &m_blockData[m_numBlocksTested*tcu::astc::BLOCK_SIZE_BYTES], curNumNonDummyBlocks*tcu::astc::BLOCK_SIZE_BYTES);
- if (curNumDummyBlocks > 1)
- tcu::astc::generateDummyVoidExtentBlocks((deUint8*)compressed.getData() + curNumNonDummyBlocks*tcu::astc::BLOCK_SIZE_BYTES, curNumDummyBlocks);
+ deMemcpy(compressed.getData(), &m_blockData[m_numBlocksTested*tcu::astc::BLOCK_SIZE_BYTES], curNumUsedBlocks*tcu::astc::BLOCK_SIZE_BYTES);
+ if (curNumUnusedBlocks > 1)
+ tcu::astc::generateDefaultVoidExtentBlocks((deUint8*)compressed.getData() + curNumUsedBlocks*tcu::astc::BLOCK_SIZE_BYTES, curNumUnusedBlocks);
// Create texture and render.
Surface errorMask;
IVec2 firstFailedBlockCoord;
IVec4 maxDiff;
- const bool compareOk = compareBlockImages(referenceFrame, renderedFrame, threshold, blockSize, curNumNonDummyBlocks, firstFailedBlockCoord, errorMask, maxDiff);
+ const bool compareOk = compareBlockImages(referenceFrame, renderedFrame, threshold, blockSize, curNumUsedBlocks, firstFailedBlockCoord, errorMask, maxDiff);
if (m_currentIteration == 0 || !compareOk)
{
{
tcu::ScopedLogSection section(log, "Iteration " + de::toString(m_currentIteration),
- "Blocks " + de::toString(m_numBlocksTested) + " to " + de::toString(m_numBlocksTested + curNumNonDummyBlocks - 1));
+ "Blocks " + de::toString(m_numBlocksTested) + " to " + de::toString(m_numBlocksTested + curNumUsedBlocks - 1));
- if (curNumDummyBlocks > 0)
- log << TestLog::Message << "Note: Only the first " << curNumNonDummyBlocks << " blocks in the image are relevant; rest " << curNumDummyBlocks << " are dummies and not checked" << TestLog::EndMessage;
+ if (curNumUnusedBlocks > 0)
+ log << TestLog::Message << "Note: Only the first " << curNumUsedBlocks << " blocks in the image are relevant; rest " << curNumUnusedBlocks << " are unused and not checked" << TestLog::EndMessage;
if (!compareOk)
{
}
}
- if (m_numBlocksTested + curNumNonDummyBlocks < totalNumBlocks)
+ if (m_numBlocksTested + curNumUsedBlocks < totalNumBlocks)
log << TestLog::Message << "Note: not logging further images unless reference comparison fails" << TestLog::EndMessage;
}
}
m_currentIteration++;
- m_numBlocksTested += curNumNonDummyBlocks;
+ m_numBlocksTested += curNumUsedBlocks;
if (m_numBlocksTested >= totalNumBlocks)
{
tcu::CompressedTexture compressed (m_format, imageWidth, imageHeight);
DE_ASSERT(compressed.getDataSize() == totalNumBlocks*tcu::astc::BLOCK_SIZE_BYTES);
- tcu::astc::generateDummyNormalBlocks((deUint8*)compressed.getData(), totalNumBlocks, blockSize.x(), blockSize.y());
+ tcu::astc::generateDefaultNormalBlocks((deUint8*)compressed.getData(), totalNumBlocks, blockSize.x(), blockSize.y());
// Create texture and render.
};
const UniformWeightArray<DE_LENGTH_OF_ARRAY(usages)> usageWeights;
- static const deUint32 blacklistedCases[]=
+ static const deUint32 disallowedCases[]=
{
544, //!< extremely narrow triangle
};
if (insertedHashes.find(hash) == insertedHashes.end())
{
- // Only properly aligned and not blacklisted cases
+ // Only properly aligned and not disallowed cases
if (spec.isCompatibilityTest() != gls::DrawTestSpec::COMPATIBILITY_UNALIGNED_OFFSET &&
spec.isCompatibilityTest() != gls::DrawTestSpec::COMPATIBILITY_UNALIGNED_STRIDE &&
- !de::contains(DE_ARRAY_BEGIN(blacklistedCases), DE_ARRAY_END(blacklistedCases), hash))
+ !de::contains(DE_ARRAY_BEGIN(disallowedCases), DE_ARRAY_END(disallowedCases), hash))
{
this->addChild(new gls::DrawTest(m_testCtx, m_context.getRenderContext(), spec, de::toString(insertedCount).c_str(), spec.getDesc().c_str()));
}
// Bound FBO to test target and default to other
if (m_boundTarget != GL_FRAMEBUFFER)
{
- // Dummy fbo is used as complemeting target (read when discarding draw for example).
+ // Placeholder fbo is used as the complementing target (read when discarding draw for example).
// \note Framework takes care of deleting objects at the end of test case.
- const deUint32 dummyTarget = m_boundTarget == GL_DRAW_FRAMEBUFFER ? GL_READ_FRAMEBUFFER : GL_DRAW_FRAMEBUFFER;
- deUint32 dummyFbo = 0;
- deUint32 dummyColorRbo = 0;
+ const deUint32 unusedTarget = m_boundTarget == GL_DRAW_FRAMEBUFFER ? GL_READ_FRAMEBUFFER : GL_DRAW_FRAMEBUFFER;
+ deUint32 unusedFbo = 0;
+ deUint32 unusedColorRbo = 0;
- glGenRenderbuffers (1, &dummyColorRbo);
- glBindRenderbuffer (GL_RENDERBUFFER, dummyColorRbo);
+ glGenRenderbuffers (1, &unusedColorRbo);
+ glBindRenderbuffer (GL_RENDERBUFFER, unusedColorRbo);
glRenderbufferStorage (GL_RENDERBUFFER, GL_RGBA8, 64, 64);
- glGenFramebuffers (1, &dummyFbo);
- glBindFramebuffer (dummyTarget, dummyFbo);
- glFramebufferRenderbuffer (dummyTarget, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, dummyColorRbo);
+ glGenFramebuffers (1, &unusedFbo);
+ glBindFramebuffer (unusedTarget, unusedFbo);
+ glFramebufferRenderbuffer (unusedTarget, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, unusedColorRbo);
glBindFramebuffer (m_boundTarget, fbo);
}
{
glu::ShaderProgram srcProgram (m_context.getRenderContext(), glu::makeVtxFragSources(vertexShaderSource, fragmentShaderSource));
GLuint dstProgram = glCreateProgram();
- GLuint dummyShader = glCreateShader(GL_VERTEX_SHADER);
+ GLuint unusedShader = glCreateShader(GL_VERTEX_SHADER);
GLenum binaryFormat = -1;
GLsizei binaryLength = -1;
std::vector<deUint8> binaryBuf;
expectError (GL_NO_ERROR);
m_log << TestLog::Section("", "GL_INVALID_OPERATION is generated if program is not the name of an existing program object.");
- glProgramBinary (dummyShader, binaryFormat, &binaryBuf[0], binaryLength);
+ glProgramBinary (unusedShader, binaryFormat, &binaryBuf[0], binaryLength);
expectError (GL_INVALID_OPERATION);
m_log << TestLog::EndSection;
m_log << TestLog::EndSection;
}
- glDeleteShader(dummyShader);
+ glDeleteShader(unusedShader);
glDeleteProgram(dstProgram);
});
ES3F_ADD_API_CASE(program_parameteri, "Invalid glProgramParameteri() usage",
{
GLuint shader = glCreateShader(GL_VERTEX_SHADER);
glu::ShaderProgram program (m_context.getRenderContext(), glu::makeVtxFragSources(uniformTestVertSource, uniformTestFragSource));
- GLuint dummyUniformIndex = 1;
- GLint dummyParamDst = -1;
+ GLuint unusedUniformIndex = 1;
+ GLint unusedParamDst = -1;
GLint numActiveUniforms = -1;
glUseProgram(program.getProgram());
m_log << TestLog::Message << "// GL_ACTIVE_UNIFORMS = " << numActiveUniforms << " (expected 4)." << TestLog::EndMessage;
m_log << TestLog::Section("", "GL_INVALID_VALUE is generated if program is not a value generated by OpenGL.");
- glGetActiveUniformsiv(-1, 1, &dummyUniformIndex, GL_UNIFORM_TYPE, &dummyParamDst);
+ glGetActiveUniformsiv(-1, 1, &unusedUniformIndex, GL_UNIFORM_TYPE, &unusedParamDst);
expectError(GL_INVALID_VALUE);
m_log << TestLog::EndSection;
m_log << TestLog::Section("", "GL_INVALID_OPERATION is generated if program is not a program object.");
- glGetActiveUniformsiv(shader, 1, &dummyUniformIndex, GL_UNIFORM_TYPE, &dummyParamDst);
+ glGetActiveUniformsiv(shader, 1, &unusedUniformIndex, GL_UNIFORM_TYPE, &unusedParamDst);
expectError(GL_INVALID_OPERATION);
m_log << TestLog::EndSection;
invalidUniformIndices.push_back(numActiveUniforms-1+excess);
invalidUniformIndices.push_back(1);
- std::vector<GLint> dummyParamsDst(invalidUniformIndices.size());
- glGetActiveUniformsiv(program.getProgram(), (GLsizei)invalidUniformIndices.size(), &invalidUniformIndices[0], GL_UNIFORM_TYPE, &dummyParamsDst[0]);
+ std::vector<GLint> unusedParamsDst(invalidUniformIndices.size());
+ glGetActiveUniformsiv(program.getProgram(), (GLsizei)invalidUniformIndices.size(), &invalidUniformIndices[0], GL_UNIFORM_TYPE, &unusedParamsDst[0]);
expectError(excess == 0 ? GL_NO_ERROR : GL_INVALID_VALUE);
}
m_log << TestLog::EndSection;
m_log << TestLog::Section("", "GL_INVALID_ENUM is generated if pname is not an accepted token.");
- glGetActiveUniformsiv(program.getProgram(), 1, &dummyUniformIndex, -1, &dummyParamDst);
+ glGetActiveUniformsiv(program.getProgram(), 1, &unusedUniformIndex, -1, &unusedParamDst);
expectError(GL_INVALID_ENUM);
m_log << TestLog::EndSection;
const IVec3 blockPixels = getBlockPixelSize(tcuFormat);
{
const size_t blockBytes = getBlockSize(tcuFormat);
- const vector<deUint8> dummyData (blockBytes);
+ const vector<deUint8> unusedData (blockBytes);
- glCompressedTexImage2D(GL_TEXTURE_2D, 0, format, blockPixels.x(), blockPixels.y(), 0, (int)blockBytes, &dummyData[0]);
+ glCompressedTexImage2D(GL_TEXTURE_2D, 0, format, blockPixels.x(), blockPixels.y(), 0, (int)blockBytes, &unusedData[0]);
expectError(GL_INVALID_ENUM);
}
FOR_CUBE_FACES(faceGL,
{
const deInt32 cubeSize = blockPixels.x() * blockPixels.y(); // Divisible by the block size and square
const size_t blockBytes = getBlockSize(tcuFormat) * cubeSize; // We have a x * y grid of blocks
- const vector<deUint8> dummyData (blockBytes);
+ const vector<deUint8> unusedData (blockBytes);
- glCompressedTexImage2D(faceGL, 0, format, cubeSize, cubeSize, 0, (int)blockBytes, &dummyData[0]);
+ glCompressedTexImage2D(faceGL, 0, format, cubeSize, cubeSize, 0, (int)blockBytes, &unusedData[0]);
expectError(GL_INVALID_ENUM);
});
}
const CompressedTexFormat tcuFormat = mapGLCompressedTexFormat(format);
const IVec3 blockPixels = getBlockPixelSize(tcuFormat);
const size_t blockBytes = getBlockSize(tcuFormat);
- const vector<deUint8> dummyData (blockBytes);
+ const vector<deUint8> unusedData (blockBytes);
- glCompressedTexImage3D(GL_TEXTURE_3D, 0, format, blockPixels.x(), blockPixels.y(), blockPixels.z(), 0, (int)blockBytes, &dummyData[0]);
+ glCompressedTexImage3D(GL_TEXTURE_3D, 0, format, blockPixels.x(), blockPixels.y(), blockPixels.z(), 0, (int)blockBytes, &unusedData[0]);
expectError(requiredError);
}
}
SETUNIFORM(loc, 1, vec.getPtr()); \
CHECK_SET_UNIFORM(name); \
} \
-struct SetUniform##VECTYPE##Dummy_s { int unused; }
+struct SetUniform##VECTYPE##Unused_s { int unused; }
#define MAKE_SET_VEC_UNIFORM_PTR(VECTYPE, SETUNIFORM) \
void setUniform (const glw::Functions& gl, deUint32 programID, const char* name, const tcu::VECTYPE* vec, int arraySize) \
SETUNIFORM(loc, arraySize, vec->getPtr()); \
CHECK_SET_UNIFORM(name); \
} \
-struct SetUniformPtr##VECTYPE##Dummy_s { int unused; }
+struct SetUniformPtr##VECTYPE##Unused_s { int unused; }
MAKE_SET_VEC_UNIFORM (Vec2, gl.uniform2fv);
MAKE_SET_VEC_UNIFORM (Vec3, gl.uniform3fv);
using de::meta::EnableIf;
using de::meta::Not;
-static const char* const s_dummyVertexShader = "#version 300 es\n"
+static const char* const s_minimalVertexShader = "#version 300 es\n"
"in highp vec4 a_position;\n"
"void main (void)\n"
"{\n"
" gl_Position = a_position;\n"
"}\n";
-static const char* const s_dummyFragnentShader = "#version 300 es\n"
+static const char* const s_minimalFragnentShader = "#version 300 es\n"
"layout(location = 0) out mediump vec4 dEQP_FragColor;\n"
"void main (void)\n"
"{\n"
}
}
-static float dummyCalculation (float initial, int workSize)
+static float busyworkCalculation (float initial, int workSize)
{
float a = initial;
int b = 123;
{
const deUint64 maxSingleWaitTime = 1000; // 1ms
const deUint64 endTime = deGetMicroseconds() + microseconds;
- float dummy = *tcu::warmupCPUInternal::g_dummy.m_v;
+ float unused = *tcu::warmupCPUInternal::g_unused.m_v;
int workSize = 500;
// exponentially increase work, cap to 1ms
const deUint64 startTime = deGetMicroseconds();
deUint64 totalTime;
- dummy = dummyCalculation(dummy, workSize);
+ unused = busyworkCalculation(unused, workSize);
totalTime = deGetMicroseconds() - startTime;
// "wait"
while (deGetMicroseconds() < endTime)
- dummy = dummyCalculation(dummy, workSize);
+ unused = busyworkCalculation(unused, workSize);
- *tcu::warmupCPUInternal::g_dummy.m_v = dummy;
+ *tcu::warmupCPUInternal::g_unused.m_v = unused;
}
// Sample from given values using linear interpolation at a given position as if values were laid to range [0, 1]
enum
{
- DUMMY_RENDER_AREA_SIZE = 32
+ UNUSED_RENDER_AREA_SIZE = 32
};
- glu::ShaderProgram* m_dummyProgram;
- deInt32 m_dummyProgramPosLoc;
+ glu::ShaderProgram* m_minimalProgram;
+ deInt32 m_minimalProgramPosLoc;
deUint32 m_bufferID;
const int m_numSamples;
template <typename SampleType>
BasicBufferCase<SampleType>::BasicBufferCase (Context& context, const char* name, const char* desc, int bufferSizeMin, int bufferSizeMax, int numSamples, int flags)
: TestCase (context, tcu::NODETYPE_PERFORMANCE, name, desc)
- , m_dummyProgram (DE_NULL)
- , m_dummyProgramPosLoc (-1)
+ , m_minimalProgram (DE_NULL)
+ , m_minimalProgramPosLoc (-1)
, m_bufferID (0)
, m_numSamples (numSamples)
, m_bufferSizeMin (bufferSizeMin)
if (!m_useGL)
return;
- // \note Viewport size is not checked, it won't matter if the render target actually is smaller hhan DUMMY_RENDER_AREA_SIZE
+ // \note Viewport size is not checked, it won't matter if the render target actually is smaller than UNUSED_RENDER_AREA_SIZE
- // dummy shader
+ // minimal shader
- m_dummyProgram = new glu::ShaderProgram(m_context.getRenderContext(), glu::ProgramSources() << glu::VertexSource(s_dummyVertexShader) << glu::FragmentSource(s_dummyFragnentShader));
- if (!m_dummyProgram->isOk())
+ m_minimalProgram = new glu::ShaderProgram(m_context.getRenderContext(), glu::ProgramSources() << glu::VertexSource(s_minimalVertexShader) << glu::FragmentSource(s_minimalFragnentShader));
+ if (!m_minimalProgram->isOk())
{
- m_testCtx.getLog() << *m_dummyProgram;
+ m_testCtx.getLog() << *m_minimalProgram;
throw tcu::TestError("failed to build shader program");
}
- m_dummyProgramPosLoc = gl.getAttribLocation(m_dummyProgram->getProgram(), "a_position");
- if (m_dummyProgramPosLoc == -1)
+ m_minimalProgramPosLoc = gl.getAttribLocation(m_minimalProgram->getProgram(), "a_position");
+ if (m_minimalProgramPosLoc == -1)
throw tcu::TestError("a_position location was -1");
}
m_bufferID = 0;
}
- delete m_dummyProgram;
- m_dummyProgram = DE_NULL;
+ delete m_minimalProgram;
+ m_minimalProgram = DE_NULL;
}
template <typename SampleType>
de::Random rnd (0x1234);
deUint32 bufferIDs[numRandomBuffers] = {0};
- gl.useProgram(m_dummyProgram->getProgram());
- gl.viewport(0, 0, DUMMY_RENDER_AREA_SIZE, DUMMY_RENDER_AREA_SIZE);
- gl.enableVertexAttribArray(m_dummyProgramPosLoc);
+ gl.useProgram(m_minimalProgram->getProgram());
+ gl.viewport(0, 0, UNUSED_RENDER_AREA_SIZE, UNUSED_RENDER_AREA_SIZE);
+ gl.enableVertexAttribArray(m_minimalProgramPosLoc);
for (int ndx = 0; ndx < numRepeats; ++ndx)
{
if (rnd.getBool())
{
- gl.vertexAttribPointer(m_dummyProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
+ gl.vertexAttribPointer(m_minimalProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
gl.drawArrays(GL_POINTS, 0, 1);
gl.drawArrays(GL_POINTS, randomSize / (int)sizeof(float[4]) - 1, 1);
}
gl.bindBuffer(GL_ARRAY_BUFFER, bufferIDs[randomBufferNdx]);
gl.bufferData(GL_ARRAY_BUFFER, randomSize, &zeroData[0], usage);
- gl.vertexAttribPointer(m_dummyProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
+ gl.vertexAttribPointer(m_minimalProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
gl.drawArrays(GL_POINTS, 0, 1);
gl.drawArrays(GL_POINTS, randomSize / (int)sizeof(float[4]) - 1, 1);
template <typename SampleType>
void BasicBufferCase<SampleType>::waitGLResults (void)
{
- tcu::Surface dummySurface(DUMMY_RENDER_AREA_SIZE, DUMMY_RENDER_AREA_SIZE);
- glu::readPixels(m_context.getRenderContext(), 0, 0, dummySurface.getAccess());
+ tcu::Surface unusedSurface(UNUSED_RENDER_AREA_SIZE, UNUSED_RENDER_AREA_SIZE);
+ glu::readPixels(m_context.getRenderContext(), 0, 0, unusedSurface.getAccess());
}
template <typename SampleType>
virtual void testBufferUpload (UploadSampleResult<SampleType>& result, int writeSize) = 0;
void logAndSetTestResult (const std::vector<UploadSampleResult<SampleType> >& results);
- deUint32 m_dummyBufferID;
+ deUint32 m_unusedBufferID;
protected:
const CaseType m_caseType;
using BasicBufferCase<SampleType>::m_testCtx;
using BasicBufferCase<SampleType>::m_context;
- using BasicBufferCase<SampleType>::DUMMY_RENDER_AREA_SIZE;
- using BasicBufferCase<SampleType>::m_dummyProgram;
- using BasicBufferCase<SampleType>::m_dummyProgramPosLoc;
+ using BasicBufferCase<SampleType>::UNUSED_RENDER_AREA_SIZE;
+ using BasicBufferCase<SampleType>::m_minimalProgram;
+ using BasicBufferCase<SampleType>::m_minimalProgramPosLoc;
using BasicBufferCase<SampleType>::m_bufferID;
using BasicBufferCase<SampleType>::m_numSamples;
using BasicBufferCase<SampleType>::m_bufferSizeMin;
template <typename SampleType>
BasicUploadCase<SampleType>::BasicUploadCase (Context& context, const char* name, const char* desc, int bufferSizeMin, int bufferSizeMax, int numSamples, deUint32 bufferUsage, CaseType caseType, ResultType resultType, int flags)
: BasicBufferCase<SampleType> (context, name, desc, bufferSizeMin, bufferSizeMax, numSamples, (caseType == CASE_USED_LARGER_BUFFER) ? (BasicBufferCase<SampleType>::FLAG_ALLOCATE_LARGER_BUFFER) : (0))
- , m_dummyBufferID (0)
+ , m_unusedBufferID (0)
, m_caseType (caseType)
, m_resultType (resultType)
, m_bufferUsage (bufferUsage)
// zero buffer as upload source
m_zeroData.resize(m_bufferSizeMax, 0x00);
- // dummy buffer
+ // unused buffer
- gl.genBuffers(1, &m_dummyBufferID);
+ gl.genBuffers(1, &m_unusedBufferID);
GLU_EXPECT_NO_ERROR(gl.getError(), "Gen buf");
// log basic info
template <typename SampleType>
void BasicUploadCase<SampleType>::deinit (void)
{
- if (m_dummyBufferID)
+ if (m_unusedBufferID)
{
- m_context.getRenderContext().getFunctions().deleteBuffers(1, &m_dummyBufferID);
- m_dummyBufferID = 0;
+ m_context.getRenderContext().getFunctions().deleteBuffers(1, &m_unusedBufferID);
+ m_unusedBufferID = 0;
}
m_zeroData = std::vector<deUint8>();
de::Random rng (0xbadc * iteration);
const int sizeDelta = rng.getInt(0, 2097140);
- const int dummyUploadSize = deAlign32(1048576 + sizeDelta, 4*4); // Vary buffer size to make sure it is always reallocated
- const std::vector<deUint8> dummyData (dummyUploadSize, 0x20);
+ const int unusedUploadSize = deAlign32(1048576 + sizeDelta, 4*4); // Vary buffer size to make sure it is always reallocated
+ const std::vector<deUint8> unusedData (unusedUploadSize, 0x20);
- gl.bindBuffer(GL_ARRAY_BUFFER, m_dummyBufferID);
- gl.bufferData(GL_ARRAY_BUFFER, dummyUploadSize, &dummyData[0], m_bufferUsage);
+ gl.bindBuffer(GL_ARRAY_BUFFER, m_unusedBufferID);
+ gl.bufferData(GL_ARRAY_BUFFER, unusedUploadSize, &unusedData[0], m_bufferUsage);
// make sure upload won't interfere with the test
- useBuffer(dummyUploadSize);
+ useBuffer(unusedUploadSize);
// don't kill the buffer so that the following upload cannot potentially reuse the buffer
gl.bufferData(GL_ARRAY_BUFFER, bufferSize, DE_NULL, m_bufferUsage);
else
{
- const std::vector<deUint8> dummyData(bufferSize, 0x20);
- gl.bufferData(GL_ARRAY_BUFFER, bufferSize, &dummyData[0], m_bufferUsage);
+ const std::vector<deUint8> unusedData(bufferSize, 0x20);
+ gl.bufferData(GL_ARRAY_BUFFER, bufferSize, &unusedData[0], m_bufferUsage);
}
if (m_caseType == CASE_UNSPECIFIED_BUFFER || m_caseType == CASE_SPECIFIED_BUFFER)
if (m_bufferUnspecifiedContent)
{
- const std::vector<deUint8> dummyData(bufferSize, 0x20);
- gl.bufferData(GL_ARRAY_BUFFER, bufferSize, &dummyData[0], m_bufferUsage);
+ const std::vector<deUint8> unusedData(bufferSize, 0x20);
+ gl.bufferData(GL_ARRAY_BUFFER, bufferSize, &unusedData[0], m_bufferUsage);
GLU_EXPECT_NO_ERROR(gl.getError(), "re-specify buffer");
}
{
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
- gl.useProgram(m_dummyProgram->getProgram());
+ gl.useProgram(m_minimalProgram->getProgram());
- gl.viewport(0, 0, DUMMY_RENDER_AREA_SIZE, DUMMY_RENDER_AREA_SIZE);
- gl.vertexAttribPointer(m_dummyProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
- gl.enableVertexAttribArray(m_dummyProgramPosLoc);
+ gl.viewport(0, 0, UNUSED_RENDER_AREA_SIZE, UNUSED_RENDER_AREA_SIZE);
+ gl.vertexAttribPointer(m_minimalProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
+ gl.enableVertexAttribArray(m_minimalProgramPosLoc);
// use whole buffer to make sure buffer is uploaded by drawing first and last
DE_ASSERT(bufferSize % (int)sizeof(float[4]) == 0);
using BasicBufferCase<SampleType>::m_testCtx;
using BasicBufferCase<SampleType>::m_context;
- using BasicBufferCase<SampleType>::DUMMY_RENDER_AREA_SIZE;
- using BasicBufferCase<SampleType>::m_dummyProgram;
- using BasicBufferCase<SampleType>::m_dummyProgramPosLoc;
+ using BasicBufferCase<SampleType>::UNUSED_RENDER_AREA_SIZE;
+ using BasicBufferCase<SampleType>::m_minimalProgram;
+ using BasicBufferCase<SampleType>::m_minimalProgramPosLoc;
using BasicBufferCase<SampleType>::m_bufferID;
using BasicBufferCase<SampleType>::m_numSamples;
using BasicBufferCase<SampleType>::m_bufferSizeMin;
// Set state for drawing so that we don't have to change these during the iteration
{
- gl.useProgram(m_dummyProgram->getProgram());
- gl.viewport(0, 0, DUMMY_RENDER_AREA_SIZE, DUMMY_RENDER_AREA_SIZE);
- gl.enableVertexAttribArray(m_dummyProgramPosLoc);
+ gl.useProgram(m_minimalProgram->getProgram());
+ gl.viewport(0, 0, UNUSED_RENDER_AREA_SIZE, UNUSED_RENDER_AREA_SIZE);
+ gl.enableVertexAttribArray(m_minimalProgramPosLoc);
}
}
gl.bufferData(GL_ARRAY_BUFFER, bufferSize, &m_zeroData[0], m_bufferUsage);
// ...use it...
- gl.vertexAttribPointer(m_dummyProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
+ gl.vertexAttribPointer(m_minimalProgramPosLoc, 4, GL_FLOAT, GL_FALSE, 0, DE_NULL);
drawBufferRange(0, bufferSize);
// ..and make sure it is uploaded
void RenderPerformanceTestBase::waitGLResults (void) const
{
- tcu::Surface dummySurface(RENDER_AREA_SIZE, RENDER_AREA_SIZE);
- glu::readPixels(m_context.getRenderContext(), 0, 0, dummySurface.getAccess());
+ tcu::Surface unusedSurface(RENDER_AREA_SIZE, RENDER_AREA_SIZE);
+ glu::readPixels(m_context.getRenderContext(), 0, 0, unusedSurface.getAccess());
}
template <typename SampleType>
m_results.back().result.numVertices = getLayeredGridNumVertices(m_results.back().scene);
- // test cases set these, initialize to dummy values
+ // test cases set these, initialize to placeholder values
m_results.back().result.renderDataSize = -1;
m_results.back().result.uploadedDataSize = -1;
m_results.back().result.unrelatedDataSize = -1;
UploadWaitDrawCase::IterateResult UploadWaitDrawCase::iterate (void)
{
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
- const int betweenIterationDummyFrameCount = 5; // draw misc between test samples
+ const int betweenIterationFrameCount = 5; // draw misc between test samples
const int frameNdx = m_frameNdx++;
const int currentSampleNdx = m_iterationOrder[m_sampleNdx];
// Simulate work for about 8ms
busyWait(8000);
- // Dummy rendering during dummy frames
+ // Busywork rendering during the in-between (non-sample) frames
if (frameNdx != m_samples[currentSampleNdx].numFrames)
{
// draw similar from another buffer
if (m_bufferState == BUFFERSTATE_NEW)
reuseAndDeleteBuffer();
}
- else if (frameNdx == m_samples[currentSampleNdx].numFrames + betweenIterationDummyFrameCount)
+ else if (frameNdx == m_samples[currentSampleNdx].numFrames + betweenIterationFrameCount)
{
// next sample
++m_sampleNdx;
log << TestLog::Message << "Testing effects of culled fragment workload on render time" << TestLog::EndMessage;
log << TestLog::Message << "Geometry consists of two fullsceen quads. The first (occluding) quad uses a trivial shader,"
"the second (occluded) contains significant fragment shader work" << TestLog::EndMessage;
- log << TestLog::Message << "Workload indicates the number of iterations of dummy work done in the occluded quad's fragment shader" << TestLog::EndMessage;
+ log << TestLog::Message << "Workload indicates the number of iterations of unused work done in the occluded quad's fragment shader" << TestLog::EndMessage;
log << TestLog::Message << "The ratio of rendering times of this scene with/without depth testing are compared" << TestLog::EndMessage;
log << TestLog::Message << "Successfull early Z-testing should result in no correlation between workload and render time" << TestLog::EndMessage;
log << TestLog::EndSection;
log << TestLog::Message << "Testing effects of culled fragment workload on render time" << TestLog::EndMessage;
log << TestLog::Message << "Geometry consists of two fullsceen quads. The first (occluding) quad uses a trivial shader,"
"the second (occluded) contains significant fragment shader work and a discard that is never triggers but has a dynamic condition" << TestLog::EndMessage;
- log << TestLog::Message << "Workload indicates the number of iterations of dummy work done in the occluded quad's fragment shader" << TestLog::EndMessage;
+ log << TestLog::Message << "Workload indicates the number of iterations of unused work done in the occluded quad's fragment shader" << TestLog::EndMessage;
log << TestLog::Message << "The ratio of rendering times of this scene with/without depth testing are compared" << TestLog::EndMessage;
log << TestLog::Message << "Successfull early Z-testing should result in no correlation between workload and render time" << TestLog::EndMessage;
log << TestLog::EndSection;
log << TestLog::Message << "Testing effects of partially discarded occluder on rendering time" << TestLog::EndMessage;
log << TestLog::Message << "Geometry consists of two fullsceen quads. The first (occluding) quad discards half the "
"fragments in a grid pattern, the second (partially occluded) contains significant fragment shader work" << TestLog::EndMessage;
- log << TestLog::Message << "Workload indicates the number of iterations of dummy work done in the occluded quad's fragment shader" << TestLog::EndMessage;
+ log << TestLog::Message << "Workload indicates the number of iterations of unused work done in the occluded quad's fragment shader" << TestLog::EndMessage;
log << TestLog::Message << "The ratio of rendering times of this scene with/without depth testing are compared" << TestLog::EndMessage;
log << TestLog::Message << "Successfull early Z-testing should result in depth testing halving the render time" << TestLog::EndMessage;
log << TestLog::EndSection;
log << TestLog::Message << "Testing effects of partial occluder on rendering time" << TestLog::EndMessage;
log << TestLog::Message << "Geometry consists of two quads. The first (occluding) quad covers " << m_coverage*100.0f
<< "% of the screen, while the second (partially occluded, fullscreen) contains significant fragment shader work" << TestLog::EndMessage;
- log << TestLog::Message << "Workload indicates the number of iterations of dummy work done in the occluded quad's fragment shader" << TestLog::EndMessage;
+ log << TestLog::Message << "Workload indicates the number of iterations of unused work done in the occluded quad's fragment shader" << TestLog::EndMessage;
log << TestLog::Message << "The ratio of rendering times of this scene with/without depth testing are compared" << TestLog::EndMessage;
log << TestLog::Message << "Successfull early Z-testing should result in render time increasing proportionally with unoccluded area" << TestLog::EndMessage;
log << TestLog::EndSection;
log << TestLog::Section("Description", "Test description");
log << TestLog::Message << "Testing effects of non-default frag depth on culling efficiency" << TestLog::EndMessage;
log << TestLog::Message << "Geometry consists of two fullscreen quads. The first (occluding) quad is trivial, while the second (occluded) contains significant fragment shader work" << TestLog::EndMessage;
- log << TestLog::Message << "Workload indicates the number of iterations of dummy work done in the occluded quad's fragment shader" << TestLog::EndMessage;
+ log << TestLog::Message << "Workload indicates the number of iterations of unused work done in the occluded quad's fragment shader" << TestLog::EndMessage;
log << TestLog::Message << "The occluder quad has a static offset applied to gl_FragDepth" << TestLog::EndMessage;
log << TestLog::Message << "The ratio of rendering times of this scene with/without depth testing are compared" << TestLog::EndMessage;
log << TestLog::Message << "Successfull early Z-testing should result in no correlation between workload and render time" << TestLog::EndMessage;
log << TestLog::Section("Description", "Test description");
log << TestLog::Message << "Testing effects of non-default frag depth on culling efficiency" << TestLog::EndMessage;
log << TestLog::Message << "Geometry consists of two fullscreen quads. The first (occluding) quad is trivial, while the second (occluded) contains significant fragment shader work" << TestLog::EndMessage;
- log << TestLog::Message << "Workload indicates the number of iterations of dummy work done in the occluded quad's fragment shader" << TestLog::EndMessage;
+ log << TestLog::Message << "Workload indicates the number of iterations of unused work done in the occluded quad's fragment shader" << TestLog::EndMessage;
log << TestLog::Message << "The occluder quad has a dynamic offset applied to gl_FragDepth" << TestLog::EndMessage;
log << TestLog::Message << "The ratio of rendering times of this scene with/without depth testing are compared" << TestLog::EndMessage;
log << TestLog::Message << "Successfull early Z-testing should result in no correlation between workload and render time" << TestLog::EndMessage;
log << TestLog::Section("Description", "Test description");
log << TestLog::Message << "Testing effects of non-default frag depth on rendering time" << TestLog::EndMessage;
log << TestLog::Message << "Geometry consists of two fullscreen quads. The first (occluding) quad is trivial, while the second (occluded) contains significant fragment shader work" << TestLog::EndMessage;
- log << TestLog::Message << "Workload indicates the number of iterations of dummy work done in the occluded quad's fragment shader" << TestLog::EndMessage;
+ log << TestLog::Message << "Workload indicates the number of iterations of unused work done in the occluded quad's fragment shader" << TestLog::EndMessage;
log << TestLog::Message << "The occluded quad has a static offset applied to gl_FragDepth" << TestLog::EndMessage;
log << TestLog::Message << "The ratio of rendering times of this scene with/without depth testing are compared" << TestLog::EndMessage;
log << TestLog::Message << "Successfull early Z-testing should result in no correlation between workload and render time" << TestLog::EndMessage;
log << TestLog::Section("Description", "Test description");
log << TestLog::Message << "Testing effects of non-default frag depth on rendering time" << TestLog::EndMessage;
log << TestLog::Message << "Geometry consists of two fullscreen quads. The first (occluding) quad is trivial, while the second (occluded) contains significant fragment shader work" << TestLog::EndMessage;
- log << TestLog::Message << "Workload indicates the number of iterations of dummy work done in the occluded quad's fragment shader" << TestLog::EndMessage;
+ log << TestLog::Message << "Workload indicates the number of iterations of unused work done in the occluded quad's fragment shader" << TestLog::EndMessage;
log << TestLog::Message << "The occluded quad has a dynamic offset applied to gl_FragDepth" << TestLog::EndMessage;
log << TestLog::Message << "The ratio of rendering times of this scene with/without depth testing are compared" << TestLog::EndMessage;
log << TestLog::Message << "Successfull early Z-testing should result in no correlation between workload and render time" << TestLog::EndMessage;
log << TestLog::Section("Description", "Test description");
log << TestLog::Message << "Testing effects of of back first rendering order on culling efficiency" << TestLog::EndMessage;
log << TestLog::Message << "Geometry consists of two fullscreen quads. The second (occluding) quad is trivial, while the first (occluded) contains significant fragment shader work" << TestLog::EndMessage;
- log << TestLog::Message << "Workload indicates the number of iterations of dummy work done in the occluded quad's fragment shader" << TestLog::EndMessage;
+ log << TestLog::Message << "Workload indicates the number of iterations of unused work done in the occluded quad's fragment shader" << TestLog::EndMessage;
log << TestLog::Message << "The ratio of rendering times of this scene with/without depth testing are compared" << TestLog::EndMessage;
log << TestLog::Message << "Successfull early Z-testing should result in no correlation between workload and render time" << TestLog::EndMessage;
log << TestLog::EndSection;
ShaderCompilerCase::IterateResult ShaderCompilerCase::iterate (void)
{
- // Before actual measurements, compile and draw with a dummy shader to avoid possible initial slowdowns in the actual test.
+ // Before actual measurements, compile and draw with a minimal shader to avoid possible initial slowdowns in the actual test.
{
deUint32 specID = getSpecializationID(0);
ProgramContext progCtx;
DE_ASSERT(shaderValidity != SHADER_VALIDITY_LAST);
- // Before actual measurements, compile a dummy shader to avoid possible initial slowdowns in the actual test.
+ // Before actual measurements, compile a minimal shader to avoid possible initial slowdowns in the actual test.
{
deUint32 specID = getSpecializationID(0);
ProgramContext progCtx;
return STOP;
}
-extern "C" void GLW_APIENTRY dummyCallback(GLenum, GLenum, GLuint, GLenum, GLsizei, const char*, const void*)
+extern "C" void GLW_APIENTRY emptyCallback(GLenum, GLenum, GLuint, GLenum, GLsizei, const char*, const void*)
{
- // dummy
+ // empty
}
class DebugCallbackFunctionCase : public TestCase
{
const tcu::ScopedLogSection section(m_testCtx.getLog(), "Set", "Set");
- gl.glDebugMessageCallback(dummyCallback, DE_NULL);
- verifyStatePointer(result, gl, GL_DEBUG_CALLBACK_FUNCTION, (const void*)dummyCallback, QUERY_POINTER);
+ gl.glDebugMessageCallback(emptyCallback, DE_NULL);
+ verifyStatePointer(result, gl, GL_DEBUG_CALLBACK_FUNCTION, (const void*)emptyCallback, QUERY_POINTER);
}
result.setTestContextResult(m_testCtx);
const tcu::ScopedLogSection section (m_testCtx.getLog(), "Set", "Set");
const void* param = (void*)(int*)0x123;
- gl.glDebugMessageCallback(dummyCallback, param);
+ gl.glDebugMessageCallback(emptyCallback, param);
verifyStatePointer(result, gl, GL_DEBUG_CALLBACK_USER_PARAM, param, QUERY_POINTER);
}
void GeometryProgramQueryCase::expectQueryError (deUint32 program)
{
const glw::Functions& gl = m_context.getRenderContext().getFunctions();
- glw::GLint dummy;
+ glw::GLint unused;
glw::GLenum errorCode;
m_testCtx.getLog() << tcu::TestLog::Message << "Querying " << glu::getProgramParamStr(m_target) << ", expecting INVALID_OPERATION" << tcu::TestLog::EndMessage;
- gl.getProgramiv(program, m_target, &dummy);
+ gl.getProgramiv(program, m_target, &unused);
errorCode = gl.getError();
const int feedbackCount = 5 * 4; // 5x vec4
const std::vector<float> initialBufferContents (feedbackCount, -1.0f);
- m_testCtx.getLog() << tcu::TestLog::Message << "Filling feeback buffer with dummy value (-1.0)." << tcu::TestLog::EndMessage;
+ m_testCtx.getLog() << tcu::TestLog::Message << "Filling feedback buffer with unused value (-1.0)." << tcu::TestLog::EndMessage;
gl.genBuffers(1, &m_feedbackBuf);
gl.bindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, m_feedbackBuf);
{
glu::ShaderProgram srcProgram (ctx.getRenderContext(), glu::makeVtxFragSources(vertexShaderSource, fragmentShaderSource));
GLuint dstProgram = ctx.glCreateProgram();
- GLuint dummyShader = ctx.glCreateShader(GL_VERTEX_SHADER);
+ GLuint unusedShader = ctx.glCreateShader(GL_VERTEX_SHADER);
GLenum binaryFormat = -1;
GLsizei binaryLength = -1;
std::vector<deUint8> binaryBuf;
ctx.expectError (GL_NO_ERROR);
ctx.beginSection("GL_INVALID_OPERATION is generated if program is not the name of an existing program object.");
- ctx.glProgramBinary (dummyShader, binaryFormat, &binaryBuf[0], binaryLength);
+ ctx.glProgramBinary (unusedShader, binaryFormat, &binaryBuf[0], binaryLength);
ctx.expectError (GL_INVALID_OPERATION);
ctx.endSection();
ctx.endSection();
}
- ctx.glDeleteShader(dummyShader);
+ ctx.glDeleteShader(unusedShader);
ctx.glDeleteProgram(dstProgram);
}
{
GLuint shader = ctx.glCreateShader(GL_VERTEX_SHADER);
glu::ShaderProgram program (ctx.getRenderContext(), glu::makeVtxFragSources(getVtxFragVersionSources(uniformTestVertSource, ctx), getVtxFragVersionSources(uniformTestFragSource, ctx)));
- GLuint dummyUniformIndex = 1;
- GLint dummyParamDst = -1;
+ GLuint unusedUniformIndex = 1;
+ GLint unusedParamDst = -1;
GLint numActiveUniforms = -1;
ctx.glUseProgram(program.getProgram());
ctx.getLog() << TestLog::Message << "// GL_ACTIVE_UNIFORMS = " << numActiveUniforms << " (expected 4)." << TestLog::EndMessage;
ctx.beginSection("GL_INVALID_VALUE is generated if program is not a value generated by OpenGL.");
- ctx.glGetActiveUniformsiv(-1, 1, &dummyUniformIndex, GL_UNIFORM_TYPE, &dummyParamDst);
+ ctx.glGetActiveUniformsiv(-1, 1, &unusedUniformIndex, GL_UNIFORM_TYPE, &unusedParamDst);
ctx.expectError(GL_INVALID_VALUE);
ctx.endSection();
ctx.beginSection("GL_INVALID_OPERATION is generated if program is not a program object.");
- ctx.glGetActiveUniformsiv(shader, 1, &dummyUniformIndex, GL_UNIFORM_TYPE, &dummyParamDst);
+ ctx.glGetActiveUniformsiv(shader, 1, &unusedUniformIndex, GL_UNIFORM_TYPE, &unusedParamDst);
ctx.expectError(GL_INVALID_OPERATION);
ctx.endSection();
invalidUniformIndices.push_back(numActiveUniforms-1+excess);
invalidUniformIndices.push_back(1);
- std::vector<GLint> dummyParamsDst(invalidUniformIndices.size());
- ctx.glGetActiveUniformsiv(program.getProgram(), (GLsizei)invalidUniformIndices.size(), &invalidUniformIndices[0], GL_UNIFORM_TYPE, &dummyParamsDst[0]);
+ std::vector<GLint> unusedParamsDst(invalidUniformIndices.size());
+ ctx.glGetActiveUniformsiv(program.getProgram(), (GLsizei)invalidUniformIndices.size(), &invalidUniformIndices[0], GL_UNIFORM_TYPE, &unusedParamsDst[0]);
ctx.expectError(excess == 0 ? GL_NO_ERROR : GL_INVALID_VALUE);
}
ctx.endSection();
ctx.beginSection("GL_INVALID_ENUM is generated if pname is not an accepted token.");
- ctx.glGetActiveUniformsiv(program.getProgram(), 1, &dummyUniformIndex, -1, &dummyParamDst);
+ ctx.glGetActiveUniformsiv(program.getProgram(), 1, &unusedUniformIndex, -1, &unusedParamDst);
ctx.expectError(GL_INVALID_ENUM);
ctx.endSection();
if (hasTessellation)
buf << " gl_Position = a_position;\n";
else if (m_depthType == DEPTH_USER_DEFINED)
- buf << " highp float dummyZ = a_position.z;\n"
+ buf << " highp float unusedZ = a_position.z;\n"
" highp float writtenZ = a_position.w;\n"
- " gl_Position = vec4(a_position.xy, dummyZ, 1.0);\n"
+ " gl_Position = vec4(a_position.xy, unusedZ, 1.0);\n"
" v_fragDepth = writtenZ * u_depthScale + u_depthBias;\n";
else
buf << " highp float writtenZ = a_position.w;\n"
" highp vec4 tessellatedPos = gl_TessCoord.x * gl_in[0].gl_Position + gl_TessCoord.y * gl_in[1].gl_Position + gl_TessCoord.z * gl_in[2].gl_Position;\n";
if (m_depthType == DEPTH_USER_DEFINED)
- buf << " highp float dummyZ = tessellatedPos.z;\n"
+ buf << " highp float unusedZ = tessellatedPos.z;\n"
" highp float writtenZ = tessellatedPos.w;\n"
- " gl_Position = vec4(tessellatedPos.xy, dummyZ, 1.0);\n"
+ " gl_Position = vec4(tessellatedPos.xy, unusedZ, 1.0);\n"
" v_fragDepth = writtenZ * u_depthScale + u_depthBias;\n";
else
buf << " highp float writtenZ = tessellatedPos.w;\n"
return retVal;
}
-static bool dummyTrueConstantTypeFilter (glu::DataType d)
+static bool unusedTrueConstantTypeFilter (glu::DataType d)
{
DE_UNREF(d);
return true;
static int getNumTypeInstances (const ProgramInterfaceDefinition::Shader* shader, glu::Storage storage)
{
- return getNumTypeInstances(shader, storage, dummyTrueConstantTypeFilter);
+ return getNumTypeInstances(shader, storage, unusedTrueConstantTypeFilter);
}
static int accumulateShaderStorage (const ProgramInterfaceDefinition::Shader* shader, glu::Storage storage, int (*typeMap)(glu::DataType))
}
/**
- * Name of the dummy uniform added by generateProgramInterfaceProgramSources
+ * Name of the unused uniform added by generateProgramInterfaceProgramSources
*
- * A uniform named "dummyZero" is added by
+ * A uniform named "unusedZero" is added by
* generateProgramInterfaceProgramSources. It is used in expressions to
* prevent various program resources from being eliminated by the GLSL
* compiler's optimizer.
*
* \sa deqp::gles31::Functional::ProgramInterfaceDefinition::generateProgramInterfaceProgramSources
*/
-const char* getDummyZeroUniformName()
+const char* getUnusedZeroUniformName()
{
- return "dummyZero";
+ return "unusedZero";
}
glu::ProgramSources generateProgramInterfaceProgramSources (const ProgramInterfaceDefinition::Program* program)
// Use inputs and outputs so that they won't be removed by the optimizer
- usageBuf << "highp uniform vec4 " << getDummyZeroUniformName() << "; // Default value is vec4(0.0).\n"
+ usageBuf << "highp uniform vec4 " << getUnusedZeroUniformName() << "; // Default value is vec4(0.0).\n"
"highp vec4 readInputs()\n"
"{\n"
- " highp vec4 retValue = " << getDummyZeroUniformName() << ";\n";
+ " highp vec4 retValue = " << getUnusedZeroUniformName() << ";\n";
// User-defined inputs
usageBuf << " return retValue;\n"
"}\n\n";
- usageBuf << "void writeOutputs(in highp vec4 dummyValue)\n"
+ usageBuf << "void writeOutputs(in highp vec4 unusedValue)\n"
"{\n";
// User-defined outputs
shader->getDefaultBlock().variables[ndx].storage == glu::STORAGE_PATCH_OUT)
{
writeVariableWriteExpression(usageBuf,
- "dummyValue",
+ "unusedValue",
shader->getDefaultBlock().variables[ndx].name,
shader->getType(),
shader->getDefaultBlock().variables[ndx].storage,
const glu::InterfaceBlock& interface = shader->getDefaultBlock().interfaceBlocks[interfaceNdx];
if (isWritableInterface(interface))
{
- writeInterfaceWriteExpression(usageBuf, "dummyValue", interface, shader->getType(), program);
+ writeInterfaceWriteExpression(usageBuf, "unusedValue", interface, shader->getType(), program);
containsUserDefinedOutputs = true;
}
}
// Builtin-outputs that must be written to
if (shader->getType() == glu::SHADERTYPE_VERTEX)
- usageBuf << " gl_Position = dummyValue;\n";
+ usageBuf << " gl_Position = unusedValue;\n";
else if (shader->getType() == glu::SHADERTYPE_GEOMETRY)
- usageBuf << " gl_Position = dummyValue;\n"
+ usageBuf << " gl_Position = unusedValue;\n"
" EmitVertex();\n";
else if (shader->getType() == glu::SHADERTYPE_TESSELLATION_CONTROL)
- usageBuf << " gl_out[gl_InvocationID].gl_Position = dummyValue;\n"
+ usageBuf << " gl_out[gl_InvocationID].gl_Position = unusedValue;\n"
" gl_TessLevelOuter[0] = 2.8;\n"
" gl_TessLevelOuter[1] = 2.8;\n"
" gl_TessLevelOuter[2] = 2.8;\n"
" gl_TessLevelInner[0] = 2.8;\n"
" gl_TessLevelInner[1] = 2.8;\n";
else if (shader->getType() == glu::SHADERTYPE_TESSELLATION_EVALUATION)
- usageBuf << " gl_Position = dummyValue;\n";
+ usageBuf << " gl_Position = unusedValue;\n";
// Output to sink input data to
if (!containsUserDefinedOutputs)
{
if (shader->getType() == glu::SHADERTYPE_FRAGMENT)
- usageBuf << " gl_FragDepth = dot(dummyValue.xy, dummyValue.xw);\n";
+ usageBuf << " gl_FragDepth = dot(unusedValue.xy, unusedValue.xw);\n";
else if (shader->getType() == glu::SHADERTYPE_COMPUTE)
- usageBuf << " dummyOutputBlock.dummyValue = dummyValue;\n";
+ usageBuf << " unusedOutputBlock.unusedValue = unusedValue;\n";
}
usageBuf << "}\n\n"
" writeOutputs(readInputs());\n"
"}\n";
- // Interface for dummy output
+ // Interface for unused output
if (shader->getType() == glu::SHADERTYPE_COMPUTE && !containsUserDefinedOutputs)
{
- sourceBuf << "writeonly buffer DummyOutputInterface\n"
+ sourceBuf << "writeonly buffer UnusedOutputInterface\n"
<< "{\n"
- << " highp vec4 dummyValue;\n"
- << "} dummyOutputBlock;\n\n";
+ << " highp vec4 unusedValue;\n"
+ << "} unusedOutputBlock;\n\n";
}
sources << glu::ShaderSource(shader->getType(), sourceBuf.str() + usageBuf.str());
glu::ShaderType getProgramTransformFeedbackStage (const ProgramInterfaceDefinition::Program* program);
std::vector<std::string> getProgramInterfaceResourceList (const ProgramInterfaceDefinition::Program* program, ProgramInterface interface);
std::vector<std::string> getProgramInterfaceBlockMemberResourceList (const glu::InterfaceBlock& interfaceBlock);
-const char* getDummyZeroUniformName ();
+const char* getUnusedZeroUniformName ();
glu::ProgramSources generateProgramInterfaceProgramSources (const ProgramInterfaceDefinition::Program* program);
bool findProgramVariablePathByPathName (std::vector<ProgramInterfaceDefinition::VariablePathComponent>& typePath, const ProgramInterfaceDefinition::Program* program, const std::string& pathName, const ProgramInterfaceDefinition::VariableSearchFilter& filter);
void generateVariableTypeResourceNames (std::vector<std::string>& resources, const std::string& name, const glu::VarType& type, deUint32 resourceNameGenerationFlags);
{
DE_UNREF(implementationName);
- std::vector<VariablePathComponent> dummyPath;
- const bool referencedByShader = findProgramVariablePathByPathName(dummyPath, program, resource, m_filter);
+ std::vector<VariablePathComponent> unusedPath;
+ const bool referencedByShader = findProgramVariablePathByPathName(unusedPath, program, resource, m_filter);
m_testCtx.getLog()
<< tcu::TestLog::Message
for (int ndx = 0; ndx < (int)resourceList.size(); ++ndx)
{
- // dummyZero is a uniform that may be added by
+ // unusedZero is a uniform that may be added by
// generateProgramInterfaceProgramSources. Omit it here to avoid
// confusion about the output.
- if (resourceList[ndx] != getDummyZeroUniformName())
+ if (resourceList[ndx] != getUnusedZeroUniformName())
m_testCtx.getLog() << tcu::TestLog::Message << "\t" << ndx << ": " << resourceList[ndx] << tcu::TestLog::EndMessage;
}
{
if (!de::contains(expectedResources.begin(), expectedResources.end(), resourceList[ndx]))
{
- // Ignore all builtin variables or the variable dummyZero,
- // mismatch causes errors otherwise. dummyZero is a uniform that
+ // Ignore all builtin variables or the variable unusedZero,
+ // mismatch causes errors otherwise. unusedZero is a uniform that
// may be added by generateProgramInterfaceProgramSources.
if (deStringBeginsWith(resourceList[ndx].c_str(), "gl_") == DE_FALSE &&
- resourceList[ndx] != getDummyZeroUniformName())
+ resourceList[ndx] != getUnusedZeroUniformName())
{
m_testCtx.getLog() << tcu::TestLog::Message << "Error, resource list contains unexpected resource name " << resourceList[ndx] << tcu::TestLog::EndMessage;
error = true;
// .named_block
{
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(binding, true));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- blockContentGenerator(context, dummyVariable, targetGroup, programInterface, "named_block");
+ blockContentGenerator(context, unusedVariable, targetGroup, programInterface, "named_block");
}
// .unnamed_block
{
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(binding, false));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- blockContentGenerator(context, dummyVariable, targetGroup, programInterface, "unnamed_block");
+ blockContentGenerator(context, unusedVariable, targetGroup, programInterface, "unnamed_block");
}
// .block_array
{
const ResourceDefinition::Node::SharedPtr arrayElement (new ResourceDefinition::ArrayElement(binding, 3));
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(arrayElement, true));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- blockContentGenerator(context, dummyVariable, targetGroup, programInterface, "block_array");
+ blockContentGenerator(context, unusedVariable, targetGroup, programInterface, "block_array");
}
// .block_array_single_element
{
const ResourceDefinition::Node::SharedPtr arrayElement (new ResourceDefinition::ArrayElement(binding, 1));
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(arrayElement, true));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- blockContentGenerator(context, dummyVariable, targetGroup, programInterface, "block_array_single_element");
+ blockContentGenerator(context, unusedVariable, targetGroup, programInterface, "block_array_single_element");
}
}
// .named_block*
{
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(binding, true));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- targetGroup->addChild(new ResourceTestCase(context, dummyVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_BUFFER_BINDING), ("named_block" + nameSuffix).c_str()));
+ targetGroup->addChild(new ResourceTestCase(context, unusedVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_BUFFER_BINDING), ("named_block" + nameSuffix).c_str()));
}
// .unnamed_block*
{
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(binding, false));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- targetGroup->addChild(new ResourceTestCase(context, dummyVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_BUFFER_BINDING), ("unnamed_block" + nameSuffix).c_str()));
+ targetGroup->addChild(new ResourceTestCase(context, unusedVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_BUFFER_BINDING), ("unnamed_block" + nameSuffix).c_str()));
}
// .block_array*
{
const ResourceDefinition::Node::SharedPtr arrayElement (new ResourceDefinition::ArrayElement(binding, 3));
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(arrayElement, true));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- targetGroup->addChild(new ResourceTestCase(context, dummyVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_BUFFER_BINDING), ("block_array" + nameSuffix).c_str()));
+ targetGroup->addChild(new ResourceTestCase(context, unusedVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_BUFFER_BINDING), ("block_array" + nameSuffix).c_str()));
}
}
}
// .named_block
{
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(storage, true));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- targetGroup->addChild(new ResourceTestCase(context, dummyVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_REFERENCED_BY_SHADER), "named_block"));
+ targetGroup->addChild(new ResourceTestCase(context, unusedVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_REFERENCED_BY_SHADER), "named_block"));
}
// .unnamed_block
{
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(storage, false));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- targetGroup->addChild(new ResourceTestCase(context, dummyVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_REFERENCED_BY_SHADER), "unnamed_block"));
+ targetGroup->addChild(new ResourceTestCase(context, unusedVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_REFERENCED_BY_SHADER), "unnamed_block"));
}
// .block_array
{
const ResourceDefinition::Node::SharedPtr arrayElement (new ResourceDefinition::ArrayElement(storage, 3));
const ResourceDefinition::Node::SharedPtr block (new ResourceDefinition::InterfaceBlock(arrayElement, true));
- const ResourceDefinition::Node::SharedPtr dummyVariable (new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
+ const ResourceDefinition::Node::SharedPtr unusedVariable(new ResourceDefinition::Variable(block, glu::TYPE_BOOL_VEC3));
- targetGroup->addChild(new ResourceTestCase(context, dummyVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_REFERENCED_BY_SHADER), "block_array"));
+ targetGroup->addChild(new ResourceTestCase(context, unusedVariable, ProgramResourceQueryTestTarget(programInterface, PROGRAMRESOURCEPROP_REFERENCED_BY_SHADER), "block_array"));
}
}
const string fragmentSource = generateFragmentSource(basicUniforms);
const ShaderProgram program (m_context.getRenderContext(), glu::makeVtxFragSources(vertexSource, fragmentSource));
- // A dummy program that we'll give to glUseProgram before we actually need
+ // An unused program that we'll give to glUseProgram before we actually need
// the real program above, to see if an implementation tries to use the
// currently active program for something inappropriate (instead of the
// program given as argument to, say, glProgramUniform*).
- const ShaderProgram dummyProgram (m_context.getRenderContext(), glu::makeVtxFragSources("#version 310 es\n"
+ const ShaderProgram unusedProgram (m_context.getRenderContext(), glu::makeVtxFragSources("#version 310 es\n"
"void main (void) { gl_Position = vec4(1.0); }\n",
"#version 310 es\n"
return STOP;
}
- if (!dummyProgram.isOk())
+ if (!unusedProgram.isOk())
{
- log << dummyProgram;
- m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Compilation of dummy program failed");
+ log << unusedProgram;
+ m_testCtx.setTestResult(QP_TEST_RESULT_FAIL, "Compilation of unused program failed");
return STOP;
}
- log << TestLog::Message << "// Note: calling glUseProgram with a dummy program (will only use the real program once it's needed for rendering)" << TestLog::EndMessage;
- glUseProgram(dummyProgram.getProgram());
+ log << TestLog::Message << "// Note: calling glUseProgram with an unused program (will only use the real program once it's needed for rendering)" << TestLog::EndMessage;
+ glUseProgram(unusedProgram.getProgram());
const bool success = test(basicUniforms, basicUniformReportsRef, program, rnd);
m_testCtx.setTestResult(success ? QP_TEST_RESULT_PASS : QP_TEST_RESULT_FAIL,
<< "layout (binding = 1, std430) volatile buffer WorkBuffer\n"
<< "{\n"
<< " highp uint targetValue;\n"
- << " highp uint dummy;\n"
+ << " highp uint unused;\n"
<< "} sb_work;\n"
<< "\n"
<< "void main ()\n"
<< "{\n"
<< " // flip high bits\n"
<< " highp uint mask = uint(1) << (24u + (gl_GlobalInvocationID.x % 8u));\n"
- << " sb_work.dummy = atomicXor(sb_work.targetValue, mask);\n"
+ << " sb_work.unused = atomicXor(sb_work.targetValue, mask);\n"
<< "}";
return specializeShader(m_context, buf.str().c_str());
return (dimensionSize + blockSize - 1) / blockSize;
}
-void generateDummyCompressedData (tcu::CompressedTexture& dst, const tcu::CompressedTexFormat& format)
+void generateDefaultCompressedData (tcu::CompressedTexture& dst, const tcu::CompressedTexFormat& format)
{
const int blockByteSize = tcu::getBlockSize(format);
const tcu::IVec3 blockPixelSize = tcu::getBlockPixelSize(format);
};
template <typename T>
-de::MovePtr<T> genDummyTexture (glu::RenderContext& renderCtx, const glu::ContextInfo& ctxInfo, deUint32 texFormat, const typename TextureTraits<T>::SizeType& size)
+de::MovePtr<T> genDefaultTexture (glu::RenderContext& renderCtx, const glu::ContextInfo& ctxInfo, deUint32 texFormat, const typename TextureTraits<T>::SizeType& size)
{
de::MovePtr<T> texture;
const bool isAstcFormat = tcu::isAstcFormat(compressedFormat);
tcu::TexDecompressionParams decompressionParams ((isAstcFormat) ? (tcu::TexDecompressionParams::ASTCMODE_LDR) : (tcu::TexDecompressionParams::ASTCMODE_LAST));
- generateDummyCompressedData(compressedLevel, compressedFormat);
+ generateDefaultCompressedData(compressedLevel, compressedFormat);
texture = TextureTraits<T>::createTextureFromCompressedData(renderCtx,
ctxInfo,
// resources
- m_texture = genDummyTexture<glu::Texture2D>(m_context.getRenderContext(), m_context.getContextInfo(), m_texFormat, tcu::IVec2(m_texWidth, m_texHeight));
+ m_texture = genDefaultTexture<glu::Texture2D>(m_context.getRenderContext(), m_context.getContextInfo(), m_texFormat, tcu::IVec2(m_texWidth, m_texHeight));
m_testCtx.getLog() << tcu::TestLog::Message
<< "Created texture with format " << glu::getTextureFormatName(m_texFormat)
}
// resources
- m_texture = genDummyTexture<glu::Texture3D>(m_context.getRenderContext(), m_context.getContextInfo(), m_texFormat, m_size);
+ m_texture = genDefaultTexture<glu::Texture3D>(m_context.getRenderContext(), m_context.getContextInfo(), m_texFormat, m_size);
m_renderer = de::MovePtr<gls::TextureTestUtil::TextureRenderer>(new gls::TextureTestUtil::TextureRenderer(m_context.getRenderContext(), m_testCtx.getLog(), glslVersion, glu::PRECISION_HIGHP));
// texture info
glu::CallLogWrapper gl (m_context.getRenderContext().getFunctions(), m_testCtx.getLog());
const int positionLoc = gl.glGetAttribLocation(m_program->getProgram(), "a_position");
const int colorLoc = gl.glGetAttribLocation(m_program->getProgram(), "a_color");
- glu::Buffer dummyBuffer (m_context.getRenderContext());
+ glu::Buffer unusedBuffer (m_context.getRenderContext());
gl.enableLogging(true);
{
// bind data using old api
- gl.glBindBuffer(GL_ARRAY_BUFFER, *dummyBuffer);
+ gl.glBindBuffer(GL_ARRAY_BUFFER, *unusedBuffer);
gl.glVertexAttribPointer(positionLoc, 4, GL_FLOAT, GL_FALSE, (glw::GLsizei)(2 * sizeof(tcu::Vec4)), (const deUint8*)DE_NULL);
gl.glVertexAttribPointer(colorLoc, 4, GL_FLOAT, GL_FALSE, (glw::GLsizei)(2 * sizeof(tcu::Vec4)), glu::BufferOffsetAsPointer(sizeof(tcu::Vec4)));
{
// \todo [2016-11-15 pyry] Many tests (erroneously) inspect context type
// during test hierarchy construction. We should fix that
- // and revert dummy context to advertise unknown context type.
- m_renderCtx = new glu::DummyRenderContext(glu::ContextType(glu::ApiType::es(3,1)));
+ // and revert empty context to advertise unknown context type.
+ m_renderCtx = new glu::EmptyRenderContext(glu::ContextType(glu::ApiType::es(3,1)));
}
}
}
/*--------------------------------------------------------------------*//*!
- * \brief Dummy placeholder type for unused template parameters.
+ * \brief A placeholder type for unused template parameters.
*
* In the precision tests we are dealing with functions of different arities.
* To minimize code duplication, we only define templates with the maximum
template <typename T>
class ExprP : public ExprPBase<T> {};
-// We treat Voids as containers since the dummy parameters in generalized
+// We treat Voids as containers since the unused parameters in generalized
// vector functions are represented as Voids.
template <>
class ExprP<Void> : public ContainerExprPBase<Void> {};
executor->execute(int(numValues), inputArr, outputArr);
}
- // Initialize environment with dummy values so we don't need to bind in inner loop.
+ // Initialize environment with unused values so we don't need to bind in inner loop.
{
const typename Traits<In0>::IVal in0;
const typename Traits<In1>::IVal in1;
void LongStressCase::init (void)
{
- // Generate dummy texture data for each texture spec in m_programContexts.
+ // Generate unused texture data for each texture spec in m_programContexts.
DE_ASSERT(!m_programContexts.empty());
DE_ASSERT(m_programResources.empty());
// If texture data with the same format has already been generated, re-use that (don't care much about contents).
- SharedPtr<TextureLevel> dummyTex;
+ SharedPtr<TextureLevel> unusedTex;
for (int prevProgCtxNdx = 0; prevProgCtxNdx < (int)m_programResources.size(); prevProgCtxNdx++)
{
- const vector<SharedPtr<TextureLevel> >& prevProgCtxTextures = m_programResources[prevProgCtxNdx].dummyTextures;
+ const vector<SharedPtr<TextureLevel> >& prevProgCtxTextures = m_programResources[prevProgCtxNdx].unusedTextures;
for (int texNdx = 0; texNdx < (int)prevProgCtxTextures.size(); texNdx++)
{
if (prevProgCtxTextures[texNdx]->getFormat() == format)
{
- dummyTex = prevProgCtxTextures[texNdx];
+ unusedTex = prevProgCtxTextures[texNdx];
break;
}
}
}
- if (!dummyTex)
- dummyTex = SharedPtr<TextureLevel>(new TextureLevel(format));
+ if (!unusedTex)
+ unusedTex = SharedPtr<TextureLevel>(new TextureLevel(format));
- if (dummyTex->getWidth() < spec.width || dummyTex->getHeight() < spec.height)
+ if (unusedTex->getWidth() < spec.width || unusedTex->getHeight() < spec.height)
{
- dummyTex->setSize(spec.width, spec.height);
- tcu::fillWithComponentGradients(dummyTex->getAccess(), spec.minValue, spec.maxValue);
+ unusedTex->setSize(spec.width, spec.height);
+ tcu::fillWithComponentGradients(unusedTex->getAccess(), spec.minValue, spec.maxValue);
}
- progRes.dummyTextures.push_back(dummyTex);
+ progRes.unusedTextures.push_back(unusedTex);
}
}
m_textures->removeGarbageUntilUnder(m_maxTexMemoryUsageBytes - texture.getApproxMemUsageDiff(spec.width, spec.height, spec.internalFormat, spec.useMipmap), m_rnd);
if (!hadTexture || m_rnd.getFloat() < m_probabilities.reuploadWithTexImage)
- texture.setData(programResources.dummyTextures[texNdx]->getAccess(), spec.width, spec.height, spec.internalFormat, spec.useMipmap);
+ texture.setData(programResources.unusedTextures[texNdx]->getAccess(), spec.width, spec.height, spec.internalFormat, spec.useMipmap);
else
- texture.setSubData(programResources.dummyTextures[texNdx]->getAccess(), 0, 0, spec.width, spec.height);
+ texture.setSubData(programResources.unusedTextures[texNdx]->getAccess(), 0, 0, spec.width, spec.height);
texture.toUnit(0);
texture.setWrap(spec.sWrap, spec.tWrap);
std::vector<deUint8> attrDataBuf;
std::vector<int> attrDataOffsets;
std::vector<int> attrDataSizes;
- std::vector<de::SharedPtr<tcu::TextureLevel> > dummyTextures;
+ std::vector<de::SharedPtr<tcu::TextureLevel> > unusedTextures;
std::string shaderNameManglingSuffix;
};
DE_ASSERT(glslVersion == glu::GLSL_VERSION_100_ES || glslVersion == glu::GLSL_VERSION_300_ES);
}
-gls::ProgramContext ProgramLibrary::generateBufferContext (const int numDummyAttributes) const
+gls::ProgramContext ProgramLibrary::generateBufferContext (const int numUnusedAttributes) const
{
static const char* const vertexTemplate =
"${VTX_HEADER}"
"${VTX_IN} highp vec3 a_position;\n"
- "${VTX_DUMMY_INPUTS}"
+ "${VTX_UNUSED_INPUTS}"
"${VTX_OUT} mediump vec4 v_color;\n"
"\n"
"void main (void)\n"
map<string, string> firstLevelParams;
{
- string vtxDummyInputs;
+ string vtxUnusedInputs;
string vtxColorExpr;
- for (int i = 0; i < numDummyAttributes; i++)
+ for (int i = 0; i < numUnusedAttributes; i++)
{
- vtxDummyInputs += "${VTX_IN} mediump vec4 a_in" + toString(i) + ";\n";
+ vtxUnusedInputs += "${VTX_IN} mediump vec4 a_in" + toString(i) + ";\n";
vtxColorExpr += string() + (i > 0 ? " + " : "") + "a_in" + toString(i);
}
- firstLevelParams["VTX_DUMMY_INPUTS"] = substitute(vtxDummyInputs);
+ firstLevelParams["VTX_UNUSED_INPUTS"] = substitute(vtxUnusedInputs);
firstLevelParams["VTX_COLOR_EXPRESSION"] = vtxColorExpr;
}
context.attributes.push_back(gls::VarSpec("a_position", Vec3(-0.1f), Vec3(0.1f)));
- for (int i = 0; i < numDummyAttributes; i++)
- context.attributes.push_back(gls::VarSpec("a_in" + de::toString(i), Vec4(0.0f), Vec4(1.0f / (float)numDummyAttributes)));
+ for (int i = 0; i < numUnusedAttributes; i++)
+ context.attributes.push_back(gls::VarSpec("a_in" + de::toString(i), Vec4(0.0f), Vec4(1.0f / (float)numUnusedAttributes)));
return context;
}
public:
ProgramLibrary (glu::GLSLVersion glslVersion);
- gls::ProgramContext generateBufferContext (int numDummyAttributes) const;
+ gls::ProgramContext generateBufferContext (int numUnusedAttributes) const;
gls::ProgramContext generateTextureContext (int numTextureObjects, int texWid, int texHei, float positionFactor) const;
gls::ProgramContext generateBufferAndTextureContext (int numTextures, int texWid, int texHei) const;
gls::ProgramContext generateFragmentPointLightContext (int texWid, int texHei) const;
Result m_result;
MemObjectConfig m_config;
deUint32 m_glError;
- vector<deUint8> m_dummyData;
+ vector<deUint8> m_unusedData;
BufferRenderer m_bufferRenderer;
TextureRenderer m_textureRenderer;
};
{
DE_UNREF(renderContext);
- if (m_config.useDummyData)
+ if (m_config.useUnusedData)
{
- int dummySize = deMax32(m_config.maxBufferSize, m_config.maxTextureSize*m_config.maxTextureSize*4);
- m_dummyData = vector<deUint8>(dummySize);
+ int unusedSize = deMax32(m_config.maxBufferSize, m_config.maxTextureSize*m_config.maxTextureSize*4);
+ m_unusedData = vector<deUint8>(unusedSize);
}
else if (m_config.write)
- m_dummyData = vector<deUint8>(128);
+ m_unusedData = vector<deUint8>(128);
}
MemObjectAllocator::~MemObjectAllocator (void)
return;
}
- if (m_config.useDummyData)
+ if (m_config.useUnusedData)
{
- DE_ASSERT((int)m_dummyData.size() >= width*height*4);
- glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, &(m_dummyData[0]));
+ DE_ASSERT((int)m_unusedData.size() >= width*height*4);
+ glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, &(m_unusedData[0]));
}
else
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
}
if (m_config.write)
- glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, &(m_dummyData[0]));
+ glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, &(m_unusedData[0]));
error = glGetError();
if (error != 0)
m_buffers.push_back(buffer);
- if (m_config.useDummyData)
+ if (m_config.useUnusedData)
{
- DE_ASSERT((int)m_dummyData.size() >= size);
- glBufferData(GL_ARRAY_BUFFER, size, &(m_dummyData[0]), GL_DYNAMIC_DRAW);
+ DE_ASSERT((int)m_unusedData.size() >= size);
+ glBufferData(GL_ARRAY_BUFFER, size, &(m_unusedData[0]), GL_DYNAMIC_DRAW);
}
else
glBufferData(GL_ARRAY_BUFFER, size, NULL, GL_DYNAMIC_DRAW);
}
if (m_config.write)
- glBufferSubData(GL_ARRAY_BUFFER, 0, 1, &(m_dummyData[0]));
+ glBufferSubData(GL_ARRAY_BUFFER, 0, 1, &(m_unusedData[0]));
error = glGetError();
if (error != 0)
}
}
-MemoryStressCase::MemoryStressCase (tcu::TestContext& ctx, glu::RenderContext& renderContext, deUint32 objectTypes, int minTextureSize, int maxTextureSize, int minBufferSize, int maxBufferSize, bool write, bool use, bool useDummyData, bool clearAfterOOM, const char* name, const char* desc)
+MemoryStressCase::MemoryStressCase (tcu::TestContext& ctx, glu::RenderContext& renderContext, deUint32 objectTypes, int minTextureSize, int maxTextureSize, int minBufferSize, int maxBufferSize, bool write, bool use, bool useUnusedData, bool clearAfterOOM, const char* name, const char* desc)
: tcu::TestCase (ctx, name, desc)
, m_iteration (0)
, m_iterationCount (5)
m_config.minTextureSize = minTextureSize;
m_config.maxBufferSize = maxBufferSize;
m_config.minBufferSize = minBufferSize;
- m_config.useDummyData = useDummyData;
+ m_config.useUnusedData = useUnusedData;
m_config.write = write;
m_config.use = use;
}
int minTextureSize;
int maxTextureSize;
- bool useDummyData;
+ bool useUnusedData;
bool write;
bool use;
};
class MemoryStressCase : public tcu::TestCase
{
public:
- MemoryStressCase (tcu::TestContext& testCtx, glu::RenderContext& renderContext, deUint32 objectTypes, int minTextureSize, int maxTextureSize, int minBufferSize, int maxBufferSize, bool write, bool use, bool useDummyData, bool clearAfterOOM, const char* name, const char* desc);
+ MemoryStressCase (tcu::TestContext& testCtx, glu::RenderContext& renderContext, deUint32 objectTypes, int minTextureSize, int maxTextureSize, int minBufferSize, int maxBufferSize, bool write, bool use, bool useUnusedData, bool clearAfterOOM, const char* name, const char* desc);
~MemoryStressCase (void);
void init (void);
src << glu::getGLSLVersionDeclaration(version) << "\n";
- // \todo [2013-08-05 pyry] Do we need one dummy output?
+ // \todo [2013-08-05 pyry] Do we need one unused output?
src << "void main (void)\n{\n";
if (!customOut)
const size_t numBlocks = 1024;
generatedData.resize(numBlocks*astc::BLOCK_SIZE_BYTES);
- astc::generateDummyVoidExtentBlocks(&generatedData[0], numBlocks);
+ astc::generateDefaultVoidExtentBlocks(&generatedData[0], numBlocks);
testDecompress(m_format, numBlocks, &generatedData[0]);
verifyBlocksValid(m_format, TexDecompressionParams::ASTCMODE_HDR, numBlocks, &generatedData[0]);
}
- // Verify generating dummy normal blocks
+ // Verify generating default normal blocks
{
const size_t numBlocks = 1024;
const IVec3 blockPixelSize = getBlockPixelSize(m_format);
generatedData.resize(numBlocks*astc::BLOCK_SIZE_BYTES);
- astc::generateDummyNormalBlocks(&generatedData[0], numBlocks, blockPixelSize.x(), blockPixelSize.y());
+ astc::generateDefaultNormalBlocks(&generatedData[0], numBlocks, blockPixelSize.x(), blockPixelSize.y());
testDecompress(m_format, numBlocks, &generatedData[0]);