Fix precision issue in ShaderMatrixTest
author     fred jin <fred.jin@arm.com>
           Fri, 2 Apr 2021 07:39:07 +0000 (15:39 +0800)
committer  Alexander Galazin <Alexander.Galazin@arm.com>
           Fri, 21 May 2021 09:14:44 +0000 (09:14 +0000)
According to the spec, the compiler is free to promote the precision of
computations, and whether it does so depends on the specific shader
content and the compiler.

When tests use lowp or mediump precision in the shaders, the final
computation precision cannot be pinned down. The shader inputs are
generated at highp, so if the actual computation precision in the GPU
hardware is lowp or mediump, the comparison can easily fail. It is
therefore better to quantize the inputs to the precision expected by
the specific test.
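
For illustration, the fix forces a value onto the binary16 grid with a
float32 -> float16 -> float32 round trip. A minimal standalone sketch,
assuming a compiler with _Float16 support (GCC/Clang); the patch itself
uses dEQP's deFloat32To16()/deFloat16To32() helpers from deFloat16.h:

    // Emulate mediump quantization with a binary16 round trip.
    #include <cstdio>

    // Narrow to binary16 (lossy), then widen back (exact), so the result
    // is exactly representable at mediump precision or better.
    static float quantizeToMediump (float v)
    {
        return (float)(_Float16)v;
    }

    int main (void)
    {
        // 0.1f is not exactly representable in binary16; after quantizing,
        // a CPU-side reference and a mediump GPU computation can agree.
        std::printf("%.9f -> %.9f\n", 0.1f, quantizeToMediump(0.1f));
        return 0;
    }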

Affects:

dEQP-GLES3.functional.shaders.matrix.*

Components: OpenGL

VK-GL-CTS issue: 2863

Change-Id: I30af699da12bf88596e7046922b2f1f00d5d3f06

modules/gles3/functional/es3fShaderMatrixTests.cpp

index 098e99e..6d98bc6 100644
@@ -45,6 +45,7 @@
 #include "tcuMatrix.hpp"
 #include "tcuMatrixUtil.hpp"
 #include "deStringUtil.hpp"
+#include "deFloat16.h"
 
 #include "glwEnums.hpp"
 #include "glwFunctions.hpp"
@@ -1652,10 +1653,10 @@ void ShaderMatrixCase::init (void)
        for (int attribNdx = 0; attribNdx < 4; attribNdx++)
        {
                m_userAttribTransforms[attribNdx] = Mat4(0.0f);
-               m_userAttribTransforms[attribNdx](                  0, 3) = 0.1f + 0.15f * float(attribNdx);    // !< prevent matrix*vec from going into zero (assuming vec.w != 0)
-               m_userAttribTransforms[attribNdx](                  1, 3) = 0.2f + 0.15f * float(attribNdx);    // !<
-               m_userAttribTransforms[attribNdx](                  2, 3) = 0.3f + 0.15f * float(attribNdx);    // !<
-               m_userAttribTransforms[attribNdx](                  3, 3) = 0.4f + 0.15f * float(attribNdx);    // !<
+               m_userAttribTransforms[attribNdx](                  0, 3) = 0.1f + 0.1f * float(attribNdx);     // !< prevent matrix*vec from going into zero (assuming vec.w != 0)
+               m_userAttribTransforms[attribNdx](                  1, 3) = 0.2f + 0.1f * float(attribNdx);     // !<
+               m_userAttribTransforms[attribNdx](                  2, 3) = 0.3f + 0.1f * float(attribNdx);     // !<
+               m_userAttribTransforms[attribNdx](                  3, 3) = 0.4f + 0.1f * float(attribNdx);     // !<
                m_userAttribTransforms[attribNdx]((0 + attribNdx) % 4, 0) = 1.0f;
                m_userAttribTransforms[attribNdx]((1 + attribNdx) % 4, 1) = 1.0f;
                m_userAttribTransforms[attribNdx]((2 + attribNdx) % 4, 2) = 1.0f;
@@ -1695,6 +1696,20 @@ void ShaderMatrixCase::init (void)
                        }
                }
        }
+       // The verification code doesn't deal with reduced precision, so we must quantize the data
+       // here to try to avoid verification errors. No implementation seems to use lowp, so reduce
+       // to mediump.
+       if (resultPrec != PRECISION_HIGHP)
+       {
+               for (int attribNdx = 0; attribNdx < 4; attribNdx++)
+               {
+                       for (int row = 0; row < 4; row++)
+                       for (int col = 0; col < 4; col++)
+                       {
+                               m_userAttribTransforms[attribNdx](row, col) = deFloat16To32(deFloat32To16(m_userAttribTransforms[attribNdx](row, col)));
+                       }
+               }
+       }
 
        ShaderRenderCase::init();
 }
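
Worth noting (an observation, not part of the commit): the binary16 round
trip is idempotent, so once the attribute data is quantized, any further
mediump-or-better conversion leaves it bit-exact. A hypothetical check
under the same _Float16 assumption as the sketch above:

    // Once a value survives one binary16 round trip, further round trips
    // are no-ops, so a mediump pipeline cannot perturb the inputs on load.
    #include <cassert>

    static float quantizeToMediump (float v) { return (float)(_Float16)v; }

    int main (void)
    {
        for (int attribNdx = 0; attribNdx < 4; attribNdx++)
        {
            // The quantized w-column values from the patched test.
            const float v = quantizeToMediump(0.4f + 0.1f * (float)attribNdx);
            assert(quantizeToMediump(v) == v);
        }
        return 0;
    }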