Fix tolerance for NEON RNNLayer
author    Manuel Bottini <manuel.bottini@arm.com>
Mon, 8 Feb 2021 11:51:48 +0000 (11:51 +0000)
committer Georgios Pinitas <georgios.pinitas@arm.com>
Mon, 8 Feb 2021 13:30:15 +0000 (13:30 +0000)
Hide the FP16 tolerances for the NEON backend if FP16 vector arithmetic is not supported

Resolves: COMPMID-4239

Change-Id: Ib6aad2de8a0ea621e2fdb673b9bab92c0520acd5
Signed-off-by: Manuel Bottini <manuel.bottini@arm.com>
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/5022
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Giorgio Arena <giorgio.arena@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>

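For context, a minimal sketch (not part of this patch) of the ACLE guard pattern the diff applies: the compiler defines __ARM_FEATURE_FP16_VECTOR_ARITHMETIC only when FP16 vector arithmetic is available on the target, so anything declared inside the guard is simply absent from builds that cannot exercise the FP16 tests. The constant name below is illustrative and not taken from the library.

    #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    // Compiled only on targets with FP16 vector arithmetic support;
    // other builds never see (or warn about) this declaration.
    constexpr float example_abs_tolerance_f16 = 0.02f; /* illustrative name */
    #endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
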
tests/validation/NEON/RNNLayer.cpp

index f8559ff88ea4ad158d621b31ee5c80444e81a5d7..21e20527139938d504e7ad7bfe605d6e5bf17780 100644
@@ -39,9 +39,11 @@ namespace validation
 {
 namespace
 {
-RelativeTolerance<float> tolerance_f32(0.001f);    /**< Relative tolerance value for comparing reference's output against implementation's output for DataType:F32 */
-RelativeTolerance<half>  tolerance_f16(half(0.1)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType:F16 */
-constexpr float          abs_tolerance_f16(0.02f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType:F16 */
+RelativeTolerance<float> tolerance_f32(0.001f); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType:F32 */
+#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
+RelativeTolerance<half> tolerance_f16(half(0.1)); /**< Relative tolerance value for comparing reference's output against implementation's output for DataType:F16 */
+constexpr float         abs_tolerance_f16(0.02f); /**< Absolute tolerance value for comparing reference's output against implementation's output for DataType:F16 */
+#endif                                            /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
 } // namespace
 
 TEST_SUITE(NEON)