COMPMID-3464: Address NESoftmaxLayer failures for QASYMM8_SIGNED
author Georgios Pinitas <georgios.pinitas@arm.com>
Tue, 12 May 2020 20:03:56 +0000 (21:03 +0100)
committer TeresaARM <teresa.charlinreyes@arm.com>
Wed, 13 May 2020 08:52:14 +0000 (08:52 +0000)
Normalization with the maximum value was causing results to wrap around.
As a work-around, we use saturating intrinsics to perform the operation.

Signed-off-by: Georgios Pinitas <georgios.pinitas@arm.com>
Change-Id: I719b7ac7ad274dc2ae339bc4a055f9200134ed97
Reviewed-on: https://review.mlplatform.org/c/ml/ComputeLibrary/+/3184
Tested-by: Arm Jenkins <bsgcomp@arm.com>
Reviewed-by: Sang-Hoon Park <sang-hoon.park@arm.com>
Comments-Addressed: Arm Jenkins <bsgcomp@arm.com>

arm_compute/core/NEON/wrapper/intrinsics/sub.h
src/core/NEON/kernels/NESoftmaxLayerKernel.cpp

index 870908d253fe7a2889c19ba12a2537b62d980d56..2c6c96125ad41ee26b2bb71c1eda632edb5912ce 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018-2019 ARM Limited.
+ * Copyright (c) 2018-2020 ARM Limited.
  *
  * SPDX-License-Identifier: MIT
  *
@@ -62,7 +62,33 @@ VSUB_IMPL(float32x4_t, float32x4_t, vsubq, f32)
 VSUB_IMPL(float16x8_t, float16x8_t, vsubq, f16)
 #endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
 
-#undef vsub_IMPL
+#undef VSUB_IMPL
+
+#define VQSUB_IMPL(stype, vtype, prefix, postfix)      \
+    inline vtype vqsub(const vtype &a, const vtype &b) \
+    {                                                  \
+        return prefix##_##postfix(a, b);               \
+    }
+
+VQSUB_IMPL(uint8x8_t, uint8x8_t, vqsub, u8)
+VQSUB_IMPL(int8x8_t, int8x8_t, vqsub, s8)
+VQSUB_IMPL(uint16x4_t, uint16x4_t, vqsub, u16)
+VQSUB_IMPL(int16x4_t, int16x4_t, vqsub, s16)
+VQSUB_IMPL(uint32x2_t, uint32x2_t, vqsub, u32)
+VQSUB_IMPL(int32x2_t, int32x2_t, vqsub, s32)
+VQSUB_IMPL(uint64x1_t, uint64x1_t, vqsub, u64)
+VQSUB_IMPL(int64x1_t, int64x1_t, vqsub, s64)
+
+VQSUB_IMPL(uint8x16_t, uint8x16_t, vqsubq, u8)
+VQSUB_IMPL(int8x16_t, int8x16_t, vqsubq, s8)
+VQSUB_IMPL(uint16x8_t, uint16x8_t, vqsubq, u16)
+VQSUB_IMPL(int16x8_t, int16x8_t, vqsubq, s16)
+VQSUB_IMPL(uint32x4_t, uint32x4_t, vqsubq, u32)
+VQSUB_IMPL(int32x4_t, int32x4_t, vqsubq, s32)
+VQSUB_IMPL(uint64x2_t, uint64x2_t, vqsubq, u64)
+VQSUB_IMPL(int64x2_t, int64x2_t, vqsubq, s64)
+
+#undef VQSUB_IMPL
 } // namespace wrapper
 } // namespace arm_compute
 #endif /* ARM_COMPUTE_WRAPPER_SUB_H */
index 790c8bacc5928746ecbe6f9574f078dc76fad7de..41bf03ad1da48fff2f1ddcc9816e255fdd5b5fe7 100644 (file)
@@ -311,7 +311,7 @@ void logits_1d_softmax_qasymm8(const ITensor &in, const ITensor &max, void *cons
             for(; x <= (input_width - vec_size); x += vec_size)
             {
                 auto vec_elements     = wrapper::vloadq(in_ptr + x);
-                vec_elements          = wrapper::vsub(vec_max, vec_elements);
+                vec_elements          = wrapper::vqsub(vec_max, vec_elements);
                 auto vec_elements_flt = convert_int_to_float<float32x4x4_t>(vec_elements);
 
                 if(is_log)