Enable NEON snrm2 function for fp16 computation on Android (ARM).
Add unit test for the fp16 snrm2 function on Android (ARM).
**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped
Signed-off-by: Debadri Samaddar <s.debadri@samsung.com>
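
For context, snrm2 computes the Euclidean norm ||X||_2 = sqrt(sum_i X[i]^2). The NEON kernel returns the running sum of squares and the caller applies the square root, so only the contiguous case (incX == 1) is dispatched to NEON while strided inputs keep the scalar loop. A minimal sketch of that call path, assuming the names from the hunks below (`snrm2_dispatch` is a hypothetical wrapper for illustration):

```cpp
// sketch only: mirrors the dispatch added to snrm2_FP16 below
static _FP16 snrm2_dispatch(const unsigned int N, const _FP16 *X,
                            const int incX) {
  unsigned int incx = abs(incX);
  _FP16 sum = 0;
  if (incX == 1) {
    sum = nntrainer::neon::snrm2_neon_fp16(N, X); // NEON sum of squares
  } else {
    for (unsigned int i = 0; i < N; i++)
      sum += X[i * incx] * X[i * incx]; // scalar fallback for strided input
  }
  return static_cast<_FP16>(sqrt(sum)); // snrm2 result
}
```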
unsigned int incx = abs(incX);
_FP16 sum = 0;
_FP16 tmp;
-#pragma omp parallel for private(tmp) reduction(+ : sum)
+#ifdef USE__FP16
+ if (incX == 1) {
+ sum = nntrainer::neon::snrm2_neon_fp16(N, X);
+ } else {
+ for (unsigned int i = 0; i < N; i++) {
+ tmp = X[i * incx];
+ sum += tmp * tmp;
+ }
+ }
+#else
for (unsigned int i = 0; i < N; i++) {
tmp = X[i * incx];
sum += tmp * tmp;
}
+#endif
return static_cast<_FP16>(sqrt(sum));
}
+
static void sgemm_FP16(CBLAS_ORDER order, CBLAS_TRANSPOSE TransA,
CBLAS_TRANSPOSE TransB, const unsigned int M,
const unsigned int N, const unsigned int K,
unsigned int incx = abs(incX);
float sum = 0.0f;
float tmp;
-#pragma omp parallel for private(tmp) reduction(+ : sum)
+
for (unsigned int i = 0; i < N; i++) {
tmp = X[i * incx];
sum += tmp * tmp;
}
#endif
+__fp16 snrm2_neon_fp16(const unsigned int N, const __fp16 *X) {
+
+ float16x8_t accX8 = vmovq_n_f16(0); // 8-lane fp16 accumulator for x*x
+ float16x4_t accX4 = vmov_n_f16(0);  // 4-lane accumulator for the tail
+
+ unsigned int idx = 0;
+ __fp16 ret = 0;
+
+ // processing batch of 8
+ for (; (N - idx) >= 8; idx += 8) {
+ float16x8_t x = vld1q_f16(&X[idx]);
+
+ // x*x + accX8 -> accX8
+ accX8 = vfmaq_f16(accX8, x, x);
+ }
+
+ // check that at least one batch of 8 was processed
+ // (N is unsigned, so a plain N - 8 >= 0 comparison would always hold)
+ if (N >= 8) {
+ __fp16 result[8];
+ vst1q_f16(result, accX8);
+ for (unsigned int i = 0; i < 8; i++)
+ ret += result[i];
+ }
+
+ // processing remaining batch of 4
+ for (; (N - idx) >= 4; idx += 4) {
+ float16x4_t x = vld1_f16(&X[idx]);
+
+ // x*x + accX4 -> accX4
+ accX4 = vfma_f16(accX4, x, x);
+ }
+
+ // check that at least one batch of 4 was processed
+ if (N % 8 >= 4) {
+ __fp16 result[4];
+ vst1_f16(result, accX4);
+ ret += result[0] + result[1] + result[2] + result[3];
+ }
+
+ // processing remaining values
+ for (; idx < N; idx++)
+ ret += X[idx] * X[idx];
+
+ return ret;
+}
+
} // namespace nntrainer::neon
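
A quick usage sketch for the new kernel (hypothetical standalone snippet; the kernel returns the sum of squares, so the caller takes the square root, as snrm2_FP16 does above):

```cpp
#include <cmath>
#include <blas_neon.h> // assumption: header declaring snrm2_neon_fp16

float l2norm_of(const __fp16 *data, unsigned int n) {
  // kernel returns sum_i data[i]^2 in fp16
  __fp16 sum_sq = nntrainer::neon::snrm2_neon_fp16(n, data);
  // widen to float before sqrt to limit fp16 rounding error
  return std::sqrt(static_cast<float>(sum_sq));
}
```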
__fp16 sdot_neon_fp16(const unsigned int N, const __fp16 *X, const __fp16 *Y);
#endif
+/**
+ * @brief snrm2 computation with neon: sum of squares of the elements of X
+ * (the caller applies the square root)
+ * @param[in] N number of elements in X
+ * @param[in] X __fp16 * for Vector X
+ */
+__fp16 snrm2_neon_fp16(const unsigned int N, const __fp16 *X);
+
} // namespace nntrainer::neon
#endif /* __cplusplus */
EXPECT_IN_RANGE((float)cosSimNeon, 0.99, 1);
}
+TEST(nntrainer_Tensor, l2norm) {
+
+ nntrainer::TensorDim::TensorType t_type_nchw_fp16 = {
+ nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16};
+
+ nntrainer::TensorDim::TensorType t_type_nchw_fp32 = {
+ nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP32};
+
+ // conditions for fp16 snrm2 call:
+ // this->(batch * channel * height) = 1;
+
+ size_t width = 23;
+
+ __fp16 a_data[] = {0, 1.2, 2, 3.4, 4.1, 5.3, 2.9, 2.1, 1.4, 1.6, 0, 2.7,
+ 2.3, 1, 2, 1.1, 3.1, 1.1, 2.8, 3.2, 2, 3.6, 1};
+ nntrainer::Tensor input(
+ nntrainer::TensorDim(1, 1, 1, width, t_type_nchw_fp16), a_data);
+
+ float a_data_fp32[] = {0, 1.2, 2, 3.4, 4.1, 5.3, 2.9, 2.1, 1.4, 1.6, 0, 2.7,
+ 2.3, 1, 2, 1.1, 3.1, 1.1, 2.8, 3.2, 2, 3.6, 1};
+ nntrainer::Tensor input_fp32(
+ nntrainer::TensorDim(1, 1, 1, width, t_type_nchw_fp32), a_data_fp32);
+
+ __fp16 result_neon;
+ float result_fp32;
+
+ // NEON fp16
+ result_neon = input.l2norm();
+
+ // fp32
+ result_fp32 = input_fp32.l2norm();
+
+ // absolute error
+ const float epsilon = 1e-2;
+
+ EXPECT_NEAR(result_neon, result_fp32, epsilon);
+}
+
GTEST_API_ int main(int argc, char **argv) {
int result = -1;