[blas/neon] isamax edge cases unit tests
author: Debadri Samaddar <s.debadri@samsung.com>
Thu, 30 May 2024 07:30:54 +0000 (13:00 +0530)
committer: Jijoong Moon <jijoong.moon@samsung.com>
Tue, 4 Jun 2024 09:57:23 +0000 (18:57 +0900)
Added tests for UINT16_MAX boundary cases of isamax

Signed-off-by: Debadri Samaddar <s.debadri@samsung.com>
test/unittest/unittest_nntrainer_tensor_neon_fp16.cpp

index ed4a04a71b055b814a52b0b8c5ffaea03a714f2f..2c81bdcbd40f2b56f1714e299e48eb5e4ee3ad11 100644 (file)
@@ -384,7 +384,7 @@ TEST(nntrainer_Tensor, max_abs) {
   EXPECT_NEAR(result_neon, result_fp32, epsilon);
 }
 
-TEST(nntrainer_Tensor, max_abs_768) {
+TEST(nntrainer_Tensor, max_abs_768_768) {
 
   nntrainer::TensorDim::TensorType t_type_nchw_fp16 = {
     nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16};
@@ -425,6 +425,88 @@ TEST(nntrainer_Tensor, max_abs_768) {
   EXPECT_IN_RANGE(absErrorNeon, 0, epsilon);
 }
 
+// Boundary-case test for Tensor::max_abs() on a tensor of exactly
+// UINT16_MAX (65535) elements. Per the commit intent, this exercises the
+// 16-bit index boundary of the NEON isamax implementation backing
+// max_abs() — TODO confirm isamax is the underlying kernel for FP16.
+// The FP16 (NEON) result is checked against an FP32 reference tensor
+// filled with the identical value pattern.
+TEST(nntrainer_Tensor, max_abs_65535) {
+
+  nntrainer::TensorDim::TensorType t_type_nchw_fp16 = {
+    nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16};
+
+  nntrainer::TensorDim::TensorType t_type_nchw_fp32 = {
+    nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP32};
+
+  // 1x1x1xUINT16_MAX: width carries the full element count so the
+  // reduction runs over 65535 contiguous values.
+  size_t batch = 1;
+  size_t channel = 1;
+  size_t height = 1;
+  size_t width = UINT16_MAX;
+
+  nntrainer::Tensor input(
+    nntrainer::TensorDim(batch, channel, height, width, t_type_nchw_fp16));
+
+  nntrainer::Tensor input_fp32(
+    nntrainer::TensorDim(batch, channel, height, width, t_type_nchw_fp32));
+
+  // Pattern parameters: (... % MOD) * alpha keeps every element in
+  // [0.0, 0.9], well inside FP16's exactly-representable-enough range.
+  const float alpha = 1e-1;
+  const int MOD = 10;
+
+  // Fill both tensors with the same deterministic pseudo-pattern so the
+  // FP16 and FP32 reductions see identical inputs (modulo precision).
+  GEN_TEST_INPUT(input, ((k * l * (batch * height * channel) +
+                          l * (batch * height) + k * (width) + l + 1) %
+                         MOD) *
+                          alpha);
+  GEN_TEST_INPUT(input_fp32, ((k * l * (batch * height * channel) +
+                               l * (batch * height) + k * (width) + l + 1) %
+                              MOD) *
+                               alpha);
+
+  // NEON FP16 result vs. FP32 reference result.
+  __fp16 result_neon = input.max_abs();
+  float result_fp32 = input_fp32.max_abs();
+
+  float absErrorNeon = std::abs(result_neon - result_fp32);
+
+  // 1e-3 tolerance covers FP16 rounding of values like 0.9
+  // (FP16(0.9) ≈ 0.90039, error ≈ 4e-4).
+  const float epsilon = 1e-3;
+
+  EXPECT_IN_RANGE(absErrorNeon, 0, epsilon);
+}
+
+// Companion boundary-case test for Tensor::max_abs() with UINT16_MAX - 1
+// (65534) elements — one below the 16-bit boundary covered by the test
+// above. NOTE(review): presumably this catches off-by-one handling around
+// the uint16 limit in the NEON isamax path — confirm against the kernel.
+// Structure is identical to max_abs_65535 apart from the width.
+TEST(nntrainer_Tensor, max_abs_65534) {
+
+  nntrainer::TensorDim::TensorType t_type_nchw_fp16 = {
+    nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP16};
+
+  nntrainer::TensorDim::TensorType t_type_nchw_fp32 = {
+    nntrainer::Tformat::NCHW, nntrainer::Tdatatype::FP32};
+
+  // 1x1x1x(UINT16_MAX - 1): element count just under the 16-bit limit.
+  size_t batch = 1;
+  size_t channel = 1;
+  size_t height = 1;
+  size_t width = UINT16_MAX - 1;
+
+  nntrainer::Tensor input(
+    nntrainer::TensorDim(batch, channel, height, width, t_type_nchw_fp16));
+
+  nntrainer::Tensor input_fp32(
+    nntrainer::TensorDim(batch, channel, height, width, t_type_nchw_fp32));
+
+  // (... % MOD) * alpha bounds every element to [0.0, 0.9].
+  const float alpha = 1e-1;
+  const int MOD = 10;
+
+  // Same deterministic fill for both precisions so only rounding differs.
+  GEN_TEST_INPUT(input, ((k * l * (batch * height * channel) +
+                          l * (batch * height) + k * (width) + l + 1) %
+                         MOD) *
+                          alpha);
+  GEN_TEST_INPUT(input_fp32, ((k * l * (batch * height * channel) +
+                               l * (batch * height) + k * (width) + l + 1) %
+                              MOD) *
+                               alpha);
+
+  // NEON FP16 result vs. FP32 reference result.
+  __fp16 result_neon = input.max_abs();
+  float result_fp32 = input_fp32.max_abs();
+
+  float absErrorNeon = std::abs(result_neon - result_fp32);
+
+  // Tolerance sized to FP16 rounding error of the max value (≈4e-4).
+  const float epsilon = 1e-3;
+
+  EXPECT_IN_RANGE(absErrorNeon, 0, epsilon);
+}
+
 TEST(nntrainer_Tensor, sum_gemv_transpose_2_10) {
   int batch = 3;
   int channel = 2;