[Build] Decouple gtest from nntrainer_test_util
author    Jihoon Lee <jhoon.it.lee@samsung.com>
Thu, 28 Jan 2021 04:56:18 +0000 (13:56 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Wed, 17 Feb 2021 03:47:30 +0000 (12:47 +0900)
As nntrainer_test_util linked gtest, the compiler was prevented from catching
some trivial bugs (like sign comparisons), and the coupling also caused an odd bug.

This patch decouples gtest from nntrainer_test_util while changing gtest
to a static build.
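
For context, the sign-compare class of warning mentioned above is raised inside
gtest's comparison templates once the compiler can actually see the test code.
A minimal sketch of the pattern (the getter here is a hypothetical stand-in,
not the real nntrainer API):

```cpp
#include <gtest/gtest.h>

// Hypothetical stand-in for an unsigned getter such as TensorDim::batch().
static unsigned int getBatch() { return 1; }

TEST(sign_compare_sketch, unsigned_vs_signed_literal) {
  // EXPECT_EQ(getBatch(), 1) would forward the literal as an int, so gtest
  // compares int against unsigned int and -Wsign-compare fires; the 1u
  // suffix, as used throughout the diff below, keeps both sides unsigned.
  EXPECT_EQ(getBatch(), 1u);
}
```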

Resolves #910

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee <jhoon.it.lee@samsung.com>
16 files changed:
meson.build
test/include/nntrainer_test_util.h
test/meson.build
test/nntrainer_test_util.cpp
test/tizen_capi/unittest_tizen_capi.cpp
test/tizen_capi/unittest_tizen_capi_dataset.cpp
test/tizen_capi/unittest_tizen_capi_layer.cpp
test/tizen_capi/unittest_tizen_capi_optimizer.cpp
test/unittest/unittest_databuffer_file.cpp
test/unittest/unittest_nntrainer_activations.cpp
test/unittest/unittest_nntrainer_internal.cpp
test/unittest/unittest_nntrainer_layers.cpp
test/unittest/unittest_nntrainer_lazy_tensor.cpp
test/unittest/unittest_nntrainer_modelfile.cpp
test/unittest/unittest_nntrainer_tensor.cpp
test/unittest/unittest_util_func.cpp

meson.build
index 23e3461..7b5365a 100644 (file)
@@ -157,7 +157,8 @@ if get_option('enable-tflite-backbone')
   add_project_arguments('-DENABLE_TFLITE_BACKBONE=1', language:['c','cpp'])
 endif
 
-gtest_dep = dependency('gtest', required: false)
+gtest_dep = dependency('gtest', static: true, main: false, required: false)
+gtest_main_dep = dependency('gtest', static: true, main: true, required: false)
 
 opencv_dep = dependency('opencv', required: false)
 if not opencv_dep.found()
@@ -192,7 +193,11 @@ if get_option('enable-app')
 endif
 
 if get_option('enable-test')
-  subdir('test')
+  if gtest_dep.found()
+    subdir('test')
+  else
+    error('test enabled but gtest not found')
+  endif
 endif
 
 if get_option('enable-nnstreamer-tensor-filter')
test/include/nntrainer_test_util.h
index 6d37c76..e95aca7 100644 (file)
@@ -26,7 +26,6 @@
 #ifdef __cplusplus
 
 #include <fstream>
-#include <gtest/gtest.h>
 #include <unordered_map>
 
 #include <neuralnet.h>
@@ -450,20 +449,6 @@ const std::string config_str2 = "[Model]"
     }                                          \
   } while (0)
 
-#define ASSERT_EXCEPTION(TRY_BLOCK, EXCEPTION_TYPE, MESSAGE)                  \
-  try {                                                                       \
-    TRY_BLOCK                                                                 \
-    FAIL() << "exception '" << MESSAGE << "' not thrown at all!";             \
-  } catch (const EXCEPTION_TYPE &e) {                                         \
-    EXPECT_EQ(std::string(MESSAGE), e.what())                                 \
-      << " exception message is incorrect. Expected the following "           \
-         "message:\n\n"                                                       \
-      << MESSAGE << "\n";                                                     \
-  } catch (...) {                                                             \
-    FAIL() << "exception '" << MESSAGE << "' not thrown with expected type '" \
-           << #EXCEPTION_TYPE << "'!";                                        \
-  }
-
 #define RESET_CONFIG(conf_name)                              \
   do {                                                       \
     std::ifstream file_stream(conf_name, std::ifstream::in); \
test/meson.build
index dd536ad..84a5caa 100644 (file)
@@ -1,23 +1,23 @@
 nntrainer_test_inc = include_directories('./include')
 
 nntrainer_test_deps = [
-  nntrainer_dep #gtest is linked in nntrainer_testutil_lib
+  nntrainer_dep
 ]
 
-# build test util when gtest is found
-if gtest_dep.found()
-  nntrainer_testutil_lib = static_library(
-    'nntrainer_test_util',
-    'nntrainer_test_util.cpp',
-    dependencies: [nntrainer_test_deps, gtest_dep],
-    include_directories: nntrainer_test_inc
-  )
-  nntrainer_testutil_dep = declare_dependency(
-    link_with: nntrainer_testutil_lib,
-    include_directories: nntrainer_test_inc
-  )
-  nntrainer_test_deps += nntrainer_testutil_dep
-endif
+nntrainer_testutil_lib = static_library(
+  'nntrainer_test_util',
+  'nntrainer_test_util.cpp',
+  dependencies: [nntrainer_test_deps],
+  include_directories: nntrainer_test_inc
+)
+
+nntrainer_testutil_dep = declare_dependency(
+  link_with: nntrainer_testutil_lib,
+  include_directories: nntrainer_test_inc
+)
+
+nntrainer_test_deps += nntrainer_testutil_dep
+nntrainer_test_deps += gtest_dep
 
 if get_option('enable-capi')
   subdir('tizen_capi')
test/nntrainer_test_util.cpp
index 8321962..fa34477 100644 (file)
@@ -321,7 +321,8 @@ void IniSection::setEntry(const std::string &entry_str) {
     }
 
     int status = nntrainer::getKeyValue(cur, key, value);
-    EXPECT_EQ(status, ML_ERROR_NONE);
+    NNTR_THROW_IF(status != ML_ERROR_NONE, std::invalid_argument)
+      << "getKeyValue Failed";
     entry[key] = value;
   }
 }
test/tizen_capi/unittest_tizen_capi.cpp
index 595549f..88e00a4 100644 (file)
@@ -19,6 +19,7 @@
  * @author      Jijoong Moon <jijoong.moon@samsung.com>
  * @bug         No known bugs
  */
+#include <gtest/gtest.h>
 
 #include <nntrainer.h>
 #include <nntrainer_internal.h>
@@ -903,7 +904,7 @@ TEST(nntrainer_capi_summary, summary_01_p) {
   status = ml_train_model_get_summary(handle, ML_TRAIN_SUMMARY_TENSOR, &sum);
   EXPECT_EQ(status, ML_ERROR_NONE);
 
-  EXPECT_GT(strlen(sum), 100);
+  EXPECT_GT(strlen(sum), 100u);
 
   status = ml_train_model_destroy(handle);
   EXPECT_EQ(status, ML_ERROR_NONE);
test/tizen_capi/unittest_tizen_capi_dataset.cpp
index 0514a56..3f9b7bd 100644 (file)
@@ -9,6 +9,7 @@
  * @author      Parichay Kapoor <pk.kapoor@samsung.com>
  * @bug         No known bugs
  */
+#include <gtest/gtest.h>
 
 #include <nntrainer.h>
 #include <nntrainer_internal.h>
test/tizen_capi/unittest_tizen_capi_layer.cpp
index 9303c1d..c205a32 100644 (file)
@@ -19,6 +19,7 @@
  * @author      Jijoong Moon <jijoong.moon@samsung.com>
  * @bug         No known bugs
  */
+#include <gtest/gtest.h>
 
 #include <nntrainer.h>
 #include <nntrainer_internal.h>
test/tizen_capi/unittest_tizen_capi_optimizer.cpp
index 7587524..d284744 100644 (file)
@@ -19,6 +19,7 @@
  * @author      Jijoong Moon <jijoong.moon@samsung.com>
  * @bug         No known bugs
  */
+#include <gtest/gtest.h>
 
 #include <nntrainer.h>
 #include <nntrainer_internal.h>
test/unittest/unittest_databuffer_file.cpp
index 020ca99..cabe948 100644 (file)
@@ -20,6 +20,8 @@
  * @author      Jijoong Moon <jijoong.moon@samsung.com>
  * @bug         No known bugs
  */
+#include <gtest/gtest.h>
+
 #include "nntrainer_test_util.h"
 #include <databuffer_file.h>
 #include <fstream>
test/unittest/unittest_nntrainer_activations.cpp
index 95c6bea..7de9e5d 100644 (file)
@@ -21,9 +21,9 @@
  * @author      Parichay Kapoor <pk.kapoor@samsung.com>
  * @bug         No known bugs
  */
+#include <gtest/gtest.h>
 
 #include <activation_layer.h>
-#include <gtest/gtest.h>
 #include <neuralnet.h>
 #include <nntrainer_error.h>
 #include <nntrainer_log.h>
test/unittest/unittest_nntrainer_internal.cpp
index 46e4a5e..7216fa4 100644 (file)
@@ -20,6 +20,8 @@
  * @author      Jijoong Moon <jijoong.moon@samsung.com>
  * @bug         No known bugs
  */
+#include <gtest/gtest.h>
+
 #include <fstream>
 
 #include <databuffer_file.h>
test/unittest/unittest_nntrainer_layers.cpp
index af2b893..db44ec5 100644 (file)
@@ -9,6 +9,8 @@
  * @author      Jijoong Moon <jijoong.moon@samsung.com>
  * @bug         No known bugs
  */
+#include <gtest/gtest.h>
+
 #include <fstream>
 #include <regex>
 
@@ -237,10 +239,10 @@ TEST_F(nntrainer_InputLayer, set_property_02_p) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   dim = layer.getInputDimension()[0];
-  EXPECT_EQ(dim.getTensorDim(0), 1);
-  EXPECT_EQ(dim.getTensorDim(1), 3);
-  EXPECT_EQ(dim.getTensorDim(2), 2);
-  EXPECT_EQ(dim.getTensorDim(3), 1);
+  EXPECT_EQ(dim.getTensorDim(0), 1u);
+  EXPECT_EQ(dim.getTensorDim(1), 3u);
+  EXPECT_EQ(dim.getTensorDim(2), 2u);
+  EXPECT_EQ(dim.getTensorDim(3), 1u);
 }
 
 TEST_F(nntrainer_InputLayer, set_property_03_p) {
@@ -249,10 +251,10 @@ TEST_F(nntrainer_InputLayer, set_property_03_p) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   dim = layer.getInputDimension()[0];
-  EXPECT_EQ(dim.getTensorDim(0), 1);
-  EXPECT_EQ(dim.getTensorDim(1), 3);
-  EXPECT_EQ(dim.getTensorDim(2), 2);
-  EXPECT_EQ(dim.getTensorDim(3), 1);
+  EXPECT_EQ(dim.getTensorDim(0), 1u);
+  EXPECT_EQ(dim.getTensorDim(1), 3u);
+  EXPECT_EQ(dim.getTensorDim(2), 2u);
+  EXPECT_EQ(dim.getTensorDim(3), 1u);
 }
 
 TEST_F(nntrainer_InputLayer, set_property_04_p) {
@@ -262,10 +264,10 @@ TEST_F(nntrainer_InputLayer, set_property_04_p) {
 
   /** Set input shape ignores batch size */
   dim = layer.getInputDimension()[0];
-  EXPECT_EQ(dim.getTensorDim(0), 1);
-  EXPECT_EQ(dim.getTensorDim(1), 3);
-  EXPECT_EQ(dim.getTensorDim(2), 2);
-  EXPECT_EQ(dim.getTensorDim(3), 1);
+  EXPECT_EQ(dim.getTensorDim(0), 1u);
+  EXPECT_EQ(dim.getTensorDim(1), 3u);
+  EXPECT_EQ(dim.getTensorDim(2), 2u);
+  EXPECT_EQ(dim.getTensorDim(3), 1u);
 }
 
 TEST_F(nntrainer_InputLayer, set_property_05_p) {
@@ -276,30 +278,30 @@ TEST_F(nntrainer_InputLayer, set_property_05_p) {
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   dim = layer.getInputDimension()[0];
-  EXPECT_EQ(dim.getTensorDim(0), 5);
-  EXPECT_EQ(dim.getTensorDim(1), 3);
-  EXPECT_EQ(dim.getTensorDim(2), 28);
-  EXPECT_EQ(dim.getTensorDim(3), 28);
+  EXPECT_EQ(dim.getTensorDim(0), 5u);
+  EXPECT_EQ(dim.getTensorDim(1), 3u);
+  EXPECT_EQ(dim.getTensorDim(2), 28u);
+  EXPECT_EQ(dim.getTensorDim(3), 28u);
 
   /** Original batch size is retained */
   status = setProperty("input_shape=1:3:2:1");
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   dim = layer.getInputDimension()[0];
-  EXPECT_EQ(dim.getTensorDim(0), 5);
-  EXPECT_EQ(dim.getTensorDim(1), 3);
-  EXPECT_EQ(dim.getTensorDim(2), 2);
-  EXPECT_EQ(dim.getTensorDim(3), 1);
+  EXPECT_EQ(dim.getTensorDim(0), 5u);
+  EXPECT_EQ(dim.getTensorDim(1), 3u);
+  EXPECT_EQ(dim.getTensorDim(2), 2u);
+  EXPECT_EQ(dim.getTensorDim(3), 1u);
 
   /** Original batch size is retained */
   status = setProperty("input_shape=4:3:2:1");
   EXPECT_EQ(status, ML_ERROR_NONE);
 
   dim = layer.getInputDimension()[0];
-  EXPECT_EQ(dim.getTensorDim(0), 5);
-  EXPECT_EQ(dim.getTensorDim(1), 3);
-  EXPECT_EQ(dim.getTensorDim(2), 2);
-  EXPECT_EQ(dim.getTensorDim(3), 1);
+  EXPECT_EQ(dim.getTensorDim(0), 5u);
+  EXPECT_EQ(dim.getTensorDim(1), 3u);
+  EXPECT_EQ(dim.getTensorDim(2), 2u);
+  EXPECT_EQ(dim.getTensorDim(3), 1u);
 }
 
 /**
@@ -630,7 +632,7 @@ TEST(nntrainer_FullyConnectedLayer_init_name, initialize_05_n) {
 
   /** no name is set */
   layer_name = layer0.getName();
-  EXPECT_EQ(layer_name.length(), 0);
+  EXPECT_EQ(layer_name.length(), 0u);
 
   /** Set empty name */
   status = layer0.setProperty({"name="});
@@ -1291,8 +1293,8 @@ TEST_F(nntrainer_Conv2DLayer, print_01_p) {
   std::stringstream ss, ss2;
   layer.printPreset(ss, nntrainer::Layer::PrintPreset::PRINT_ALL);
   ss2 << layer;
-  EXPECT_GT(ss.str().size(), 100);
-  EXPECT_GT(ss2.str().size(), 100);
+  EXPECT_GT(ss.str().size(), 100u);
+  EXPECT_GT(ss2.str().size(), 100u);
 }
 
 /**
test/unittest/unittest_nntrainer_lazy_tensor.cpp
index 1e6baff..0c88a6c 100644 (file)
@@ -9,6 +9,7 @@
  * @bug                No known bugs except for NYI items
  *
  */
+#include <gtest/gtest.h>
 
 #include "nntrainer_test_util.h"
 #include "util_func.h"
test/unittest/unittest_nntrainer_modelfile.cpp
index ff56937..2842c4f 100644 (file)
@@ -692,7 +692,7 @@ TEST(nntrainerIniTest, backbone_p_14) {
 
   EXPECT_EQ(NN_full.getInputDimension()[0].channel(),
             NN_scaled_zero.getInputDimension()[0].channel());
-  EXPECT_EQ(1, NN_scaled_zero.getOutputDimension()[0].channel());
+  EXPECT_EQ(1u, NN_scaled_zero.getOutputDimension()[0].channel());
 }
 
 /**
@@ -791,7 +791,7 @@ TEST(nntrainerIniTest, backbone_n_18) {
   EXPECT_EQ(NN.compile(), ML_ERROR_NONE);
   EXPECT_EQ(NN.initialize(), ML_ERROR_NONE);
 
-  EXPECT_EQ(NN.getNetworkGraph().getSorted().size(), 3);
+  EXPECT_EQ(NN.getNetworkGraph().getSorted().size(), 3u);
 }
 
 /**
@@ -813,7 +813,7 @@ TEST(nntrainerIniTest, backbone_n_19) {
   EXPECT_EQ(NN.compile(), ML_ERROR_NONE);
   EXPECT_EQ(NN.initialize(), ML_ERROR_NONE);
 
-  EXPECT_EQ(NN.getNetworkGraph().getSorted().size(), 3);
+  EXPECT_EQ(NN.getNetworkGraph().getSorted().size(), 3u);
 }
 
 /**
@@ -834,7 +834,7 @@ TEST(nntrainerIniTest, backbone_p_20) {
   EXPECT_EQ(NN.loadFromConfig(ini_name), ML_ERROR_NONE);
   EXPECT_EQ(NN.compile(), ML_ERROR_NONE);
   EXPECT_EQ(NN.initialize(), ML_ERROR_NONE);
-  EXPECT_EQ(NN.getNetworkGraph().getSorted().size(), 6);
+  EXPECT_EQ(NN.getNetworkGraph().getSorted().size(), 6u);
 }
 
 /**
@@ -856,7 +856,7 @@ TEST(nntrainerIniTest, backbone_n_21) {
   EXPECT_EQ(NN.loadFromConfig(ini_name), ML_ERROR_NONE);
   EXPECT_EQ(NN.compile(), ML_ERROR_NONE);
   EXPECT_EQ(NN.initialize(), ML_ERROR_NONE);
-  EXPECT_EQ(NN.getNetworkGraph().getSorted().size(), 3);
+  EXPECT_EQ(NN.getNetworkGraph().getSorted().size(), 3u);
   // EXPECT_EQ(NN.getGraph().size(), 3);
 }
 
test/unittest/unittest_nntrainer_tensor.cpp
index defe341..d73b440 100644 (file)
@@ -9,6 +9,8 @@
  * @author      Jijoong Moon <jijoong.moon@samsung.com>
  * @bug         No known bugs
  */
+#include <gtest/gtest.h>
+
 #include "nntrainer_test_util.h"
 #include "util_func.h"
 #include <fstream>
@@ -72,10 +74,10 @@ TEST(nntrainer_TensorDim, setTensorDim_04_p) {
   d.setTensorDim(2, 6);
   d.setTensorDim(3, 7);
 
-  EXPECT_EQ(d.batch(), 4);
-  EXPECT_EQ(d.channel(), 5);
-  EXPECT_EQ(d.height(), 6);
-  EXPECT_EQ(d.width(), 7);
+  EXPECT_EQ(d.batch(), 4u);
+  EXPECT_EQ(d.channel(), 5u);
+  EXPECT_EQ(d.height(), 6u);
+  EXPECT_EQ(d.width(), 7u);
 }
 
 TEST(nntrainer_Tensor, TensorWrap_p) {
@@ -2371,10 +2373,10 @@ TEST(nntrainer_Tensor, reshape_n_02) {
 
   /** Changing the dim of a tensor only affects local copy of the dim */
   A_dim.setTensorDim(1, 100);
-  EXPECT_EQ(A_dim.getTensorDim(1), 100);
+  EXPECT_EQ(A_dim.getTensorDim(1), 100u);
 
   nntrainer::TensorDim A_dim_2 = A.getDim();
-  EXPECT_EQ(A_dim_2.getTensorDim(1), 4);
+  EXPECT_EQ(A_dim_2.getTensorDim(1), 4u);
 }
 
 TEST(nntrainer_Tensor, copy_and_reshape_n) {
test/unittest/unittest_util_func.cpp
index 7e145bd..f28a931 100644 (file)
@@ -20,8 +20,8 @@
  * @author      Jijoong Moon <jijoong.moon@samsung.com>
  * @bug         No known bugs
  */
-
 #include <gtest/gtest.h>
+
 #include <nntrainer_error.h>
 #include <nntrainer_log.h>
 #include <nntrainer_logger.h>