[TCM] Add a few negative cases
author Jihoon Lee <jhoon.it.lee@samsung.com>
Thu, 13 Aug 2020 07:35:04 +0000 (16:35 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Tue, 18 Aug 2020 01:13:58 +0000 (10:13 +0900)
**Changes proposed in this PR:**
- Add LossLayer / util negative test cases
- Change ASSERT_EXCEPTION -> EXPECT_THROW (see the sketch below)
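
For reference, a minimal before/after sketch of the macro swap, using the divide-by-zero case from the tensor tests below. Judging from the removed lines in the diff, ASSERT_EXCEPTION was a local test macro that also matched the exception message; EXPECT_THROW is the standard gtest macro and checks only the exception type:

```cpp
// Before: local macro asserting both the exception type and its message.
ASSERT_EXCEPTION({ input.divide(0.0); }, std::runtime_error,
                 "Error: Divide by zero");

// After: standard gtest macro asserting only the exception type,
// so the tests no longer break when the message wording changes.
EXPECT_THROW({ input.divide(0.0); }, std::runtime_error);
```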

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee <jhoon.it.lee@samsung.com>
test/unittest/unittest_nntrainer_layers.cpp
test/unittest/unittest_nntrainer_modelfile.cpp
test/unittest/unittest_nntrainer_tensor.cpp
test/unittest/unittest_util_func.cpp

index e45f78e..d04c5eb 100644
--- a/test/unittest/unittest_nntrainer_layers.cpp
+++ b/test/unittest/unittest_nntrainer_layers.cpp
@@ -1357,16 +1357,86 @@ TEST(nntrainer_LossLayer, setCost_02_n) {
   EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
 }
 
+TEST(nntrainer_LossLayer, forward_nolabel_n) {
+  nntrainer::LossLayer layer;
+  nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+  EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a)), std::runtime_error);
+}
+
+TEST(nntrainer_LossLayer, forward_cost_unknown_n) {
+  nntrainer::LossLayer layer;
+  nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+  nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
+  EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a), MAKE_SHARED_TENSOR(b)),
+               std::runtime_error);
+}
+
+TEST(nntrainer_LossLayer, backward_cost_unknown_n) {
+  nntrainer::LossLayer layer;
+  nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+  EXPECT_THROW(layer.backwarding(MAKE_SHARED_TENSOR(a), 1), std::runtime_error);
+}
+
+TEST(nntrainer_LossLayer, forward_cost_forward_entropy_n) {
+  nntrainer::LossLayer layer;
+  layer.setCost(nntrainer::COST_ENTROPY);
+  nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+  nntrainer::Tensor b = constant(1.0, 1, 1, 1, 1);
+  EXPECT_THROW(layer.forwarding(MAKE_SHARED_TENSOR(a), MAKE_SHARED_TENSOR(b)),
+               std::runtime_error);
+}
+
+TEST(nntrainer_LossLayer, backward_cost_backward_entropy_n) {
+  nntrainer::LossLayer layer;
+  layer.setCost(nntrainer::COST_ENTROPY);
+  nntrainer::Tensor a = constant(1.0, 1, 1, 1, 1);
+  EXPECT_THROW(layer.backwarding(MAKE_SHARED_TENSOR(a), 1), std::runtime_error);
+}
+
 /**
  * @brief Loss Layer
  */
-TEST(nntrainer_LossLayer, setProperty_01_n) {
+TEST(nntrainer_LossLayer, setProperty_through_vector_n) {
   int status = ML_ERROR_NONE;
   nntrainer::LossLayer layer;
   status = layer.setProperty({"loss=cross"});
   EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
 }
 
+TEST(nntrainer_LossLayer, init_with_last_false_n) {
+  int status = ML_ERROR_NONE;
+  nntrainer::LossLayer layer;
+  status = layer.initialize(false);
+  EXPECT_EQ(status, ML_ERROR_INVALID_PARAMETER);
+}
+
+TEST(nntrainer_LossLayer, setProperty_individual_n) {
+  nntrainer::LossLayer layer;
+  EXPECT_THROW(
+    layer.setProperty(nntrainer::Layer::PropertyType::input_shape, "1:2:3:4"),
+    nntrainer::exception::not_supported);
+}
+
+TEST(nntrainer_LossLayer, setProperty_individual2_n) {
+  nntrainer::LossLayer layer;
+  EXPECT_THROW(layer.setProperty(nntrainer::Layer::PropertyType::filter, "1:2"),
+               nntrainer::exception::not_supported);
+}
+
+TEST(nntrainer_LossLayer, setProperty_individual3_n) {
+  nntrainer::LossLayer layer;
+  EXPECT_THROW(layer.setProperty(nntrainer::Layer::PropertyType::input_shape,
+                                 "invalid_string"),
+               nntrainer::exception::not_supported);
+}
+
+TEST(nntrainer_LossLayer, setProperty_individual4_n) {
+  nntrainer::LossLayer layer;
+  EXPECT_THROW(
+    layer.setProperty(nntrainer::Layer::PropertyType::filter, "invalid_string"),
+    nntrainer::exception::not_supported);
+}
+
 TEST(nntrainer_ActivationLayer, init_01_n) {
   nntrainer::ActivationLayer layer;
   EXPECT_THROW(layer.initialize(false), std::invalid_argument);
index 92a4c21..577aad0 100644
--- a/test/unittest/unittest_nntrainer_modelfile.cpp
+++ b/test/unittest/unittest_nntrainer_modelfile.cpp
@@ -31,7 +31,7 @@ TEST_P(nntrainerIniTest, loadConfig) {
 /**
 * @brief Negative test: the given ini fails when loaded twice
  */
-TEST_P(nntrainerIniTest, loadConfigTwice) {
+TEST_P(nntrainerIniTest, loadConfigTwice_n) {
   std::cout << std::get<0>(GetParam()) << std::endl;
   NN.loadFromConfig();
   int status = NN.loadFromConfig();
@@ -71,7 +71,7 @@ TEST_P(nntrainerIniTest, initTwice_n) {
 * @brief check that the given ini fails when init happens three times;
 * this should always fail.
  */
-TEST_P(nntrainerIniTest, initThreetime) {
+TEST_P(nntrainerIniTest, initThreetime_n) {
   std::cout << std::get<0>(GetParam()) << std::endl;
   int status = NN.loadFromConfig();
   status = NN.init();
@@ -82,7 +82,6 @@ TEST_P(nntrainerIniTest, initThreetime) {
 }
 
 /// @todo a run test could be added with an iniTest flag to control skipping
-
 static IniSection nw_base("network", "Type = NeuralNetwork | "
                                      "minibatch = 32 | "
                                      "epsilon = 1e-7 | "
index e742b8b..b37d5d9 100644
--- a/test/unittest/unittest_nntrainer_tensor.cpp
+++ b/test/unittest/unittest_nntrainer_tensor.cpp
@@ -235,8 +235,7 @@ TEST(nntrainer_Tensor, multiply_03_n) {
 
   nntrainer::Tensor test(batch - 1, height - 1, width - 1);
 
-  ASSERT_EXCEPTION({ input.multiply(test); }, std::runtime_error,
-                   "Error: Dimension must be equal each other");
+  EXPECT_THROW({ input.multiply(test); }, std::runtime_error);
 }
 
 TEST(nntrainer_Tensor, multiply_float_01_p) {
@@ -362,8 +361,7 @@ TEST(nntrainer_Tensor, divide_02_n) {
   nntrainer::Tensor input(batch, channel, height, width);
   GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k + 1);
 
-  ASSERT_EXCEPTION({ input.divide(0.0); }, std::runtime_error,
-                   "Error: Divide by zero");
+  EXPECT_THROW({ input.divide(0.0); }, std::runtime_error);
 }
 
 TEST(nntrainer_Tensor, divide_03_n) {
@@ -377,8 +375,7 @@ TEST(nntrainer_Tensor, divide_03_n) {
 
   nntrainer::Tensor test(batch - 1, channel, height - 1, width - 1);
 
-  ASSERT_EXCEPTION({ input.divide(test); }, std::runtime_error,
-                   "Error: Dimension must be equal each other");
+  EXPECT_THROW({ input.divide(test); }, std::runtime_error);
 }
 
 TEST(nntrainer_Tensor, add_i_01_p) {
@@ -517,8 +514,7 @@ TEST(nntrainer_Tensor, add_03_n) {
 
   nntrainer::Tensor test(batch - 1, channel, height - 1, width - 1);
 
-  ASSERT_EXCEPTION({ input.add(test); }, std::runtime_error,
-                   "Error: Dimension must be equal each other");
+  EXPECT_THROW({ input.add(test); }, std::runtime_error);
 }
 
 TEST(nntrainer_Tensor, subtract_i_01_p) {
@@ -649,8 +645,7 @@ TEST(nntrainer_Tensor, subtract_03_n) {
 
   nntrainer::Tensor test(batch - 1, channel, height - 1, width - 1);
 
-  ASSERT_EXCEPTION({ input.subtract(test); }, std::runtime_error,
-                   "Error: Dimension must be equal each other");
+  EXPECT_THROW({ input.subtract(test); }, std::runtime_error);
 }
 
 TEST(nntrainer_Tensor, subtract_float_01_p) {
@@ -679,8 +674,7 @@ TEST(nntrainer_Tensor, sum_01_n) {
   nntrainer::Tensor input(batch, channel, height, width);
   GEN_TEST_INPUT(input, i * (batch * height) + j * (width) + k);
 
-  ASSERT_EXCEPTION({ input.sum(4); }, std::out_of_range,
-                   "Error: Cannot exceede 3");
+  EXPECT_THROW({ input.sum(4); }, std::out_of_range);
 }
 
 TEST(nntrainer_Tensor, sum_02_p) {
@@ -936,7 +930,7 @@ TEST(nntrainer_Tensor, copy_and_shares_variable_p) {
 }
 
 /// #412
-TEST(nntrainer_Tensor, copy_and_resize_n) {
+TEST(nntrainer_Tensor, DISABLED_copy_and_resize_n) {
   nntrainer::Tensor A = constant(1.0f, 3, 4, 5, 6);
   nntrainer::Tensor B = A;
   nntrainer::Tensor C = A.clone();
@@ -944,7 +938,8 @@ TEST(nntrainer_Tensor, copy_and_resize_n) {
   /// this is undefined behavior
   B.setDim(nntrainer::TensorDim(9, 9, 9, 9));
 
-  /// @todo add appropriate test.
+  /// @todo replace this with an appropriate test
+  EXPECT_TRUE(true);
 }
 
 /// @note this test case demonstrates it is dangerous to use sharedConstTensor
index c093b3a..2a662ac 100644
--- a/test/unittest/unittest_util_func.cpp
+++ b/test/unittest/unittest_util_func.cpp
@@ -24,7 +24,9 @@
 #include <gtest/gtest.h>
 #include <nntrainer_error.h>
 #include <nntrainer_log.h>
+#include <nntrainer_logger.h>
 #include <nntrainer_test_util.h>
+#include <parse_util.h>
 #include <util_func.h>
 
 TEST(nntrainer_util_func, sqrtFloat_01_p) {
@@ -59,6 +61,36 @@ TEST(nntrainer_util_func, logFloat_01_p) {
   }
 }
 
+TEST(nntrainer_parse_util, throw_status_no_error_p) {
+  EXPECT_NO_THROW(nntrainer::throw_status(ML_ERROR_NONE));
+}
+
+TEST(nntrainer_parse_util, throw_status_invalid_argument_n) {
+  EXPECT_THROW(nntrainer::throw_status(ML_ERROR_INVALID_PARAMETER),
+               std::invalid_argument);
+}
+
+TEST(nntrainer_parse_util, throw_status_out_of_memory_n) {
+  EXPECT_THROW(nntrainer::throw_status(ML_ERROR_OUT_OF_MEMORY), std::bad_alloc);
+}
+
+TEST(nntrainer_parse_util, throw_status_timed_out_n) {
+  EXPECT_THROW(nntrainer::throw_status(ML_ERROR_TIMED_OUT), std::runtime_error);
+}
+
+TEST(nntrainer_parse_util, throw_status_permission_denied_n) {
+  EXPECT_THROW(nntrainer::throw_status(ML_ERROR_PERMISSION_DENIED),
+               std::runtime_error);
+}
+
+TEST(nntrainer_parse_util, throw_status_unknown_error_n) {
+  EXPECT_THROW(nntrainer::throw_status(ML_ERROR_UNKNOWN), std::runtime_error);
+}
+
+TEST(nntrainer_parse_util, throw_status_default_n) {
+  EXPECT_THROW(nntrainer::throw_status(-12345), std::runtime_error);
+}
+
 /**
  * @brief Main gtest
  */