[manager] Check on re-initialize
author    Parichay Kapoor <pk.kapoor@samsung.com>
          Wed, 3 Mar 2021 11:08:03 +0000 (20:08 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
          Mon, 8 Mar 2021 07:45:43 +0000 (16:45 +0900)
Add a check on re-initialization for the ccapi unittest.
As re-initialization is removed, the memory initialization is also updated,
which changes the final loss values.
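
For context, the change below makes initialization idempotent via boolean
guard flags. A minimal sketch of the pattern, with illustrative class and
member names (this is not nntrainer's actual Manager API):

#include <vector>

// Sketch: once a flag is set, a repeated initialize()/allocate() call
// returns immediately instead of re-running setup, mirroring the guards
// added in the diff below.
class Pool {
public:
  void initialize() {
    if (initialized) // guard: re-initialize is a no-op
      return;
    buffers.assign(16, 0.0f); // stand-in for weight/tensor setup
    initialized = true;
    allocated = true; // initialization allocates without delay
  }

  void allocate() {
    if (allocated) // guard: re-allocation is a no-op
      return;
    allocated = true;
  }

private:
  std::vector<float> buffers;
  bool initialized = false;
  bool allocated = false;
};

With such guards in place, a second initialize() or allocate() call is a
no-op rather than a re-run of setup.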

**Self evaluation:**
1. Build test: [x]Passed [ ]Failed [ ]Skipped
2. Run test: [x]Passed [ ]Failed [ ]Skipped

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
nntrainer/tensor/manager.cpp
nntrainer/tensor/manager.h
test/ccapi/unittest_ccapi.cpp
test/unittest/unittest_nntrainer_models.cpp

diff --git a/nntrainer/tensor/manager.cpp b/nntrainer/tensor/manager.cpp
index 97fecb4..694da39 100644
@@ -248,6 +248,9 @@ void Manager::initializeWeights() {
     return;
   }
 
+  if (weights_initialized)
+    return;
+
   AllocFunc allocate_weight = getAllocFunc(true);
 
   unsigned int weight_offset = 0;
@@ -266,10 +269,14 @@ void Manager::initializeWeights() {
   }
 
   weights_initialized = true;
+  /** weights are allocated without delay */
   weights_allocated = true;
 }
 
 void Manager::allocateWeights() {
+  if (weights_allocated)
+    return;
+
   for (auto &l_w : weights) {
     for (auto &w : l_w) {
       Weight &weight = w.get();
@@ -491,18 +498,21 @@ void Manager::initializeTensors(bool trainable) {
   if (!weights_initialized)
     initializeWeights();
 
-  // Allocate gradients
+  if (tensors_initialized)
+    return;
+
+  // Initialize gradients
   if (trainable)
     initializeGradients();
 
-  // Allocate shared derivative memory
+  // Initialize shared derivative memory
   if (max_derivative_size > 0 && enable_activation_memory_opt && trainable)
     shared_deriv = Tensor(TensorDim({max_derivative_size}), false);
 
   // @todo Do not count memory of the input tensor of the input layer in the
   // estimate of max_shared_inout as it is not used
 
-  // Allocate shared input/output memory for inference
+  // Initialize shared input/output memory for inference
   // @note Memory for label is not allocated here as inference doesn't have a label
   if (!trainable && enable_inference_inout_memory_opt)
     shared_inout = Tensor(TensorDim({max_shared_inout}), false);
diff --git a/nntrainer/tensor/manager.h b/nntrainer/tensor/manager.h
index 62e14b0..5e6c2a9 100644
@@ -173,8 +173,6 @@ public:
    */
   void reset() {
     deallocateTensors(true);
-    weights_allocated = false;
-    tensors_allocated = false;
 
     total_weight_size = 0;
     total_grad_size = 0;
diff --git a/test/ccapi/unittest_ccapi.cpp b/test/ccapi/unittest_ccapi.cpp
index 753582d..c79d0ca 100644
@@ -371,9 +371,9 @@ TEST(nntrainer_ccapi, train_batch_size_update_after) {
   EXPECT_EQ(model->setProperty({"batch_size=4"}), ML_ERROR_NONE);
   EXPECT_NO_THROW(model->train());
 
-  EXPECT_FLOAT_EQ(model->getTrainingLoss(), 1.9582682);
-  EXPECT_FLOAT_EQ(model->getValidationLoss(), 2.1831701);
-  EXPECT_FLOAT_EQ(model->getLoss(), 2.1985414);
+  EXPECT_FLOAT_EQ(model->getTrainingLoss(), 1.9613363);
+  EXPECT_FLOAT_EQ(model->getValidationLoss(), 2.1835098);
+  EXPECT_FLOAT_EQ(model->getLoss(), 2.1977143);
 }
 
 /**
diff --git a/test/unittest/unittest_nntrainer_models.cpp b/test/unittest/unittest_nntrainer_models.cpp
index 2f6e6e6..63f0824 100644
@@ -762,9 +762,13 @@ INSTANTIATE_TEST_CASE_P(
     mkModelTc(conv_same_padding_multi_stride, "3:1:1:10", 10),
     mkModelTc(conv_no_loss_validate, "3:1:1:10", 1),
     mkModelTc(conv_none_loss_validate, "3:1:1:10", 1)
-), [](const testing::TestParamInfo<nntrainerModelTest::ParamType>& info){
- return std::get<0>(info.param).getName();
-});
+/// #if gtest_version <= 1.7.0
+));
+/// #else gtest_version > 1.7.0
+// ), [](const testing::TestParamInfo<nntrainerModelTest::ParamType>& info){
+//   return std::get<0>(info.param).getName();
+// });
+/// #endif
 // clang-format on
 
 /**