[model] implement save best acc logic
authorJihoon Lee <jhoon.it.lee@samsung.com>
Tue, 7 Sep 2021 12:26:05 +0000 (21:26 +0900)
committerJijoong Moon <jijoong.moon@samsung.com>
Thu, 9 Sep 2021 01:22:03 +0000 (10:22 +0900)
This patch implements saving the model whenever the best validation accuracy is achieved during training

**Self evaluation:**
1. Build test: [X]Passed [ ]Failed [ ]Skipped
2. Run test: [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: Jihoon Lee <jhoon.it.lee@samsung.com>
nntrainer/models/model_common_properties.h
nntrainer/models/neuralnet.cpp
nntrainer/models/neuralnet.h

index cdb02159db4d3e59f8104fc11e59682200263828..e06e4ef45771071a9d525596e94bdcd30e47f0bb 100644 (file)
@@ -62,6 +62,17 @@ public:
   using prop_tag = str_prop_tag;                  /**< property type */
 };
 
+/**
+ * @brief model save best path property
+ *
+ */
+class SaveBestPath : public Property<std::string> {
+public:
+  static constexpr const char *key =
+    "save_best_path";            /**< unique key to access */
+  using prop_tag = str_prop_tag; /**< property type */
+};
+
 /**
  * @brief model batch size property
  *
index a31901d13c4bcd17becfba03a9bd5f0f897ab9e4..734519ea0836139097c802f1e46acd67562187a0 100644 (file)
@@ -50,7 +50,8 @@ namespace nntrainer {
 NeuralNetwork::NeuralNetwork(AppContext app_context_, bool in_place_opt) :
   model_props(props::LossType()),
   model_flex_props(props::Epochs(), props::TrainingBatchSize(),
-                   props::SavePath(), props::ContinueTrain()),
+                   props::SavePath(), props::ContinueTrain(),
+                   props::SaveBestPath()),
   load_path(std::string()),
   epoch_idx(0),
   iter(0),
@@ -675,11 +676,25 @@ int NeuralNetwork::train_run() {
     stat.loss += getLoss();
   };
 
-  auto eval_epoch_end = [this, batch_size](RunStats &stat, DataBuffer &buffer) {
+  auto eval_epoch_end = [this, batch_size, max_acc = 0.0f,
+                         min_loss = std::numeric_limits<float>::max()](
+                          RunStats &stat, DataBuffer &buffer) mutable {
     stat.loss /= static_cast<float>(stat.num_iterations);
     stat.accuracy = stat.num_correct_predictions /
                     static_cast<float>(stat.num_iterations * batch_size) *
                     100.0f;
+
+    if (stat.accuracy > max_acc ||
+        (stat.accuracy == max_acc && stat.loss < min_loss)) {
+      max_acc = stat.accuracy;
+      /// @note this is not the global minimum loss over the whole run; it is
+      /// only refreshed when a new best accuracy is recorded
+      min_loss = stat.loss;
+      auto &save_best_path = std::get<props::SaveBestPath>(model_flex_props);
+      if (!save_best_path.empty()) {
+        save(save_best_path);
+      }
+    }
     std::cout << " >> [ Accuracy: " << validation.accuracy
               << "% - Validation Loss : " << validation.loss << " ]";
   };
@@ -694,6 +709,7 @@ int NeuralNetwork::train_run() {
     }
     std::cout << '\n';
   }
+
   if (test_buffer) {
     std::cout << "Evaluation with test data...\n";
     testing =
index 47946e77ee67946b6749a390485a60b9200788b5..746f413b130ff527088e1862f6cfd275b36ca396 100644 (file)
@@ -502,7 +502,8 @@ public:
 
 private:
   using FlexiblePropTypes =
-    std::tuple<props::Epochs, props::TrainingBatchSize, props::SavePath, props::ContinueTrain>;
+    std::tuple<props::Epochs, props::TrainingBatchSize, props::SavePath,
+               props::ContinueTrain, props::SaveBestPath>;
   using RigidPropTypes = std::tuple<props::LossType>;
 
   RigidPropTypes model_props;         /**< model props */