[neuralnet] Iteration bug fix in learning rate
author Parichay Kapoor <pk.kapoor@samsung.com>
Tue, 16 Jun 2020 09:51:58 +0000 (18:51 +0900)
committer Jijoong Moon <jijoong.moon@samsung.com>
Tue, 16 Jun 2020 10:12:25 +0000 (19:12 +0900)
The learning rate should be decayed based on the iteration count;
however, the current implementation was using the epoch count instead.

Signed-off-by: Parichay Kapoor <pk.kapoor@samsung.com>
nntrainer/include/neuralnet.h
nntrainer/src/neuralnet.cpp

index 458ba38..d84ad02 100644 (file)
@@ -330,6 +330,11 @@ private:
    * @brief     Data Buffer to get Input
    */
   std::shared_ptr<DataBuffer> data_buffer;
+
+  /**
+   * @brief     Number of iterations trained
+   */
+  int iter;
 };
 
 } /* namespace nntrainer */
index f532f21..e2dd784 100644 (file)
@@ -91,19 +91,7 @@ std::vector<std::string> parseLayerName(std::string ll) {
   return ret;
 }
 
-NeuralNetwork::NeuralNetwork() {
-  batch_size = 0;
-  learning_rate = 0.0;
-  decay_rate = 0.0;
-  decay_steps = 0.0;
-  epoch = 0;
-  loss = 0.0;
-  cost = COST_UNKNOWN;
-  weight_ini = WEIGHT_UNKNOWN;
-  net_type = NET_UNKNOWN;
-  data_buffer = NULL;
-  config = "";
-}
+NeuralNetwork::NeuralNetwork() : NeuralNetwork("") {}
 
 NeuralNetwork::NeuralNetwork(std::string config) {
   batch_size = 0;
@@ -116,6 +104,7 @@ NeuralNetwork::NeuralNetwork(std::string config) {
   weight_ini = WEIGHT_UNKNOWN;
   net_type = NET_UNKNOWN;
   data_buffer = NULL;
+  iter = 0;
   this->setConfig(config);
 }
 
@@ -772,7 +761,6 @@ int NeuralNetwork::train_run() {
 
   float training_loss = 0.0;
   for (unsigned int i = 0; i < epoch; ++i) {
-    int count = 0;
 
     status = data_buffer->run(nntrainer::BUF_TRAIN);
     if (status != ML_ERROR_NONE) {
@@ -792,15 +780,14 @@ int NeuralNetwork::train_run() {
       vec_4d in, label;
       if (data_buffer->getDataFromBuffer(nntrainer::BUF_TRAIN, in, label)) {
         status =
-          backwarding(nntrainer::Tensor(in), nntrainer::Tensor(label), i);
+          backwarding(nntrainer::Tensor(in), nntrainer::Tensor(label), iter++);
         if (status != ML_ERROR_NONE) {
           data_buffer->clear(nntrainer::BUF_TRAIN);
           ml_loge ("Error: training error in #%d/%d.", i+1, epoch);
           return status;
         }
-        count++;
         std::cout << "#" << i + 1 << "/" << epoch;
-        data_buffer->displayProgress(count, nntrainer::BUF_TRAIN, getLoss());
+        data_buffer->displayProgress(iter, nntrainer::BUF_TRAIN, getLoss());
       } else {
         data_buffer->clear(nntrainer::BUF_TRAIN);
         break;