Remove Duplicated Codes @UpdateData
author: jijoong.moon <jijoong.moon@samsung.com>
Thu, 9 Apr 2020 04:36:56 +0000 (13:36 +0900)
committer: Jijoong Moon <jijoong.moon@samsung.com>
Fri, 10 Apr 2020 06:15:07 +0000 (15:15 +0900)
Remove Duplicated Codes @UpdateData

**Self evaluation:**
1. Build test:  [X]Passed [ ]Failed [ ]Skipped
2. Run test:  [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: jijoong.moon <jijoong.moon@samsung.com>
jni/Android.mk
nntrainer/src/databuffer.cpp
nntrainer/src/layers.cpp
nntrainer/src/neuralnet.cpp
nntrainer/src/nntrainer_logger.cpp
nntrainer/src/optimizer.cpp
nntrainer/src/tensor.cpp
nntrainer/src/util_func.cpp

index d2426b2..1333d79 100644 (file)
@@ -28,7 +28,7 @@ NNTRAINER_SRCS := $(NNTRAINER_ROOT)/nntrainer/src/neuralnet.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/util_func.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/optimizer.cpp \
 
-NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer
+NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include
 
 INIPARSER_SRCS := $(INIPARSER_ROOT)/src/iniparser.c \
                   $(INIPARSER_ROOT)/src/dictionary.c
index d9ec1a9..a5a7ef0 100644 (file)
  *
  */
 
-#include "include/databuffer.h"
+#include "databuffer.h"
 #include <assert.h>
+#include <nntrainer_log.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <climits>
 #include <condition_variable>
 #include <cstring>
 #include <functional>
@@ -100,171 +102,115 @@ bool DataBuffer::init(int mini_batch, unsigned int train_bufsize, unsigned int v
 }
 
 void DataBuffer::UpdateData(buffer_type type, std::ifstream &file) {
+  unsigned int max_size = 0;
+  unsigned int buf_size = 0;
+  unsigned int *rest_size = NULL;
+  unsigned int *cur_size = NULL;
+  bool *running = NULL;
+  std::vector<std::vector<float>> *data = NULL;
+  std::vector<std::vector<float>> *datalabel = NULL;
+
   switch (type) {
-    case BUF_TRAIN: {
-      std::vector<unsigned int> mark;
-      mark.resize(max_train);
-      file.seekg(0, std::ios_base::end);
-      int64_t file_length = file.tellg();
+    case BUF_TRAIN:
+      max_size = max_train;
+      buf_size = train_bufsize;
+      rest_size = &rest_train;
+      cur_size = &cur_train_bufsize;
+      running = &train_running;
+      data = &trainData;
+      datalabel = &trainDataLabel;
+      break;
+    case BUF_VAL:
+      max_size = max_val;
+      buf_size = val_bufsize;
+      rest_size = &rest_val;
+      cur_size = &cur_val_bufsize;
+      running = &val_running;
+      data = &valData;
+      datalabel = &valDataLabel;
+      break;
+    case BUF_TEST:
+      max_size = max_test;
+      buf_size = test_bufsize;
+      rest_size = &rest_test;
+      cur_size = &cur_test_bufsize;
+      running = &test_running;
+      data = &testData;
+      datalabel = &testDataLabel;
+      break;
+    default:
+      break;
+  }
 
-      for (unsigned int i = 0; i < max_train; ++i) {
-        mark[i] = i;
-      }
+  unsigned int I;
+  std::vector<unsigned int> mark;
+  mark.resize(max_size);
+  file.seekg(0, std::ios_base::end);
+  uint64_t file_length = file.tellg();
 
-      unsigned int I;
-      while (train_running && mark.size() != 0) {
-        if (train_bufsize - cur_train_bufsize > 0 && rest_train > 0) {
-          data_lock.lock();
-          std::vector<float> vec;
-          std::vector<float> veclabel;
+  for (unsigned int i = 0; i < max_size; ++i) {
+    mark[i] = i;
+  }
 
-          unsigned int id = rangeRandom(0, mark.size() - 1);
-          I = mark[id];
-          if (I > max_test)
-            throw std::runtime_error("Error: Test case id cannot exceed maximum number of test");
+  while ((*running) && mark.size() != 0) {
+    if (buf_size - (*cur_size) > 0 && (*rest_size) > 0) {
+      std::vector<float> vec;
+      std::vector<float> veclabel;
 
-          mark.erase(mark.begin() + id);
+      unsigned int id = rangeRandom(0, mark.size() - 1);
+      I = mark[id];
+      if (I > max_size)
+        ml_loge("Error: Test case id cannot exceed maximum number of test");
 
-          int64_t position = (I * input_size + I * class_num) * sizeof(float);
+      mark.erase(mark.begin() + id);
+      uint64_t position = (I * input_size + I * class_num) * sizeof(float);
 
-          if (position > file_length)
-            throw std::runtime_error("Error: Cannot exceed max file size");
+      if (position > file_length || position > ULLONG_MAX)
+        ml_loge("Error: Cannot exceed max file size");
 
-          file.seekg(position, std::ios::beg);
+      file.seekg(position, std::ios::beg);
 
-          for (unsigned int j = 0; j < input_size; ++j) {
-            float data;
-            file.read((char *)&data, sizeof(float));
-            vec.push_back(data);
-          }
-          trainData.push_back(vec);
-          for (unsigned int j = 0; j < class_num; ++j) {
-            float data;
-            file.read((char *)&data, sizeof(float));
-            veclabel.push_back(data);
-          }
-          trainDataLabel.push_back(veclabel);
-          rest_train--;
-          cur_train_bufsize++;
-          data_lock.unlock();
-        }
-        if (train_bufsize == cur_train_bufsize) {
-          std::lock_guard<std::mutex> lgtrain(readyTrainData);
-          trainReadyFlag = true;
-          cv_train.notify_all();
-        }
-      }
-    } break;
-    case BUF_VAL: {
-      unsigned int I;
-      std::vector<unsigned int> mark;
-      mark.resize(max_val);
-      file.seekg(0, std::ios_base::end);
-      int64_t file_length = file.tellg();
-
-      for (unsigned int i = 0; i < max_val; ++i) {
-        mark[i] = i;
+      for (unsigned int j = 0; j < input_size; ++j) {
+        float d;
+        file.read((char *)&d, sizeof(float));
+        vec.push_back(d);
       }
 
-      while (val_running && mark.size() != 0) {
-        if (val_bufsize - cur_val_bufsize > 0 && rest_val > 0) {
-          data_lock.lock();
-          std::vector<float> vec;
-          std::vector<float> veclabel;
-
-          unsigned int id = rangeRandom(0, mark.size() - 1);
-          I = mark[id];
-          if (I > max_test)
-            throw std::runtime_error("Error: Test case id cannot exceed maximum number of test");
-
-          mark.erase(mark.begin() + id);
-
-          int64_t position = (I * input_size + I * class_num) * sizeof(float);
-
-          if (position > file_length)
-            throw std::runtime_error("Error: Cannot exceed max file size");
-
-          file.seekg(position, std::ios::beg);
+      for (unsigned int j = 0; j < class_num; ++j) {
+        float d;
+        file.read((char *)&d, sizeof(float));
+        veclabel.push_back(d);
+      }
 
-          for (unsigned int j = 0; j < input_size; ++j) {
-            float data;
-            file.read((char *)&data, sizeof(float));
-            vec.push_back(data);
-          }
-          valData.push_back(vec);
-          for (unsigned int j = 0; j < class_num; ++j) {
-            float data;
-            file.read((char *)&data, sizeof(float));
-            veclabel.push_back(data);
-          }
-          valDataLabel.push_back(veclabel);
-          rest_val--;
-          cur_val_bufsize++;
-          data_lock.unlock();
-        }
-        if (val_bufsize == cur_val_bufsize) {
+      data_lock.lock();
+      data->push_back(vec);
+      datalabel->push_back(veclabel);
+      (*rest_size)--;
+      (*cur_size)++;
+      data_lock.unlock();
+    }
+
+    if (buf_size == (*cur_size)) {
+      switch (type) {
+        case ::BUF_TRAIN: {
+          std::lock_guard<std::mutex> lgtrain(readyTrainData);
+          trainReadyFlag = true;
+          cv_train.notify_all();
+        } break;
+        case ::BUF_VAL: {
           std::lock_guard<std::mutex> lgval(readyValData);
           valReadyFlag = true;
           cv_val.notify_all();
-        }
-      }
-    } break;
-    case BUF_TEST: {
-      unsigned int I;
-      std::vector<int> mark;
-      mark.resize(max_test);
-      file.seekg(0, std::ios_base::end);
-      int64_t file_length = file.tellg();
-
-      for (unsigned int i = 0; i < max_test; ++i) {
-        mark[i] = i;
-      }
-
-      while (test_running && mark.size() != 0) {
-        if (test_bufsize - cur_test_bufsize >= 0 && rest_test > 0) {
-          data_lock.lock();
-          std::vector<float> vec;
-          std::vector<float> veclabel;
-
-          unsigned int id = rangeRandom(0, mark.size() - 1);
-          I = mark[id];
-          if (I > max_test)
-            throw std::runtime_error("Error: Test case id cannot exceed maximum number of test");
-
-          mark.erase(mark.begin() + id);
-
-          int64_t position = (I * input_size + I * class_num) * sizeof(float);
-
-          if (position > file_length)
-            throw std::runtime_error("Error: Cannot exceed max file size");
-
-          file.seekg(position, std::ios::beg);
-
-          for (unsigned int j = 0; j < input_size; ++j) {
-            float data;
-            file.read((char *)&data, sizeof(float));
-            vec.push_back(data);
-          }
-          testData.push_back(vec);
-          for (unsigned int j = 0; j < class_num; ++j) {
-            float data;
-            file.read((char *)&data, sizeof(float));
-            veclabel.push_back(data);
-          }
-          testDataLabel.push_back(veclabel);
-          rest_test--;
-          cur_test_bufsize++;
-          data_lock.unlock();
-        }
-        if (test_bufsize == cur_test_bufsize) {
+        } break;
+        case ::BUF_TEST: {
           std::lock_guard<std::mutex> lgtest(readyTestData);
           testReadyFlag = true;
           cv_test.notify_all();
-        }
+        } break;
+        default:
+          break;
       }
-    } break;
-    default:
-      break;
+    }
   }
 }
 
index eabecb1..57378ee 100644 (file)
  *
  */
 
-#include "include/layers.h"
+#include "layers.h"
 #include <assert.h>
+#include <nntrainer_log.h>
 #include <random>
-#include "include/nntrainer_log.h"
-#include "include/util_func.h"
+#include "util_func.h"
 
 static auto rng = [] {
   std::mt19937 rng;
index c0c1a5c..ff06d69 100644 (file)
  *
  */
 
-#include "include/neuralnet.h"
+#include "neuralnet.h"
 #include <assert.h>
+#include <nntrainer_log.h>
 #include <stdio.h>
 #include <array>
 #include <cmath>
 #include <sstream>
-#include "include/nntrainer_log.h"
 #include "iniparser.h"
 
 /**
index d4d6111..9e9b61c 100644 (file)
@@ -21,7 +21,7 @@
  * @bug No known bugs except for NYI items
  */
 
-#include <nntrainer_logger.h>
+#include "nntrainer_logger.h"
 #include <stdarg.h>
 #include <cstring>
 #include <ctime>
index 9dcb5ae..11d4cba 100644 (file)
@@ -21,9 +21,9 @@
  *
  */
 
-#include "include/optimizer.h"
-#include "include/nntrainer_log.h"
-#include "include/util_func.h"
+#include "optimizer.h"
+#include <nntrainer_log.h>
+#include "util_func.h"
 
 void Optimizer::initialize(unsigned int height, unsigned int width, bool setTensor) {
   if (type == OptType::adam && setTensor) {
index 87a9d1b..570a73d 100644 (file)
  *
  */
 
-#include "include/tensor.h"
+#include "tensor.h"
 #include <assert.h>
+#include <nntrainer_log.h>
 #include <stdio.h>
 #include <cstring>
 #include <sstream>
-#include "include/nntrainer_log.h"
 
 #ifdef USE_CUBLAS
 #include <helper_cuda.h>
index b5c83bf..0e57495 100644 (file)
 
 #ifndef __UTIL_FUNC_H__
 #define __UTIL_FUNC_H__
-#include "include/util_func.h"
+#include "util_func.h"
 #include <assert.h>
-#include "include/tensor.h"
 #include "math.h"
+#include "tensor.h"
 
 Tensors::Tensor softmaxPrime(Tensors::Tensor x) {
   int batch = x.getBatch();