From: jijoong.moon Date: Thu, 9 Apr 2020 04:36:56 +0000 (+0900) Subject: Remove Duplicated Codes @UpdateData X-Git-Tag: accepted/tizen/unified/20200706.064221~168 X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=01d8330db7e3a91021634037f26691c6f72436aa;p=platform%2Fcore%2Fml%2Fnntrainer.git Remove Duplicated Codes @UpdateData Remove Duplicated Codes @UpdateData **Self evaluation:** 1. Build test: [X]Passed [ ]Failed [ ]Skipped 2. Run test: [X]Passed [ ]Failed [ ]Skipped Signed-off-by: jijoong.moon --- diff --git a/jni/Android.mk b/jni/Android.mk index d2426b2..1333d79 100644 --- a/jni/Android.mk +++ b/jni/Android.mk @@ -28,7 +28,7 @@ NNTRAINER_SRCS := $(NNTRAINER_ROOT)/nntrainer/src/neuralnet.cpp \ $(NNTRAINER_ROOT)/nntrainer/src/util_func.cpp \ $(NNTRAINER_ROOT)/nntrainer/src/optimizer.cpp \ -NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer +NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer/include INIPARSER_SRCS := $(INIPARSER_ROOT)/src/iniparser.c \ $(INIPARSER_ROOT)/src/dictionary.c diff --git a/nntrainer/src/databuffer.cpp b/nntrainer/src/databuffer.cpp index d9ec1a9..a5a7ef0 100644 --- a/nntrainer/src/databuffer.cpp +++ b/nntrainer/src/databuffer.cpp @@ -21,10 +21,12 @@ * */ -#include "include/databuffer.h" +#include "databuffer.h" #include +#include #include #include +#include #include #include #include @@ -100,171 +102,115 @@ bool DataBuffer::init(int mini_batch, unsigned int train_bufsize, unsigned int v } void DataBuffer::UpdateData(buffer_type type, std::ifstream &file) { + unsigned int max_size = 0; + unsigned int buf_size = 0; + unsigned int *rest_size = NULL; + unsigned int *cur_size = NULL; + bool *running = NULL; + std::vector> *data = NULL; + std::vector> *datalabel = NULL; + switch (type) { - case BUF_TRAIN: { - std::vector mark; - mark.resize(max_train); - file.seekg(0, std::ios_base::end); - int64_t file_length = file.tellg(); + case BUF_TRAIN: + max_size = max_train; + buf_size = train_bufsize; + rest_size = &rest_train; + cur_size 
= &cur_train_bufsize; + running = &train_running; + data = &trainData; + datalabel = &trainDataLabel; + break; + case BUF_VAL: + max_size = max_val; + buf_size = val_bufsize; + rest_size = &rest_val; + cur_size = &cur_val_bufsize; + running = &val_running; + data = &valData; + datalabel = &valDataLabel; + break; + case BUF_TEST: + max_size = max_test; + buf_size = test_bufsize; + rest_size = &rest_test; + cur_size = &cur_test_bufsize; + running = &test_running; + data = &testData; + datalabel = &testDataLabel; + break; + default: + break; + } - for (unsigned int i = 0; i < max_train; ++i) { - mark[i] = i; - } + unsigned int I; + std::vector mark; + mark.resize(max_size); + file.seekg(0, std::ios_base::end); + uint64_t file_length = file.tellg(); - unsigned int I; - while (train_running && mark.size() != 0) { - if (train_bufsize - cur_train_bufsize > 0 && rest_train > 0) { - data_lock.lock(); - std::vector vec; - std::vector veclabel; + for (unsigned int i = 0; i < max_size; ++i) { + mark[i] = i; + } - unsigned int id = rangeRandom(0, mark.size() - 1); - I = mark[id]; - if (I > max_test) - throw std::runtime_error("Error: Test case id cannot exceed maximum number of test"); + while ((*running) && mark.size() != 0) { + if (buf_size - (*cur_size) > 0 && (*rest_size) > 0) { + std::vector vec; + std::vector veclabel; - mark.erase(mark.begin() + id); + unsigned int id = rangeRandom(0, mark.size() - 1); + I = mark[id]; + if (I > max_size) + ml_loge("Error: Test case id cannot exceed maximum number of test"); - int64_t position = (I * input_size + I * class_num) * sizeof(float); + mark.erase(mark.begin() + id); + uint64_t position = (I * input_size + I * class_num) * sizeof(float); - if (position > file_length) - throw std::runtime_error("Error: Cannot exceed max file size"); + if (position > file_length || position > ULLONG_MAX) + ml_loge("Error: Cannot exceed max file size"); - file.seekg(position, std::ios::beg); + file.seekg(position, std::ios::beg); - for (unsigned 
int j = 0; j < input_size; ++j) { - float data; - file.read((char *)&data, sizeof(float)); - vec.push_back(data); - } - trainData.push_back(vec); - for (unsigned int j = 0; j < class_num; ++j) { - float data; - file.read((char *)&data, sizeof(float)); - veclabel.push_back(data); - } - trainDataLabel.push_back(veclabel); - rest_train--; - cur_train_bufsize++; - data_lock.unlock(); - } - if (train_bufsize == cur_train_bufsize) { - std::lock_guard lgtrain(readyTrainData); - trainReadyFlag = true; - cv_train.notify_all(); - } - } - } break; - case BUF_VAL: { - unsigned int I; - std::vector mark; - mark.resize(max_val); - file.seekg(0, std::ios_base::end); - int64_t file_length = file.tellg(); - - for (unsigned int i = 0; i < max_val; ++i) { - mark[i] = i; + for (unsigned int j = 0; j < input_size; ++j) { + float d; + file.read((char *)&d, sizeof(float)); + vec.push_back(d); } - while (val_running && mark.size() != 0) { - if (val_bufsize - cur_val_bufsize > 0 && rest_val > 0) { - data_lock.lock(); - std::vector vec; - std::vector veclabel; - - unsigned int id = rangeRandom(0, mark.size() - 1); - I = mark[id]; - if (I > max_test) - throw std::runtime_error("Error: Test case id cannot exceed maximum number of test"); - - mark.erase(mark.begin() + id); - - int64_t position = (I * input_size + I * class_num) * sizeof(float); - - if (position > file_length) - throw std::runtime_error("Error: Cannot exceed max file size"); - - file.seekg(position, std::ios::beg); + for (unsigned int j = 0; j < class_num; ++j) { + float d; + file.read((char *)&d, sizeof(float)); + veclabel.push_back(d); + } - for (unsigned int j = 0; j < input_size; ++j) { - float data; - file.read((char *)&data, sizeof(float)); - vec.push_back(data); - } - valData.push_back(vec); - for (unsigned int j = 0; j < class_num; ++j) { - float data; - file.read((char *)&data, sizeof(float)); - veclabel.push_back(data); - } - valDataLabel.push_back(veclabel); - rest_val--; - cur_val_bufsize++; - data_lock.unlock(); - 
} - if (val_bufsize == cur_val_bufsize) { + data_lock.lock(); + data->push_back(vec); + datalabel->push_back(veclabel); + (*rest_size)--; + (*cur_size)++; + data_lock.unlock(); + } + + if (buf_size == (*cur_size)) { + switch (type) { + case ::BUF_TRAIN: { + std::lock_guard lgtrain(readyTrainData); + trainReadyFlag = true; + cv_train.notify_all(); + } break; + case ::BUF_VAL: { std::lock_guard lgval(readyValData); valReadyFlag = true; cv_val.notify_all(); - } - } - } break; - case BUF_TEST: { - unsigned int I; - std::vector mark; - mark.resize(max_test); - file.seekg(0, std::ios_base::end); - int64_t file_length = file.tellg(); - - for (unsigned int i = 0; i < max_test; ++i) { - mark[i] = i; - } - - while (test_running && mark.size() != 0) { - if (test_bufsize - cur_test_bufsize >= 0 && rest_test > 0) { - data_lock.lock(); - std::vector vec; - std::vector veclabel; - - unsigned int id = rangeRandom(0, mark.size() - 1); - I = mark[id]; - if (I > max_test) - throw std::runtime_error("Error: Test case id cannot exceed maximum number of test"); - - mark.erase(mark.begin() + id); - - int64_t position = (I * input_size + I * class_num) * sizeof(float); - - if (position > file_length) - throw std::runtime_error("Error: Cannot exceed max file size"); - - file.seekg(position, std::ios::beg); - - for (unsigned int j = 0; j < input_size; ++j) { - float data; - file.read((char *)&data, sizeof(float)); - vec.push_back(data); - } - testData.push_back(vec); - for (unsigned int j = 0; j < class_num; ++j) { - float data; - file.read((char *)&data, sizeof(float)); - veclabel.push_back(data); - } - testDataLabel.push_back(veclabel); - rest_test--; - cur_test_bufsize++; - data_lock.unlock(); - } - if (test_bufsize == cur_test_bufsize) { + } break; + case ::BUF_TEST: { std::lock_guard lgtest(readyTestData); testReadyFlag = true; cv_test.notify_all(); - } + } break; + default: + break; } - } break; - default: - break; + } } } diff --git a/nntrainer/src/layers.cpp 
b/nntrainer/src/layers.cpp index eabecb1..57378ee 100644 --- a/nntrainer/src/layers.cpp +++ b/nntrainer/src/layers.cpp @@ -21,11 +21,11 @@ * */ -#include "include/layers.h" +#include "layers.h" #include +#include #include -#include "include/nntrainer_log.h" -#include "include/util_func.h" +#include "util_func.h" static auto rng = [] { std::mt19937 rng; diff --git a/nntrainer/src/neuralnet.cpp b/nntrainer/src/neuralnet.cpp index c0c1a5c..ff06d69 100644 --- a/nntrainer/src/neuralnet.cpp +++ b/nntrainer/src/neuralnet.cpp @@ -21,13 +21,13 @@ * */ -#include "include/neuralnet.h" +#include "neuralnet.h" #include +#include #include #include #include #include -#include "include/nntrainer_log.h" #include "iniparser.h" /** diff --git a/nntrainer/src/nntrainer_logger.cpp b/nntrainer/src/nntrainer_logger.cpp index d4d6111..9e9b61c 100644 --- a/nntrainer/src/nntrainer_logger.cpp +++ b/nntrainer/src/nntrainer_logger.cpp @@ -21,7 +21,7 @@ * @bug No known bugs except for NYI items */ -#include +#include "nntrainer_logger.h" #include #include #include diff --git a/nntrainer/src/optimizer.cpp b/nntrainer/src/optimizer.cpp index 9dcb5ae..11d4cba 100644 --- a/nntrainer/src/optimizer.cpp +++ b/nntrainer/src/optimizer.cpp @@ -21,9 +21,9 @@ * */ -#include "include/optimizer.h" -#include "include/nntrainer_log.h" -#include "include/util_func.h" +#include "optimizer.h" +#include +#include "util_func.h" void Optimizer::initialize(unsigned int height, unsigned int width, bool setTensor) { if (type == OptType::adam && setTensor) { diff --git a/nntrainer/src/tensor.cpp b/nntrainer/src/tensor.cpp index 87a9d1b..570a73d 100644 --- a/nntrainer/src/tensor.cpp +++ b/nntrainer/src/tensor.cpp @@ -21,12 +21,12 @@ * */ -#include "include/tensor.h" +#include "tensor.h" #include +#include #include #include #include -#include "include/nntrainer_log.h" #ifdef USE_CUBLAS #include diff --git a/nntrainer/src/util_func.cpp b/nntrainer/src/util_func.cpp index b5c83bf..0e57495 100644 --- 
a/nntrainer/src/util_func.cpp +++ b/nntrainer/src/util_func.cpp @@ -22,10 +22,10 @@ #ifndef __UTIL_FUNC_H__ #define __UTIL_FUNC_H__ -#include "include/util_func.h" +#include "util_func.h" #include -#include "include/tensor.h" #include "math.h" +#include "tensor.h" Tensors::Tensor softmaxPrime(Tensors::Tensor x) { int batch = x.getBatch();