Split files for DataBufferFromDataFile & DataBufferFromCallback
author: jijoong.moon <jijoong.moon@samsung.com>
Mon, 27 Apr 2020 05:56:05 +0000 (14:56 +0900)
committer: Jijoong Moon <jijoong.moon@samsung.com>
Mon, 27 Apr 2020 16:49:08 +0000 (01:49 +0900)
databuffer.h & databuffer.cpp are too big to manage. Their inherited classes are
saved in separate files.

- databuffer_func.h & databuffer_func.cpp
- databuffer_file.h & databuffer_file.cpp

**Self evaluation:**
1. Build test:  [X]Passed [ ]Failed [ ]Skipped
2. Run test:  [X]Passed [ ]Failed [ ]Skipped

Signed-off-by: jijoong.moon <jijoong.moon@samsung.com>
13 files changed:
Applications/Classification/jni/Android.mk
Applications/Classification/jni/main_func.cpp
jni/Android.mk
nntrainer/include/databuffer.h
nntrainer/include/databuffer_file.h [new file with mode: 0644]
nntrainer/include/databuffer_func.h [new file with mode: 0644]
nntrainer/meson.build
nntrainer/src/databuffer.cpp
nntrainer/src/databuffer_file.cpp [new file with mode: 0644]
nntrainer/src/databuffer_func.cpp [new file with mode: 0644]
nntrainer/src/neuralnet.cpp
packaging/nntrainer.spec
test/unittest/unittest_nntrainer_internal.cpp

index 0551512..3e22105 100644 (file)
@@ -105,3 +105,25 @@ LOCAL_STATIC_LIBRARIES := tensorflow-lite
 LOCAL_C_INCLUDES += $(TFLITE_INCLUDES) $(NNTRAINER_INCLUDES)
 
 include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_ARM_NEON := true
+LOCAL_CFLAGS += -std=c++11 -Ofast -mcpu=cortex-a53 -Ilz4-nougat/lib
+LOCAL_LDFLAGS += -Llz4-nougat/lib/obj/local/arm64-v8a/
+LOCAL_CXXFLAGS += -std=c++11
+LOCAL_CFLAGS += -pthread -fopenmp
+LOCAL_LDFLAGS += -fopenmp 
+LOCAL_MODULE_TAGS := optional
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE := nntrainer_classification_func
+LOCAL_LDLIBS := -llog
+
+LOCAL_SRC_FILES := main_func.cpp
+
+LOCAL_SHARED_LIBRARIES := nntrainer
+
+LOCAL_STATIC_LIBRARIES := tensorflow-lite
+
+LOCAL_C_INCLUDES += $(TFLITE_INCLUDES) $(NNTRAINER_INCLUDES)
+
+include $(BUILD_EXECUTABLE)
index 8547d0b..26fdfad 100644 (file)
@@ -267,10 +267,8 @@ int main(int argc, char *argv[]) {
   }
   const vector<string> args(argv + 1, argv + argc);
   std::string config = args[0];
-  data_path = args[1];
 
   srand(time(NULL));
-  std::string ini_file = data_path + "ini.bin";
   std::vector<std::vector<float>> inputVector, outputVector;
   std::vector<std::vector<float>> inputValVector, outputValVector;
   std::vector<std::vector<float>> inputTestVector, outputTestVector;
index 1333d79..42c1dfd 100644 (file)
@@ -25,6 +25,8 @@ NNTRAINER_SRCS := $(NNTRAINER_ROOT)/nntrainer/src/neuralnet.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/tensor.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/layers.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/databuffer.cpp \
+                  $(NNTRAINER_ROOT)/nntrainer/src/databuffer_func.cpp \
+                  $(NNTRAINER_ROOT)/nntrainer/src/databuffer_file.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/util_func.cpp \
                   $(NNTRAINER_ROOT)/nntrainer/src/optimizer.cpp \
 
index a7e3865..2a3b6dd 100644 (file)
@@ -289,119 +289,16 @@ protected:
 
   std::vector<std::string> labels;
   bool validation[NBUFTYPE];
-};
 
-/**
- * @class   DataBufferFromDataFile Data Buffer from Raw Data File
- * @brief   Data Buffer from reading raw data
- */
-class DataBufferFromDataFile : public DataBuffer {
-
-public:
   /**
-   * @brief     Constructor
-   */
-  DataBufferFromDataFile(){};
-
-  /**
-   * @brief     Destructor
-   */
-  ~DataBufferFromDataFile(){};
-
-  /**
-   * @brief     Initialize Buffer with data buffer private variables
-   * @retval #ML_ERROR_NONE Successful.
-   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
-   */
-  int init();
-
-  /**
-   * @brief     Update Data Buffer ( it is for child thread )
-   * @param[in] BufferType training, validation, test
-   * @retval    void
-   */
-  void updateData(BufferType type, int &status);
-
-  /**
-   * @brief     set train data file name
-   * @param[in] path file path
-   * @param[in] type data type : DATA_TRAIN, DATA_VAL, DATA_TEST
-   * @retval #ML_ERROR_NONE Successful.
-   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
-   */
-  int setDataFile(std::string path, DataType type);
-
-  /**
-   * @brief     set feature size
-   * @param[in] feature batch size. It is equal to input layer's hidden size
-   * @retval #ML_ERROR_NONE Successful.
-   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+   * @brief     return random int value between min to max
+   * @param[in] min minimum vaule
+   * @param[in] max maximum value
+   * @retval    int return value
    */
-  int setFeatureSize(unsigned int n);
-
-private:
-  /**
-   * @brief     raw data file names
-   */
-  std::string train_name;
-  std::string val_name;
-  std::string test_name;
+  int rangeRandom(int min, int max);
 };
 
-/**
- * @class   DataBufferFromCallback Data Buffer from callback given by user
- * @brief   Data Buffer from callback function
- */
-class DataBufferFromCallback : public DataBuffer {
-public:
-  /**
-   * @brief     Constructor
-   */
-  DataBufferFromCallback(){};
-
-  /**
-   * @brief     Destructor
-   */
-  ~DataBufferFromCallback(){};
-
-  /**
-   * @brief     Initialize Buffer
-   * @retval #ML_ERROR_NONE Successful.
-   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
-   */
-  int init();
-
-  /**
-   * @brief     set function pointer for each type
-   * @param[in] type Buffer Type
-   * @param[in] call back function pointer
-   * @retval #ML_ERROR_NONE Successful.
-   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
-   */
-  int setFunc(BufferType type,
-              std::function<bool(vec_3d &, vec_3d &, int &)> func);
-
-  /**
-   * @brief     Update Data Buffer ( it is for child thread )
-   * @param[in] BufferType training, validation, test
-   * @retval    void
-   */
-  void updateData(BufferType type, int &status);
-
-private:
-  /**
-   *
-   * @brief Callback function to get user specific data
-   * @param[in] X data  3D float vector type
-   * @param[in] Y label 3D float vector type
-   * @param[out] status status for error handle
-   * @retval true / false generate all data for this epoch
-   *
-   */
-  std::function<bool(vec_3d &, vec_3d &, int &)> callback_train;
-  std::function<bool(vec_3d &, vec_3d &, int &)> callback_val;
-  std::function<bool(vec_3d &, vec_3d &, int &)> callback_test;
-};
 } // namespace nntrainer
 #endif /* __cplusplus */
 #endif /* __DATABUFFER_H__ */
diff --git a/nntrainer/include/databuffer_file.h b/nntrainer/include/databuffer_file.h
new file mode 100644 (file)
index 0000000..f748fda
--- /dev/null
@@ -0,0 +1,98 @@
+/**
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ * @file       databuffer_file.h
+ * @date       27 April 2020
+ * @brief      This is buffer object take data from raw files
+ * @see                https://github.com/nnstreamer/nntrainer
+ * @author     Jijoong Moon <jijoong.moon@samsung.com>
+ * @bug                No known bugs except for NYI items
+ *
+ */
+
+#ifndef __DATABUFFER_FILE_H__
+#define __DATABUFFER_FILE_H__
+#ifdef __cplusplus
+
+#include "databuffer.h"
+#include <atomic>
+#include <fstream>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <thread>
+#include <vector>
+
+namespace nntrainer {
+
+/**
+ * @class   DataBufferFromDataFile Data Buffer from Raw Data File
+ * @brief   Data Buffer from reading raw data
+ */
+class DataBufferFromDataFile : public DataBuffer {
+
+public:
+  /**
+   * @brief     Constructor
+   */
+  DataBufferFromDataFile(){};
+
+  /**
+   * @brief     Destructor
+   */
+  ~DataBufferFromDataFile(){};
+
+  /**
+   * @brief     Initialize Buffer with data buffer private variables
+   * @retval #ML_ERROR_NONE Successful.
+   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+   */
+  int init();
+
+  /**
+   * @brief     Update Data Buffer ( it is for child thread )
+   * @param[in] BufferType training, validation, test
+   * @retval    void
+   */
+  void updateData(BufferType type, int &status);
+
+  /**
+   * @brief     set train data file name
+   * @param[in] path file path
+   * @param[in] type data type : DATA_TRAIN, DATA_VAL, DATA_TEST
+   * @retval #ML_ERROR_NONE Successful.
+   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+   */
+  int setDataFile(std::string path, DataType type);
+
+  /**
+   * @brief     set feature size
+   * @param[in] feature batch size. It is equal to input layer's hidden size
+   * @retval #ML_ERROR_NONE Successful.
+   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+   */
+  int setFeatureSize(unsigned int n);
+
+private:
+  /**
+   * @brief     raw data file names
+   */
+  std::string train_name;
+  std::string val_name;
+  std::string test_name;
+};
+
+} // namespace nntrainer
+#endif /* __cplusplus */
+#endif /* __DATABUFFER_FILE_H__ */
diff --git a/nntrainer/include/databuffer_func.h b/nntrainer/include/databuffer_func.h
new file mode 100644 (file)
index 0000000..164a5c1
--- /dev/null
@@ -0,0 +1,95 @@
+/**
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ * @file       databuffer_func.h
+ * @date       27 April 2020
+ * @brief      This is buffer object that takes data from user callbacks
+ * @see                https://github.com/nnstreamer/nntrainer
+ * @author     Jijoong Moon <jijoong.moon@samsung.com>
+ * @bug                No known bugs except for NYI items
+ *
+ */
+
+#ifndef __DATABUFFER_FUNC_H__
+#define __DATABUFFER_FUNC_H__
+#ifdef __cplusplus
+
+#include "databuffer.h"
+#include <atomic>
+#include <fstream>
+#include <functional>
+#include <iostream>
+#include <memory>
+#include <thread>
+#include <vector>
+
+namespace nntrainer {
+
+/**
+ * @class   DataBufferFromCallback Data Buffer from callback given by user
+ * @brief   Data Buffer from callback function
+ */
+class DataBufferFromCallback : public DataBuffer {
+public:
+  /**
+   * @brief     Constructor
+   */
+  DataBufferFromCallback(){};
+
+  /**
+   * @brief     Destructor
+   */
+  ~DataBufferFromCallback(){};
+
+  /**
+   * @brief     Initialize Buffer
+   * @retval #ML_ERROR_NONE Successful.
+   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+   */
+  int init();
+
+  /**
+   * @brief     set function pointer for each type
+   * @param[in] type Buffer Type
+   * @param[in] call back function pointer
+   * @retval #ML_ERROR_NONE Successful.
+   * @retval #ML_ERROR_INVALID_PARAMETER invalid parameter.
+   */
+  int setFunc(BufferType type,
+              std::function<bool(vec_3d &, vec_3d &, int &)> func);
+
+  /**
+   * @brief     Update Data Buffer ( it is for child thread )
+   * @param[in] BufferType training, validation, test
+   * @retval    void
+   */
+  void updateData(BufferType type, int &status);
+
+private:
+  /**
+   *
+   * @brief Callback function to get user specific data
+   * @param[in] X data  3D float vector type
+   * @param[in] Y label 3D float vector type
+   * @param[out] status status for error handle
+   * @retval true / false generate all data for this epoch
+   *
+   */
+  std::function<bool(vec_3d &, vec_3d &, int &)> callback_train;
+  std::function<bool(vec_3d &, vec_3d &, int &)> callback_val;
+  std::function<bool(vec_3d &, vec_3d &, int &)> callback_test;
+};
+} // namespace nntrainer
+#endif /* __cplusplus */
+#endif /* __DATABUFFER_FUNC_H__ */
index 6b54a6a..e02681b 100644 (file)
@@ -18,6 +18,8 @@ nntrainer_sources = [
   'src/tensor.cpp',
   'src/layers.cpp',
   'src/databuffer.cpp',
+  'src/databuffer_file.cpp',
+  'src/databuffer_func.cpp',
   'src/nntrainer_logger.cpp',
   'src/optimizer.cpp',
   'src/util_func.cpp'
@@ -28,6 +30,8 @@ nntrainer_headers = [
   'include/tensor.h',
   'include/layers.h',
   'include/databuffer.h',
+  'include/databuffer_file.h',
+  'include/databuffer_func.h',
   'include/nntrainer_log.h',
   'include/nntrainer_logger.h',
   'include/optimizer.h',
index a3189e3..2ba97a7 100644 (file)
 #include <stdlib.h>
 #include <thread>
 
-#define NN_EXCEPTION_NOTI(val)                             \
-  do {                                                     \
-    switch (type) {                                        \
-    case BUF_TRAIN: {                                      \
-      std::lock_guard<std::mutex> lgtrain(readyTrainData); \
-      trainReadyFlag = val;                                \
-      cv_train.notify_all();                               \
-    } break;                                               \
-    case BUF_VAL: {                                        \
-      std::lock_guard<std::mutex> lgval(readyValData);     \
-      valReadyFlag = val;                                  \
-      cv_val.notify_all();                                 \
-    } break;                                               \
-    case BUF_TEST: {                                       \
-      std::lock_guard<std::mutex> lgtest(readyTestData);   \
-      testReadyFlag = val;                                 \
-      cv_test.notify_all();                                \
-    } break;                                               \
-    default:                                               \
-      break;                                               \
-    }                                                      \
-  } while (0)
-
-static std::exception_ptr globalExceptionPtr = nullptr;
+std::exception_ptr globalExceptionPtr = nullptr;
 
 namespace nntrainer {
 
@@ -78,7 +55,7 @@ DataStatus trainReadyFlag;
 DataStatus valReadyFlag;
 DataStatus testReadyFlag;
 
-static int rangeRandom(int min, int max) {
+int DataBuffer::rangeRandom(int min, int max) {
   int n = max - min + 1;
   int remainder = RAND_MAX % n;
   int x;
@@ -88,16 +65,6 @@ static int rangeRandom(int min, int max) {
   return min + x % n;
 }
 
-static long getFileSize(std::string file_name) {
-  std::ifstream file_stream(file_name.c_str(), std::ios::in | std::ios::binary);
-  if (file_stream.good()) {
-    file_stream.seekg(0, std::ios::end);
-    return file_stream.tellg();
-  } else {
-    return 0;
-  }
-}
-
 int DataBuffer::run(BufferType type) {
   int status = ML_ERROR_NONE;
   switch (type) {
@@ -457,443 +424,4 @@ void DataBuffer::displayProgress(const int count, BufferType type, float loss) {
   std::cout.flush();
 }
 
-int DataBufferFromDataFile::init() {
-
-  int status = ML_ERROR_NONE;
-
-  if (!class_num) {
-    ml_loge("Error: number of class must be set");
-    SET_VALIDATION(false);
-    return ML_ERROR_INVALID_PARAMETER;
-  }
-
-  if (!this->input_size) {
-    ml_loge("Error: featuer size must be set");
-    SET_VALIDATION(false);
-    return ML_ERROR_INVALID_PARAMETER;
-  }
-
-  this->cur_train_bufsize = 0;
-  this->cur_val_bufsize = 0;
-  this->cur_test_bufsize = 0;
-
-  if (mini_batch == 0) {
-    ml_loge("Error: mini batch size must be greater than 0");
-    SET_VALIDATION(false);
-    return ML_ERROR_INVALID_PARAMETER;
-  }
-
-  this->rest_train = max_train;
-  this->rest_val = max_val;
-  this->rest_test = max_test;
-
-  this->train_running = true;
-  this->val_running = true;
-  this->test_running = true;
-
-  trainReadyFlag = DATA_NOT_READY;
-  valReadyFlag = DATA_NOT_READY;
-  testReadyFlag = DATA_NOT_READY;
-  return status;
-}
-
-void DataBufferFromDataFile::updateData(BufferType type, int &status) {
-  unsigned int max_size = 0;
-  unsigned int buf_size = 0;
-  unsigned int *rest_size = NULL;
-  unsigned int *cur_size = NULL;
-  bool *running = NULL;
-  std::vector<std::vector<float>> *data = NULL;
-  std::vector<std::vector<float>> *datalabel = NULL;
-  std::ifstream file;
-  switch (type) {
-  case BUF_TRAIN: {
-    max_size = max_train;
-    buf_size = bufsize;
-    rest_size = &rest_train;
-    cur_size = &cur_train_bufsize;
-    running = &train_running;
-    data = &train_data;
-    datalabel = &train_data_label;
-    std::ifstream train_stream(train_name, std::ios::in | std::ios::binary);
-    file.swap(train_stream);
-  } break;
-  case BUF_VAL: {
-    max_size = max_val;
-    buf_size = bufsize;
-    rest_size = &rest_val;
-    cur_size = &cur_val_bufsize;
-    running = &val_running;
-    data = &val_data;
-    datalabel = &val_data_label;
-    std::ifstream val_stream(val_name, std::ios::in | std::ios::binary);
-    file.swap(val_stream);
-  } break;
-  case BUF_TEST: {
-    max_size = max_test;
-    buf_size = bufsize;
-    rest_size = &rest_test;
-    cur_size = &cur_test_bufsize;
-    running = &test_running;
-    data = &test_data;
-    datalabel = &test_data_label;
-    std::ifstream test_stream(test_name, std::ios::in | std::ios::binary);
-    file.swap(test_stream);
-  } break;
-  default:
-    try {
-      throw std::runtime_error("Error: Not Supported Data Type");
-    } catch (...) {
-      globalExceptionPtr = std::current_exception();
-      NN_EXCEPTION_NOTI(DATA_ERROR);
-      return;
-    }
-    break;
-  }
-
-  unsigned int I;
-  std::vector<unsigned int> mark;
-  mark.resize(max_size);
-  file.clear();
-  file.seekg(0, std::ios_base::end);
-  uint64_t file_length = file.tellg();
-
-  for (unsigned int i = 0; i < max_size; ++i) {
-    mark[i] = i;
-  }
-
-  while ((*running) && mark.size() != 0) {
-    if (buf_size - (*cur_size) > 0 && (*rest_size) > 0) {
-      std::vector<float> vec;
-      std::vector<float> veclabel;
-
-      unsigned int id = rangeRandom(0, mark.size() - 1);
-      I = mark[id];
-
-      try {
-        if (I > max_size) {
-          ml_loge("Error: Test case id cannot exceed maximum number of test");
-          status = ML_ERROR_INVALID_PARAMETER;
-          throw std::runtime_error(
-            "Error: Test case id cannot exceed maximum number of test");
-        }
-      } catch (...) {
-        globalExceptionPtr = std::current_exception();
-        NN_EXCEPTION_NOTI(DATA_ERROR);
-        return;
-      }
-
-      mark.erase(mark.begin() + id);
-      uint64_t position = (I * input_size + I * class_num) * sizeof(float);
-      try {
-        if (position > file_length || position > ULLONG_MAX) {
-          ml_loge("Error: Cannot exceed max file size");
-          status = ML_ERROR_INVALID_PARAMETER;
-          throw std::runtime_error("Error: Cannot exceed max file size");
-        }
-      } catch (...) {
-        globalExceptionPtr = std::current_exception();
-        NN_EXCEPTION_NOTI(DATA_ERROR);
-        return;
-      }
-
-      file.seekg(position, std::ios::beg);
-
-      for (unsigned int j = 0; j < input_size; ++j) {
-        float d;
-        file.read((char *)&d, sizeof(float));
-        vec.push_back(d);
-      }
-
-      for (unsigned int j = 0; j < class_num; ++j) {
-        float d;
-        file.read((char *)&d, sizeof(float));
-        veclabel.push_back(d);
-      }
-
-      data_lock.lock();
-      data->push_back(vec);
-      datalabel->push_back(veclabel);
-      (*rest_size)--;
-      (*cur_size)++;
-      data_lock.unlock();
-    }
-
-    if (buf_size == (*cur_size)) {
-      NN_EXCEPTION_NOTI(DATA_READY);
-    }
-  }
-  file.close();
-}
-
-int DataBufferFromDataFile::setDataFile(std::string path, DataType type) {
-  int status = ML_ERROR_NONE;
-  std::ifstream data_file(path.c_str());
-
-  switch (type) {
-  case DATA_TRAIN: {
-    if (!data_file.good()) {
-      ml_loge(
-        "Error: Cannot open data file, Datafile is necessary for training");
-      validation[type] = false;
-      return ML_ERROR_INVALID_PARAMETER;
-    }
-    train_name = path;
-  } break;
-  case DATA_VAL: {
-    if (!data_file.good()) {
-      ml_logw("Warning: Cannot open validation data file. Cannot validate "
-              "training result");
-      validation[type] = false;
-      break;
-    }
-    val_name = path;
-  } break;
-  case DATA_TEST: {
-    if (!data_file.good()) {
-      ml_logw(
-        "Warning: Cannot open test data file. Cannot test training result");
-      validation[type] = false;
-      break;
-    }
-    test_name = path;
-  } break;
-  case DATA_LABEL: {
-    std::string data;
-    if (!data_file.good()) {
-      ml_loge("Error: Cannot open label file");
-      SET_VALIDATION(false);
-      return ML_ERROR_INVALID_PARAMETER;
-    }
-    while (data_file >> data) {
-      labels.push_back(data);
-    }
-    if (class_num != 0 && class_num != labels.size()) {
-      ml_loge("Error: number of label should be same with number class number");
-      SET_VALIDATION(false);
-      return ML_ERROR_INVALID_PARAMETER;
-    }
-    class_num = labels.size();
-  } break;
-  case DATA_UNKNOWN:
-  default:
-    ml_loge("Error: Not Supported Data Type");
-    SET_VALIDATION(false);
-    return ML_ERROR_INVALID_PARAMETER;
-    break;
-  }
-  return status;
-}
-
-int DataBufferFromDataFile::setFeatureSize(unsigned int size) {
-  int status = ML_ERROR_NONE;
-  long file_size = 0;
-
-  status = DataBuffer::setFeatureSize(size);
-  if (status != ML_ERROR_NONE)
-    return status;
-
-  if (validation[DATA_TRAIN]) {
-    file_size = getFileSize(train_name);
-    max_train = static_cast<unsigned int>(
-      file_size / (class_num * sizeof(int) + input_size * sizeof(float)));
-    if (max_train < mini_batch) {
-      ml_logw(
-        "Warning: number of training data is smaller than mini batch size");
-    }
-  } else {
-    max_train = 0;
-  }
-
-  if (validation[DATA_VAL]) {
-    file_size = getFileSize(val_name);
-    max_val = static_cast<unsigned int>(
-      file_size / (class_num * sizeof(int) + input_size * sizeof(float)));
-    if (max_val < mini_batch) {
-      ml_logw("Warning: number of val data is smaller than mini batch size");
-    }
-  } else {
-    max_val = 0;
-  }
-
-  if (validation[DATA_TEST]) {
-    file_size = getFileSize(test_name);
-    max_test = static_cast<unsigned int>(
-      file_size / (class_num * sizeof(int) + input_size * sizeof(float)));
-    if (max_test < mini_batch) {
-      ml_logw("Warning: number of test data is smaller than mini batch size");
-    }
-  } else {
-    max_test = 0;
-  }
-
-  return status;
-}
-
-int DataBufferFromCallback::init() {
-  int status = ML_ERROR_NONE;
-
-  if (!class_num) {
-    ml_loge("Error: number of class must be set");
-    SET_VALIDATION(false);
-    return ML_ERROR_INVALID_PARAMETER;
-  }
-
-  if (!this->input_size) {
-    ml_loge("Error: featuer size must be set");
-    SET_VALIDATION(false);
-    return ML_ERROR_INVALID_PARAMETER;
-  }
-
-  this->cur_train_bufsize = 0;
-  this->cur_val_bufsize = 0;
-  this->cur_test_bufsize = 0;
-
-  this->max_train = 0;
-  this->max_val = 0;
-  this->max_test = 0;
-
-  if (mini_batch == 0) {
-    ml_loge("Error: mini batch size must be greater than 0");
-    SET_VALIDATION(false);
-    return ML_ERROR_INVALID_PARAMETER;
-  }
-
-  this->train_running = true;
-  this->val_running = true;
-  this->test_running = true;
-
-  trainReadyFlag = DATA_NOT_READY;
-  valReadyFlag = DATA_NOT_READY;
-  testReadyFlag = DATA_NOT_READY;
-
-  return status;
-}
-
-int DataBufferFromCallback::setFunc(
-  BufferType type, std::function<bool(vec_3d &, vec_3d &, int &)> func) {
-
-  int status = ML_ERROR_NONE;
-  switch (type) {
-  case BUF_TRAIN:
-    callback_train = func;
-    if (func == NULL)
-      validation[0] = false;
-    break;
-  case BUF_VAL:
-    callback_val = func;
-    if (func == NULL)
-      validation[1] = false;
-    break;
-  case BUF_TEST:
-    callback_test = func;
-    if (func == NULL)
-      validation[2] = false;
-    break;
-  default:
-    status = ML_ERROR_INVALID_PARAMETER;
-    break;
-  }
-
-  return status;
-}
-
-void DataBufferFromCallback::updateData(BufferType type, int &status) {
-  status = ML_ERROR_NONE;
-
-  unsigned int buf_size = 0;
-  unsigned int *cur_size = NULL;
-  bool *running = NULL;
-  std::vector<std::vector<float>> *data = NULL;
-  std::vector<std::vector<float>> *datalabel = NULL;
-  std::function<bool(vec_3d &, vec_3d &, int &)> callback;
-
-  switch (type) {
-  case BUF_TRAIN: {
-    buf_size = bufsize;
-    cur_size = &cur_train_bufsize;
-    running = &train_running;
-    data = &train_data;
-    datalabel = &train_data_label;
-    callback = callback_train;
-  } break;
-  case BUF_VAL: {
-    buf_size = bufsize;
-    cur_size = &cur_val_bufsize;
-    running = &val_running;
-    data = &val_data;
-    datalabel = &val_data_label;
-    callback = callback_val;
-  } break;
-  case BUF_TEST: {
-    buf_size = bufsize;
-    cur_size = &cur_test_bufsize;
-    running = &test_running;
-    data = &test_data;
-    datalabel = &test_data_label;
-    callback = callback_test;
-  } break;
-  default:
-    break;
-  }
-
-  while ((*running)) {
-    if (buf_size - (*cur_size) > 0) {
-      vec_3d vec;
-      vec_3d veclabel;
-
-      bool endflag = callback(vec, veclabel, status);
-      if (!endflag)
-        break;
-
-      if (vec.size() != veclabel.size()) {
-        status = ML_ERROR_INVALID_PARAMETER;
-      }
-
-      for (unsigned int i = 0; i < vec.size(); ++i) {
-        std::vector<float> v;
-        std::vector<float> vl;
-        for (unsigned int j = 0; j < vec[i].size(); ++j) {
-          for (unsigned int k = 0; k < vec[i][j].size(); ++k) {
-            v.push_back(vec[i][j][k]);
-          }
-        }
-        for (unsigned int j = 0; j < veclabel[i].size(); ++j) {
-          for (unsigned int k = 0; k < veclabel[i][j].size(); ++k) {
-            vl.push_back(veclabel[i][j][k]);
-          }
-        }
-
-        data_lock.lock();
-        data->push_back(v);
-        datalabel->push_back(vl);
-        (*cur_size)++;
-        data_lock.unlock();
-      }
-    }
-
-    if (buf_size == (*cur_size)) {
-      switch (type) {
-      case BUF_TRAIN: {
-        std::lock_guard<std::mutex> lgtrain(readyTrainData);
-        trainReadyFlag = DATA_READY;
-        cv_train.notify_all();
-      } break;
-      case BUF_VAL: {
-        std::lock_guard<std::mutex> lgval(readyValData);
-        valReadyFlag = DATA_READY;
-        cv_val.notify_all();
-      } break;
-      case BUF_TEST: {
-        std::lock_guard<std::mutex> lgtest(readyTestData);
-        testReadyFlag = DATA_READY;
-        cv_test.notify_all();
-      } break;
-      default:
-        break;
-      }
-    }
-  }
-}
-
 } /* namespace nntrainer */
diff --git a/nntrainer/src/databuffer_file.cpp b/nntrainer/src/databuffer_file.cpp
new file mode 100644 (file)
index 0000000..76c953b
--- /dev/null
@@ -0,0 +1,364 @@
+/**
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ * @file       databuffer_file.cpp
+ * @date       27 April 2020
+ * @brief      This is a buffer object that takes data from raw data files
+ * @see                https://github.com/nnstreamer/nntrainer
+ * @author     Jijoong Moon <jijoong.moon@samsung.com>
+ * @bug                No known bugs except for NYI items
+ *
+ */
+
+#include "databuffer_file.h"
+#include "nntrainer_error.h"
+#include <assert.h>
+#include <climits>
+#include <condition_variable>
+#include <cstring>
+#include <functional>
+#include <iomanip>
+#include <mutex>
+#include <nntrainer_log.h>
+#include <sstream>
+#include <stdexcept>
+#include <stdio.h>
+#include <stdlib.h>
+#include <thread>
+
/**
 * @brief Set the ready flag of the buffer selected by the expansion site's
 *        `type` variable to @p val under the matching mutex and wake every
 *        thread waiting on the corresponding condition variable.
 *        Expects a variable named `type` (BufferType) to be in scope where
 *        the macro is expanded; unknown types are silently ignored.
 */
#define NN_EXCEPTION_NOTI(val)                             \
  do {                                                     \
    switch (type) {                                        \
    case BUF_TRAIN: {                                      \
      std::lock_guard<std::mutex> lgtrain(readyTrainData); \
      trainReadyFlag = val;                                \
      cv_train.notify_all();                               \
    } break;                                               \
    case BUF_VAL: {                                        \
      std::lock_guard<std::mutex> lgval(readyValData);     \
      valReadyFlag = val;                                  \
      cv_val.notify_all();                                 \
    } break;                                               \
    case BUF_TEST: {                                       \
      std::lock_guard<std::mutex> lgtest(readyTestData);   \
      testReadyFlag = val;                                 \
      cv_test.notify_all();                                \
    } break;                                               \
    default:                                               \
      break;                                               \
    }                                                      \
  } while (0)
+
+extern std::exception_ptr globalExceptionPtr;
+
+namespace nntrainer {
+
+extern std::mutex data_lock;
+
+extern std::mutex readyTrainData;
+extern std::mutex readyValData;
+extern std::mutex readyTestData;
+
+extern std::condition_variable cv_train;
+extern std::condition_variable cv_val;
+extern std::condition_variable cv_test;
+
+extern DataStatus trainReadyFlag;
+extern DataStatus valReadyFlag;
+extern DataStatus testReadyFlag;
+
/**
 * @brief Return the size of @p file_name in bytes, or 0 when the file
 *        cannot be opened for binary reading.
 */
static long getFileSize(std::string file_name) {
  std::ifstream stream(file_name.c_str(), std::ios::in | std::ios::binary);
  if (!stream.good())
    return 0;
  /* seek to the end; the resulting offset is the byte count */
  stream.seekg(0, std::ios::end);
  return stream.tellg();
}
+
+int DataBufferFromDataFile::init() {
+
+  int status = ML_ERROR_NONE;
+
+  if (!class_num) {
+    ml_loge("Error: number of class must be set");
+    SET_VALIDATION(false);
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  if (!this->input_size) {
+    ml_loge("Error: featuer size must be set");
+    SET_VALIDATION(false);
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  this->cur_train_bufsize = 0;
+  this->cur_val_bufsize = 0;
+  this->cur_test_bufsize = 0;
+
+  if (mini_batch == 0) {
+    ml_loge("Error: mini batch size must be greater than 0");
+    SET_VALIDATION(false);
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  this->rest_train = max_train;
+  this->rest_val = max_val;
+  this->rest_test = max_test;
+
+  this->train_running = true;
+  this->val_running = true;
+  this->test_running = true;
+
+  trainReadyFlag = DATA_NOT_READY;
+  valReadyFlag = DATA_NOT_READY;
+  testReadyFlag = DATA_NOT_READY;
+  return status;
+}
+
+void DataBufferFromDataFile::updateData(BufferType type, int &status) {
+  unsigned int max_size = 0;
+  unsigned int buf_size = 0;
+  unsigned int *rest_size = NULL;
+  unsigned int *cur_size = NULL;
+  bool *running = NULL;
+  std::vector<std::vector<float>> *data = NULL;
+  std::vector<std::vector<float>> *datalabel = NULL;
+  std::ifstream file;
+  switch (type) {
+  case BUF_TRAIN: {
+    max_size = max_train;
+    buf_size = bufsize;
+    rest_size = &rest_train;
+    cur_size = &cur_train_bufsize;
+    running = &train_running;
+    data = &train_data;
+    datalabel = &train_data_label;
+    std::ifstream train_stream(train_name, std::ios::in | std::ios::binary);
+    file.swap(train_stream);
+  } break;
+  case BUF_VAL: {
+    max_size = max_val;
+    buf_size = bufsize;
+    rest_size = &rest_val;
+    cur_size = &cur_val_bufsize;
+    running = &val_running;
+    data = &val_data;
+    datalabel = &val_data_label;
+    std::ifstream val_stream(val_name, std::ios::in | std::ios::binary);
+    file.swap(val_stream);
+  } break;
+  case BUF_TEST: {
+    max_size = max_test;
+    buf_size = bufsize;
+    rest_size = &rest_test;
+    cur_size = &cur_test_bufsize;
+    running = &test_running;
+    data = &test_data;
+    datalabel = &test_data_label;
+    std::ifstream test_stream(test_name, std::ios::in | std::ios::binary);
+    file.swap(test_stream);
+  } break;
+  default:
+    try {
+      throw std::runtime_error("Error: Not Supported Data Type");
+    } catch (...) {
+      globalExceptionPtr = std::current_exception();
+      NN_EXCEPTION_NOTI(DATA_ERROR);
+      return;
+    }
+    break;
+  }
+
+  unsigned int I;
+  std::vector<unsigned int> mark;
+  mark.resize(max_size);
+  file.clear();
+  file.seekg(0, std::ios_base::end);
+  uint64_t file_length = file.tellg();
+
+  for (unsigned int i = 0; i < max_size; ++i) {
+    mark[i] = i;
+  }
+
+  while ((*running) && mark.size() != 0) {
+    if (buf_size - (*cur_size) > 0 && (*rest_size) > 0) {
+      std::vector<float> vec;
+      std::vector<float> veclabel;
+
+      unsigned int id = rangeRandom(0, mark.size() - 1);
+      I = mark[id];
+
+      try {
+        if (I > max_size) {
+          ml_loge("Error: Test case id cannot exceed maximum number of test");
+          status = ML_ERROR_INVALID_PARAMETER;
+          throw std::runtime_error(
+            "Error: Test case id cannot exceed maximum number of test");
+        }
+      } catch (...) {
+        globalExceptionPtr = std::current_exception();
+        NN_EXCEPTION_NOTI(DATA_ERROR);
+        return;
+      }
+
+      mark.erase(mark.begin() + id);
+      uint64_t position = (I * input_size + I * class_num) * sizeof(float);
+      try {
+        if (position > file_length || position > ULLONG_MAX) {
+          ml_loge("Error: Cannot exceed max file size");
+          status = ML_ERROR_INVALID_PARAMETER;
+          throw std::runtime_error("Error: Cannot exceed max file size");
+        }
+      } catch (...) {
+        globalExceptionPtr = std::current_exception();
+        NN_EXCEPTION_NOTI(DATA_ERROR);
+        return;
+      }
+
+      file.seekg(position, std::ios::beg);
+
+      for (unsigned int j = 0; j < input_size; ++j) {
+        float d;
+        file.read((char *)&d, sizeof(float));
+        vec.push_back(d);
+      }
+
+      for (unsigned int j = 0; j < class_num; ++j) {
+        float d;
+        file.read((char *)&d, sizeof(float));
+        veclabel.push_back(d);
+      }
+
+      data_lock.lock();
+      data->push_back(vec);
+      datalabel->push_back(veclabel);
+      (*rest_size)--;
+      (*cur_size)++;
+      data_lock.unlock();
+    }
+
+    if (buf_size == (*cur_size)) {
+      NN_EXCEPTION_NOTI(DATA_READY);
+    }
+  }
+  file.close();
+}
+
+int DataBufferFromDataFile::setDataFile(std::string path, DataType type) {
+  int status = ML_ERROR_NONE;
+  std::ifstream data_file(path.c_str());
+
+  switch (type) {
+  case DATA_TRAIN: {
+    if (!data_file.good()) {
+      ml_loge(
+        "Error: Cannot open data file, Datafile is necessary for training");
+      validation[type] = false;
+      return ML_ERROR_INVALID_PARAMETER;
+    }
+    train_name = path;
+  } break;
+  case DATA_VAL: {
+    if (!data_file.good()) {
+      ml_logw("Warning: Cannot open validation data file. Cannot validate "
+              "training result");
+      validation[type] = false;
+      break;
+    }
+    val_name = path;
+  } break;
+  case DATA_TEST: {
+    if (!data_file.good()) {
+      ml_logw(
+        "Warning: Cannot open test data file. Cannot test training result");
+      validation[type] = false;
+      break;
+    }
+    test_name = path;
+  } break;
+  case DATA_LABEL: {
+    std::string data;
+    if (!data_file.good()) {
+      ml_loge("Error: Cannot open label file");
+      SET_VALIDATION(false);
+      return ML_ERROR_INVALID_PARAMETER;
+    }
+    while (data_file >> data) {
+      labels.push_back(data);
+    }
+    if (class_num != 0 && class_num != labels.size()) {
+      ml_loge("Error: number of label should be same with number class number");
+      SET_VALIDATION(false);
+      return ML_ERROR_INVALID_PARAMETER;
+    }
+    class_num = labels.size();
+  } break;
+  case DATA_UNKNOWN:
+  default:
+    ml_loge("Error: Not Supported Data Type");
+    SET_VALIDATION(false);
+    return ML_ERROR_INVALID_PARAMETER;
+    break;
+  }
+  return status;
+}
+
+int DataBufferFromDataFile::setFeatureSize(unsigned int size) {
+  int status = ML_ERROR_NONE;
+  long file_size = 0;
+
+  status = DataBuffer::setFeatureSize(size);
+  if (status != ML_ERROR_NONE)
+    return status;
+
+  if (validation[DATA_TRAIN]) {
+    file_size = getFileSize(train_name);
+    max_train = static_cast<unsigned int>(
+      file_size / (class_num * sizeof(int) + input_size * sizeof(float)));
+    if (max_train < mini_batch) {
+      ml_logw(
+        "Warning: number of training data is smaller than mini batch size");
+    }
+  } else {
+    max_train = 0;
+  }
+
+  if (validation[DATA_VAL]) {
+    file_size = getFileSize(val_name);
+    max_val = static_cast<unsigned int>(
+      file_size / (class_num * sizeof(int) + input_size * sizeof(float)));
+    if (max_val < mini_batch) {
+      ml_logw("Warning: number of val data is smaller than mini batch size");
+    }
+  } else {
+    max_val = 0;
+  }
+
+  if (validation[DATA_TEST]) {
+    file_size = getFileSize(test_name);
+    max_test = static_cast<unsigned int>(
+      file_size / (class_num * sizeof(int) + input_size * sizeof(float)));
+    if (max_test < mini_batch) {
+      ml_logw("Warning: number of test data is smaller than mini batch size");
+    }
+  } else {
+    max_test = 0;
+  }
+
+  return status;
+}
+
+} /* namespace nntrainer */
diff --git a/nntrainer/src/databuffer_func.cpp b/nntrainer/src/databuffer_func.cpp
new file mode 100644 (file)
index 0000000..73cf8b3
--- /dev/null
@@ -0,0 +1,222 @@
+/**
+ * Copyright (C) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ * @file       databuffer_func.cpp
+ * @date       27 April 2020
+ * @brief      This is a buffer object that takes data from user callbacks
+ * @see                https://github.com/nnstreamer/nntrainer
+ * @author     Jijoong Moon <jijoong.moon@samsung.com>
+ * @bug                No known bugs except for NYI items
+ *
+ */
+
+#include "databuffer_func.h"
+#include "nntrainer_error.h"
+#include <assert.h>
+#include <climits>
+#include <condition_variable>
+#include <cstring>
+#include <functional>
+#include <iomanip>
+#include <mutex>
+#include <nntrainer_log.h>
+#include <sstream>
+#include <stdexcept>
+#include <stdio.h>
+#include <stdlib.h>
+#include <thread>
+
+namespace nntrainer {
+
+extern std::mutex data_lock;
+
+extern std::mutex readyTrainData;
+extern std::mutex readyValData;
+extern std::mutex readyTestData;
+
+extern std::condition_variable cv_train;
+extern std::condition_variable cv_val;
+extern std::condition_variable cv_test;
+
+extern DataStatus trainReadyFlag;
+extern DataStatus valReadyFlag;
+extern DataStatus testReadyFlag;
+
+int DataBufferFromCallback::init() {
+  int status = ML_ERROR_NONE;
+
+  if (!class_num) {
+    ml_loge("Error: number of class must be set");
+    SET_VALIDATION(false);
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  if (!this->input_size) {
+    ml_loge("Error: featuer size must be set");
+    SET_VALIDATION(false);
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  this->cur_train_bufsize = 0;
+  this->cur_val_bufsize = 0;
+  this->cur_test_bufsize = 0;
+
+  this->max_train = 0;
+  this->max_val = 0;
+  this->max_test = 0;
+
+  if (mini_batch == 0) {
+    ml_loge("Error: mini batch size must be greater than 0");
+    SET_VALIDATION(false);
+    return ML_ERROR_INVALID_PARAMETER;
+  }
+
+  this->train_running = true;
+  this->val_running = true;
+  this->test_running = true;
+
+  trainReadyFlag = DATA_NOT_READY;
+  valReadyFlag = DATA_NOT_READY;
+  testReadyFlag = DATA_NOT_READY;
+
+  return status;
+}
+
+int DataBufferFromCallback::setFunc(
+  BufferType type, std::function<bool(vec_3d &, vec_3d &, int &)> func) {
+
+  int status = ML_ERROR_NONE;
+  switch (type) {
+  case BUF_TRAIN:
+    callback_train = func;
+    if (func == NULL)
+      validation[0] = false;
+    break;
+  case BUF_VAL:
+    callback_val = func;
+    if (func == NULL)
+      validation[1] = false;
+    break;
+  case BUF_TEST:
+    callback_test = func;
+    if (func == NULL)
+      validation[2] = false;
+    break;
+  default:
+    status = ML_ERROR_INVALID_PARAMETER;
+    break;
+  }
+
+  return status;
+}
+
+void DataBufferFromCallback::updateData(BufferType type, int &status) {
+  status = ML_ERROR_NONE;
+
+  unsigned int buf_size = 0;
+  unsigned int *cur_size = NULL;
+  bool *running = NULL;
+  std::vector<std::vector<float>> *data = NULL;
+  std::vector<std::vector<float>> *datalabel = NULL;
+  std::function<bool(vec_3d &, vec_3d &, int &)> callback;
+
+  switch (type) {
+  case BUF_TRAIN: {
+    buf_size = bufsize;
+    cur_size = &cur_train_bufsize;
+    running = &train_running;
+    data = &train_data;
+    datalabel = &train_data_label;
+    callback = callback_train;
+  } break;
+  case BUF_VAL: {
+    buf_size = bufsize;
+    cur_size = &cur_val_bufsize;
+    running = &val_running;
+    data = &val_data;
+    datalabel = &val_data_label;
+    callback = callback_val;
+  } break;
+  case BUF_TEST: {
+    buf_size = bufsize;
+    cur_size = &cur_test_bufsize;
+    running = &test_running;
+    data = &test_data;
+    datalabel = &test_data_label;
+    callback = callback_test;
+  } break;
+  default:
+    break;
+  }
+
+  while ((*running)) {
+    if (buf_size - (*cur_size) > 0) {
+      vec_3d vec;
+      vec_3d veclabel;
+
+      bool endflag = callback(vec, veclabel, status);
+      if (!endflag)
+        break;
+
+      if (vec.size() != veclabel.size()) {
+        status = ML_ERROR_INVALID_PARAMETER;
+      }
+
+      for (unsigned int i = 0; i < vec.size(); ++i) {
+        std::vector<float> v;
+        std::vector<float> vl;
+        for (unsigned int j = 0; j < vec[i].size(); ++j) {
+          for (unsigned int k = 0; k < vec[i][j].size(); ++k) {
+            v.push_back(vec[i][j][k]);
+          }
+        }
+        for (unsigned int j = 0; j < veclabel[i].size(); ++j) {
+          for (unsigned int k = 0; k < veclabel[i][j].size(); ++k) {
+            vl.push_back(veclabel[i][j][k]);
+          }
+        }
+
+        data_lock.lock();
+        data->push_back(v);
+        datalabel->push_back(vl);
+        (*cur_size)++;
+        data_lock.unlock();
+      }
+    }
+
+    if (buf_size == (*cur_size)) {
+      switch (type) {
+      case BUF_TRAIN: {
+        std::lock_guard<std::mutex> lgtrain(readyTrainData);
+        trainReadyFlag = DATA_READY;
+        cv_train.notify_all();
+      } break;
+      case BUF_VAL: {
+        std::lock_guard<std::mutex> lgval(readyValData);
+        valReadyFlag = DATA_READY;
+        cv_val.notify_all();
+      } break;
+      case BUF_TEST: {
+        std::lock_guard<std::mutex> lgtest(readyTestData);
+        testReadyFlag = DATA_READY;
+        cv_test.notify_all();
+      } break;
+      default:
+        break;
+      }
+    }
+  }
+}
+
+} /* namespace nntrainer */
index 32fd8f2..95c3e80 100644 (file)
@@ -22,6 +22,8 @@
  */
 
 #include "neuralnet.h"
+#include "databuffer_file.h"
+#include "databuffer_func.h"
 #include "iniparser.h"
 #include "nntrainer_error.h"
 #include <array>
index 36e45de..32a0ec0 100644 (file)
@@ -175,6 +175,8 @@ cp -r result %{buildroot}%{_datadir}/nntrainer/unittest/
 
 %files devel
 %{_includedir}/nntrainer/databuffer.h
+%{_includedir}/nntrainer/databuffer_file.h
+%{_includedir}/nntrainer/databuffer_func.h
 %{_includedir}/nntrainer/layers.h
 %{_includedir}/nntrainer/neuralnet.h
 %{_includedir}/nntrainer/tensor.h
index 3999eaf..35262fb 100644 (file)
@@ -20,6 +20,8 @@
  * @author      Jijoong Moon <jijoong.moon@samsung.com>
  * @bug         No known bugs
  */
+#include "databuffer_file.h"
+#include "databuffer_func.h"
 #include "neuralnet.h"
 #include "util_func.h"
 #include <fstream>