add doxygen documentation for Training
author jijoong.moon <jijoong.moon@samsung.com>
Wed, 4 Dec 2019 10:59:36 +0000 (19:59 +0900)
committer Jijoong Moon/On-Device Lab(SR)/Principal Engineer/Samsung Electronics <jijoong.moon@samsung.com>
Wed, 4 Dec 2019 23:20:39 +0000 (08:20 +0900)
add doxygen documentation for Training

**Self evaluation:**
1. Build test:  [X]Passed [ ]Failed [ ]Skipped
2. Run test:  [X]Passed [ ]Failed [ ]Skipped
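
The new comments describe a flow that, condensed, drives the NeuralNetwork
API as in the sketch below ( assembled from main.cpp in this diff; feature
extraction, logging and error handling are elided, and the train() wrapper
is hypothetical ):

    #include <string>
    #include <vector>
    #include "include/matrix.h"
    #include "include/neuralnet.h"

    // Hypothetical wrapper around the training flow in main.cpp.
    void train(const std::string &config,
               std::vector<std::vector<double>> &inputVector,
               std::vector<std::vector<double>> &outputVector) {
      Network::NeuralNetwork NN;
      NN.setConfig(config);       // e.g. Config.ini
      NN.init();
      const int iteration = 300;  // ITERATION ( max epoch ) in main.cpp
      for (int i = 0; i < iteration; i++) {
        for (unsigned int j = 0; j < inputVector.size(); j++)
          NN.backwarding(Matrix({inputVector[j]}), Matrix({outputVector[j]}), i);
        NN.setLoss(0.0);          // reset the loss accumulated during the epoch
      }
      NN.finalize();
    }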

Signed-off-by: jijoong.moon <jijoong.moon@samsung.com>
NeuralNet/include/neuralnet.h
Training/jni/main.cpp

index cba4671..c883762 100644
@@ -63,12 +63,11 @@ class NeuralNetwork {
    */
   ~NeuralNetwork(){};
 
-  /**
+  /**
     * @brief     Get Loss
     * @retval    loss value
     */
-      double
-      getLoss();
+  double getLoss();
 
   /**
    * @brief     Set Loss
index 9744ace..ba402e0 100644
@@ -1,28 +1,66 @@
+/**
+ * @file       main.cpp
+ * @date       04 December 2019
+ * @see                https://github.sec.samsung.net/jijoong-moon/Transfer-Learning.git
+ * @author     Jijoong Moon <jijoong.moon@samsung.com>
+ * @bug                No known bugs except for NYI items
+ * @brief      This is Transfer Learning Example with one FC Layer
+ *
+ *              Inputs : Three Categories ( Happy, Sad, Soso ) with
+ *                       5 pictures for each category
+ *              Feature Extractor : ssd_mobilenet_v2_coco_feature.tflite
+ *                                  ( modified to use as a feature extractor )
+ *              Classifier : One Fully Connected Layer
+ *
+ */
+
+#include <stdlib.h>
+#include <time.h>
+#include <cmath>
+#include <fstream>
+#include <iostream>
 #include "bitmap_helpers.h"
 #include "tensorflow/contrib/lite/interpreter.h"
 #include "tensorflow/contrib/lite/kernels/register.h"
 #include "tensorflow/contrib/lite/model.h"
 #include "tensorflow/contrib/lite/string_util.h"
 #include "tensorflow/contrib/lite/tools/gen_op_registration.h"
-#include <cmath>
-#include <fstream>
-#include <iostream>
-#include <stdlib.h>
-#include <time.h>
 
 #include "include/matrix.h"
 #include "include/neuralnet.h"
 
+/**
+ * @brief     Data size for each category
+ */
 #define TOTAL_DATA_SIZE 5
+
+/**
+ * @brief     Number of categories : three
+ */
 #define TOTAL_LABEL_SIZE 3
+
+/**
+ * @brief     Number of test set images
+ */
 #define TOTAL_TEST_SIZE 8
+
+/**
+ * @brief     Max Epoch
+ */
 #define ITERATION 300
-#define LEARNING_RATE 0.7
 
 using namespace std;
 
+/**
+ * @brief     location of resources ( ../../res/ )
+ */
 string data_path;
 
+/**
+ * @brief     step function
+ * @param[in] x value to be thresholded
+ * @retval 0.0 or 1.0 ( x is returned unchanged between the thresholds )
+ */
 double stepFunction(double x) {
   if (x > 0.9) {
     return 1.0;
@@ -35,6 +73,12 @@ double stepFunction(double x) {
   return x;
 }
 
+/**
+ * @brief     Get a feature vector from tensorflow lite
+ *            Creates an interpreter & runs inference with the ssd tflite model
+ * @param[in] filename input file path
+ * @param[out] feature_input output of the tflite feature extractor
+ */
 void getFeature(const string filename, vector<double> &feature_input) {
   int input_size;
   int output_size;
@@ -45,8 +89,7 @@ void getFeature(const string filename, vector<double> &feature_input) {
   int input_idx_list_len = 0;
   int output_idx_list_len = 0;
   std::string model_path = data_path + "ssd_mobilenet_v2_coco_feature.tflite";
-  std::unique_ptr<tflite::FlatBufferModel> model =
-      tflite::FlatBufferModel::BuildFromFile(model_path.c_str());
+  std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(model_path.c_str());
 
   assert(model != NULL);
   tflite::ops::builtin::BuiltinOpResolver resolver;
@@ -62,13 +105,11 @@ void getFeature(const string filename, vector<double> &feature_input) {
   int t_size = interpreter->tensors_size();
   for (int i = 0; i < t_size; i++) {
     for (int j = 0; j < input_size; j++) {
-      if (strcmp(interpreter->tensor(i)->name, interpreter->GetInputName(j)) ==
-          0)
+      if (strcmp(interpreter->tensor(i)->name, interpreter->GetInputName(j)) == 0)
         input_idx_list[input_idx_list_len++] = i;
     }
     for (int j = 0; j < output_size; j++) {
-      if (strcmp(interpreter->tensor(i)->name, interpreter->GetOutputName(j)) ==
-          0)
+      if (strcmp(interpreter->tensor(i)->name, interpreter->GetOutputName(j)) == 0)
         output_idx_list[output_idx_list_len++] = i;
     }
   }
@@ -79,12 +120,10 @@ void getFeature(const string filename, vector<double> &feature_input) {
 
   int len = interpreter->tensor(input_idx_list[0])->dims->size;
   std::reverse_copy(interpreter->tensor(input_idx_list[0])->dims->data,
-                    interpreter->tensor(input_idx_list[0])->dims->data + len,
-                    inputDim);
+                    interpreter->tensor(input_idx_list[0])->dims->data + len, inputDim);
   len = interpreter->tensor(output_idx_list[0])->dims->size;
   std::reverse_copy(interpreter->tensor(output_idx_list[0])->dims->data,
-                    interpreter->tensor(output_idx_list[0])->dims->data + len,
-                    outputDim);
+                    interpreter->tensor(output_idx_list[0])->dims->data + len, outputDim);
 
   int output_number_of_pixels = 1;
   int wanted_channels = inputDim[0];
@@ -98,16 +137,14 @@ void getFeature(const string filename, vector<double> &feature_input) {
 
   uint8_t *in;
   float *output;
-  in = tflite::label_image::read_bmp(filename, &wanted_width, &wanted_height,
-                                     &wanted_channels);
+  in = tflite::label_image::read_bmp(filename, &wanted_width, &wanted_height, &wanted_channels);
   if (interpreter->AllocateTensors() != kTfLiteOk) {
     std::cout << "Failed to allocate tensors!" << std::endl;
     exit(0);
   }
 
   for (int l = 0; l < output_number_of_pixels; l++) {
-    (interpreter->typed_tensor<float>(_input))[l] =
-        ((float)in[l] - 127.5f) / 127.5f;
+    (interpreter->typed_tensor<float>(_input))[l] = ((float)in[l] - 127.5f) / 127.5f;
   }
 
   if (interpreter->Invoke() != kTfLiteOk) {
@@ -125,8 +162,13 @@ void getFeature(const string filename, vector<double> &feature_input) {
   delete[] output_idx_list;
 }
 
-void ExtractFeatures(std::string p, vector<vector<double>> &feature_input,
-                     vector<vector<double>> &feature_output) {
+/**
+ * @brief     Extract the features from all three categories
+ * @param[in] p data path
+ * @param[out] feature_input extracted tflite feature vectors
+ * @param[out] feature_output label data for each input
+ */
+void ExtractFeatures(std::string p, vector<vector<double>> &feature_input, vector<vector<double>> &feature_output) {
   string total_label[TOTAL_LABEL_SIZE] = {"happy", "sad", "soso"};
 
   int trainingSize = TOTAL_LABEL_SIZE * TOTAL_DATA_SIZE;
@@ -155,6 +197,12 @@ void ExtractFeatures(std::string p, vector<vector<double>> &feature_input,
   }
 }
 
+/**
+ * @brief     create NN
+ *            Get features from tflite & run forward & backward propagation
+ * @param[in]  arg 1 : configuration file path
+ * @param[in]  arg 2 : resource path
+ */
 int main(int argc, char *argv[]) {
   if (argc < 3) {
     std::cout << "./TransferLearning Config.ini resources\n";
@@ -167,21 +215,33 @@ int main(int argc, char *argv[]) {
   srand(time(NULL));
   std::string ini_file = data_path + "ini.bin";
   std::vector<std::vector<double>> inputVector, outputVector;
+
+  /**
+   * @brief     Extract features
+   */
   ExtractFeatures(data_path, inputVector, outputVector);
 
+  /**
+   * @brief     Neural network creation & initialization
+   */
   Network::NeuralNetwork NN;
   NN.setConfig(config);
   NN.init();
 
+  /**
+   * @brief     back propagation
+   */
   for (int i = 0; i < ITERATION; i++) {
     for (unsigned int j = 0; j < inputVector.size(); j++) {
       NN.backwarding(Matrix({inputVector[j]}), Matrix({outputVector[j]}), i);
     }
-    cout << "#" << i + 1 << "/" << ITERATION << " - Loss : " << NN.getLoss()
-         << endl;
+    cout << "#" << i + 1 << "/" << ITERATION << " - Loss : " << NN.getLoss() << endl;
     NN.setLoss(0.0);
   }
 
+  /**
+   * @brief     test
+   */
   for (int i = 0; i < TOTAL_TEST_SIZE; i++) {
     std::string path = data_path;
     path += "testset";
@@ -196,5 +256,9 @@ int main(int argc, char *argv[]) {
     Matrix X = Matrix({featureVector});
     cout << NN.forwarding(X).applyFunction(stepFunction) << endl;
   }
+
+  /**
+   * @brief     Finalize NN
+   */
   NN.finalize();
 }
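
For reference, the documented entry point is launched as in the usage string
printed by main() ( argument values are placeholders ):

    ./TransferLearning Config.ini resources

where the resource path is expected to contain the happy/sad/soso training
images, the testset images, and ssd_mobilenet_v2_coco_feature.tflite.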