+/**
+ * @file main.cpp
+ * @date 04 December 2019
+ * @see https://github.sec.samsung.net/jijoong-moon/Transfer-Learning.git
+ * @author Jijoong Moon <jijoong.moon@samsung.com>
+ * @bug No known bugs except for NYI items
+ * @brief This is Transfer Learning Example with one FC Layer
+ *
+ * Inputs : Three Categories ( Happy, Sad, Soso ) with
+ * 5 pictures for each category
+ * Feature Extractor : ssd_mobilenet_v2_coco_feature.tflite
+ * ( modified to use as a feature extractor )
+ * Classifier : One Fully Connected Layer
+ *
+ */
+
+#include <stdlib.h>
+#include <time.h>
+#include <cmath>
+#include <fstream>
+#include <iostream>
#include "bitmap_helpers.h"
#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/model.h"
#include "tensorflow/contrib/lite/string_util.h"
#include "tensorflow/contrib/lite/tools/gen_op_registration.h"
-#include <cmath>
-#include <fstream>
-#include <iostream>
-#include <stdlib.h>
-#include <time.h>
#include "include/matrix.h"
#include "include/neuralnet.h"
+/**
+ * @brief Data size for each category
+ */
#define TOTAL_DATA_SIZE 5
+
+/**
+ * @brief Number of category : Three
+ */
#define TOTAL_LABEL_SIZE 3
+
+/**
+ * @brief Number of samples in the test set
+ */
#define TOTAL_TEST_SIZE 8
+
+/**
+ * @brief Max Epoch
+ */
#define ITERATION 300
-#define LEARNING_RATE 0.7
using namespace std;
+/**
+ * @brief location of resources ( ../../res/ )
+ */
string data_path;
+/**
+ * @brief step function
+ * @param[in] x value to be thresholded
+ * @retval 0.0 or 1.0
+ */
double stepFunction(double x) {
  // Saturate high activations to 1.0 (documented contract: retval 0.0 or 1.0).
  if (x > 0.9) {
    return 1.0;
  }
  // Saturate low activations to 0.0; mid-range values pass through unchanged.
  // NOTE(review): the diff view lost a closing brace here — braces rebalanced
  // and the 0.0 branch restored to match the @retval documentation above.
  if (x < 0.1) {
    return 0.0;
  }
  return x;
}
+/**
+ * @brief Get Feature vector from tensorflow lite
+ * Creates a tflite interpreter & runs inference with the ssd tflite model
+ * @param[in] filename input file path
+ * @param[out] feature_input save output of tflite
+ */
void getFeature(const string filename, vector<double> &feature_input) {
int input_size;
int output_size;
int input_idx_list_len = 0;
int output_idx_list_len = 0;
std::string model_path = data_path + "ssd_mobilenet_v2_coco_feature.tflite";
- std::unique_ptr<tflite::FlatBufferModel> model =
- tflite::FlatBufferModel::BuildFromFile(model_path.c_str());
+ std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(model_path.c_str());
assert(model != NULL);
tflite::ops::builtin::BuiltinOpResolver resolver;
int t_size = interpreter->tensors_size();
for (int i = 0; i < t_size; i++) {
for (int j = 0; j < input_size; j++) {
- if (strcmp(interpreter->tensor(i)->name, interpreter->GetInputName(j)) ==
- 0)
+ if (strcmp(interpreter->tensor(i)->name, interpreter->GetInputName(j)) == 0)
input_idx_list[input_idx_list_len++] = i;
}
for (int j = 0; j < output_size; j++) {
- if (strcmp(interpreter->tensor(i)->name, interpreter->GetOutputName(j)) ==
- 0)
+ if (strcmp(interpreter->tensor(i)->name, interpreter->GetOutputName(j)) == 0)
output_idx_list[output_idx_list_len++] = i;
}
}
int len = interpreter->tensor(input_idx_list[0])->dims->size;
std::reverse_copy(interpreter->tensor(input_idx_list[0])->dims->data,
- interpreter->tensor(input_idx_list[0])->dims->data + len,
- inputDim);
+ interpreter->tensor(input_idx_list[0])->dims->data + len, inputDim);
len = interpreter->tensor(output_idx_list[0])->dims->size;
std::reverse_copy(interpreter->tensor(output_idx_list[0])->dims->data,
- interpreter->tensor(output_idx_list[0])->dims->data + len,
- outputDim);
+ interpreter->tensor(output_idx_list[0])->dims->data + len, outputDim);
int output_number_of_pixels = 1;
int wanted_channels = inputDim[0];
uint8_t *in;
float *output;
- in = tflite::label_image::read_bmp(filename, &wanted_width, &wanted_height,
- &wanted_channels);
+ in = tflite::label_image::read_bmp(filename, &wanted_width, &wanted_height, &wanted_channels);
if (interpreter->AllocateTensors() != kTfLiteOk) {
std::cout << "Failed to allocate tensors!" << std::endl;
exit(0);
}
for (int l = 0; l < output_number_of_pixels; l++) {
- (interpreter->typed_tensor<float>(_input))[l] =
- ((float)in[l] - 127.5f) / 127.5f;
+ (interpreter->typed_tensor<float>(_input))[l] = ((float)in[l] - 127.5f) / 127.5f;
}
if (interpreter->Invoke() != kTfLiteOk) {
delete[] output_idx_list;
}
-void ExtractFeatures(std::string p, vector<vector<double>> &feature_input,
- vector<vector<double>> &feature_output) {
+/**
+ * @brief Extract the features from all three categories
+ * @param[in] p data path
+ * @param[out] feature_input save output of tflite
+ * @param[out] feature_output save label data
+ */
+void ExtractFeatures(std::string p, vector<vector<double>> &feature_input, vector<vector<double>> &feature_output) {
string total_label[TOTAL_LABEL_SIZE] = {"happy", "sad", "soso"};
int trainingSize = TOTAL_LABEL_SIZE * TOTAL_DATA_SIZE;
}
}
+/**
+ * @brief create NN
+ * Get feature from tflite & run forward & back propagation
+ * @param[in] arg 1 : configuration file path
+ * @param[in] arg 2 : resource path
+ */
int main(int argc, char *argv[]) {
if (argc < 3) {
std::cout << "./TransferLearning Config.ini resources\n";
srand(time(NULL));
std::string ini_file = data_path + "ini.bin";
std::vector<std::vector<double>> inputVector, outputVector;
+
+ /**
+ * @brief Extract Feature
+ */
ExtractFeatures(data_path, inputVector, outputVector);
+ /**
+ * @brief Neural Network Create & Initialization
+ */
Network::NeuralNetwork NN;
NN.setConfig(config);
NN.init();
+ /**
+ * @brief back propagation
+ */
for (int i = 0; i < ITERATION; i++) {
for (unsigned int j = 0; j < inputVector.size(); j++) {
NN.backwarding(Matrix({inputVector[j]}), Matrix({outputVector[j]}), i);
}
- cout << "#" << i + 1 << "/" << ITERATION << " - Loss : " << NN.getLoss()
- << endl;
+ cout << "#" << i + 1 << "/" << ITERATION << " - Loss : " << NN.getLoss() << endl;
NN.setLoss(0.0);
}
+ /**
+ * @brief test
+ */
for (int i = 0; i < TOTAL_TEST_SIZE; i++) {
std::string path = data_path;
path += "testset";
Matrix X = Matrix({featureVector});
cout << NN.forwarding(X).applyFunction(stepFunction) << endl;
}
+
+ /**
+ * @brief Finalize NN
+ */
NN.finalize();
}