--- /dev/null
<?xml version="1.0" encoding="utf-8"?>
<!-- Manifest for the NNStreamer multi-device demo application. -->
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
 package="org.freedesktop.nnstreamer.nnstreamermultidevice"
 android:versionCode="1"
 android:versionName="1.0">

 <!-- OpenGL ES 2.0 is required; the camera is optional so devices without one
      can still install the app. -->
 <uses-feature android:glEsVersion="0x00020000"/>
 <uses-feature android:name="android.hardware.camera" android:required="false" />
 <!-- Camera capture, external storage for models/images, and network state for
      the multi-device streaming pipeline; WAKE_LOCK keeps pipelines running. -->
 <uses-permission android:name="android.permission.CAMERA" />
 <uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE" />
 <uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
 <uses-permission android:name="android.permission.INTERNET" />
 <uses-permission android:name="android.permission.ACCESS_NETWORK_STATE" />
 <uses-permission android:name="android.permission.ACCESS_WIFI_STATE" />
 <uses-permission android:name="android.permission.WAKE_LOCK" />

 <application android:label="@string/app_name"
 android:icon="@drawable/nnsuite_multi_logo"
 >
 <!-- Single launcher activity, locked to portrait. -->
 <activity android:name=".NNStreamerMultiDevice"
 android:screenOrientation="portrait"
 android:label="@string/app_name">
 <intent-filter>
 <action android:name="android.intent.action.MAIN" />
 <category android:name="android.intent.category.LAUNCHER" />
 </intent-filter>
 </activity>
 </application>
</manifest>
--- /dev/null
+# Transfer-Learning
+
I made some toy examples similar to Apple's Sticker feature.
The MobileNet SSD V2 TensorFlow Lite model is used as the feature extractor, and Nearest Neighbor is used as the classifier. All the training and testing are done on a Galaxy S8.
+
+![image](https://github.sec.samsung.net/storage/user/19415/files/08b09a80-ef29-11e9-8303-475fd75f4b83)
+
Happy(^^), sad(TT), and soso(ㅡㅡ) classes are used; for each class, 5 images are prepared for training and two images for the test set, as shown below.
+
+![image](https://github.sec.samsung.net/storage/user/19415/files/a73cfb80-ef29-11e9-9ae9-0d6531538eaf)
+
After removing the fully connected layer of MobileNet SSD V2, 128 features are extracted. The features from the first training-set sample are shown below.
+
+![image](https://github.sec.samsung.net/storage/user/19415/files/0997fb00-ef2e-11e9-90a3-51c27bf4013f)
+
+
Simple Euclidean distance is calculated and the result is quite good: every test-set image is classified correctly.
+
+![image](https://github.sec.samsung.net/storage/user/19415/files/87103b00-ef2f-11e9-9c1a-83da0faafb63)
+
Due to the simplicity of this toy example, all the test results are correct.
+
I made two more random pictures which differ a little from the images on the right. As you can see, it is a little hard to tell which class each belongs to. The first image could be classified as "happy", but the red zone overlaps with "sad" and the variance is quite small. The second image is more confusing, because the smallest distances are spread across all the classes.
Perhaps a distance threshold should be defined, which I did not do.^^;;
+
+![image](https://github.sec.samsung.net/storage/user/19415/files/33552000-ef36-11e9-88f6-ea6a35ccdf6b)
--- /dev/null
// Gradle build for the NNStreamer multi-device sample (GStreamer + ndk-build).
buildscript {
 repositories {
 jcenter() // or mavenCentral()
 google()
 }
 dependencies {
 classpath 'com.android.tools.build:gradle:3.4.1'
 }
}

allprojects {
 repositories {
 jcenter()
 google()
 }
}

apply plugin: 'com.android.application'

android {
 compileSdkVersion 24
 buildToolsVersion '28.0.3'

 defaultConfig {
 // NOTE(review): this id does not match the manifest package
 // (org.freedesktop.nnstreamer.nnstreamermultidevice) — confirm which is intended.
 applicationId "org.freedesktop.gstreamer.nnstreamer.nnstreamermultidevice"
 minSdkVersion 24
 targetSdkVersion 24
 versionCode 1
 versionName "1.0"


 externalNativeBuild {
 ndkBuild {
 def gstRoot

 // GStreamer Android binaries: gradle property takes precedence over the env var.
 if (project.hasProperty('gstAndroidRoot'))
 gstRoot = project.gstAndroidRoot
 else
 gstRoot = System.env.GSTREAMER_ROOT_ANDROID

 if (gstRoot == null)
 throw new GradleException('GSTREAMER_ROOT_ANDROID must be set, or "gstAndroidRoot" must be defined in your gradle.properties in the top level directory of the unpacked universal GStreamer Android binaries')

 arguments "NDK_APPLICATION_MK=jni/Application.mk", "GSTREAMER_JAVA_SRC_DIR=src", "GSTREAMER_ROOT_ANDROID=$gstRoot", "GSTREAMER_ASSETS_DIR=src/assets"

 targets "nnstreamer_multidevice"

 // Only arm64-v8a is built here (matches APP_ABI in jni/Application.mk).
 abiFilters 'arm64-v8a'
 }
 }
 }

 buildTypes {
 release {
 minifyEnabled false
 proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.pro'
 // NOTE(review): sourceSets nested inside buildTypes.release resolves against
 // the android block via delegation; conventionally it lives at the android level.
 sourceSets {
 main {
 manifest.srcFile 'AndroidManifest.xml'
 java.srcDirs = ['src']
 resources.srcDirs = ['src']
 aidl.srcDirs = ['src']
 renderscript.srcDirs = ['src']
 res.srcDirs = ['res']
 assets.srcDirs = ['assets']
 }
 }
 }
 }

 externalNativeBuild {
 ndkBuild {
 path 'jni/Android.mk'
 }
 }
}

// Make sure the native build runs before Java compilation.
afterEvaluate {
 if (project.hasProperty('compileDebugJavaWithJavac'))
 project.compileDebugJavaWithJavac.dependsOn 'externalNativeBuildDebug'
 if (project.hasProperty('compileReleaseJavaWithJavac'))
 project.compileReleaseJavaWithJavac.dependsOn 'externalNativeBuildRelease'
}

dependencies {
 implementation fileTree(dir: 'libs', include: ['*.jar'])
 testImplementation 'junit:junit:4.12'
 implementation 'com.android.support:appcompat-v7:24.0.0'
}
--- /dev/null
LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)

# ndk path
ifndef ANDROID_NDK
$(error ANDROID_NDK is not defined!)
endif

include $(CLEAR_VARS)

# Locate the TensorFlow sources; if TENSORFLOW_ROOT is unset (and we are not
# cleaning), fetch them via prepare_tflite.sh into the local directory.
ifndef TENSORFLOW_ROOT
ifneq ($(MAKECMDGOALS),clean)
$(warning TENSORFLOW_ROOT is not defined!)
$(warning TENSORFLOW SRC is going to be downloaded!)

# Currently we are using tensorflow 1.9.0
$(info $(shell ($(LOCAL_PATH)/prepare_tflite.sh)))

TENSORFLOW_ROOT := $(LOCAL_PATH)/tensorflow-1.9.0

endif
endif

TF_LITE_DIR=$(TENSORFLOW_ROOT)/tensorflow/contrib/lite

# ---- Module 1: tensorflow-lite static library built from the TF 1.9 sources ----
LOCAL_MODULE := tensorflow-lite
# Core tflite + kernels sources, plus the bundled farmhash and fft2d.
TFLITE_SRCS := \
 $(wildcard $(TF_LITE_DIR)/*.cc) \
 $(wildcard $(TF_LITE_DIR)/kernels/*.cc) \
 $(wildcard $(TF_LITE_DIR)/kernels/internal/*.cc) \
 $(wildcard $(TF_LITE_DIR)/kernels/internal/optimized/*.cc) \
 $(wildcard $(TF_LITE_DIR)/kernels/internal/reference/*.cc) \
 $(wildcard $(TF_LITE_DIR)/*.c) \
 $(wildcard $(TF_LITE_DIR)/kernels/*.c) \
 $(wildcard $(TF_LITE_DIR)/kernels/internal/*.c) \
 $(wildcard $(TF_LITE_DIR)/kernels/internal/optimized/*.c) \
 $(wildcard $(TF_LITE_DIR)/kernels/internal/reference/*.c) \
 $(wildcard $(TF_LITE_DIR)/downloads/farmhash/src/farmhash.cc) \
 $(wildcard $(TF_LITE_DIR)/downloads/fft2d/fftsg.c)

TFLITE_SRCS := $(sort $(TFLITE_SRCS))

# Exclude unit tests and the minimal example from the library build.
TFLITE_EXCLUDE_SRCS := \
 $(wildcard $(TF_LITE_DIR)/*test.cc) \
 $(wildcard $(TF_LITE_DIR)/*/*test.cc) \
 $(wildcard $(TF_LITE_DIR)/*/*/*test.cc) \
 $(wildcard $(TF_LITE_DIR)/*/*/*/*test.cc) \
 $(wildcard $(TF_LITE_DIR)/kernels/test_util.cc) \
 $(wildcard $(TF_LITE_DIR)/examples/minimal/minimal.cc)

TFLITE_SRCS := $(filter-out $(TFLITE_EXCLUDE_SRCS), $(TFLITE_SRCS))
# ANDROID_NDK env should be set before build
TFLITE_INCLUDES := \
 $(ANDROID_NDK)/../ \
 $(TENSORFLOW_ROOT) \
 $(TF_LITE_DIR)/downloads \
 $(TF_LITE_DIR)/downloads/eigen \
 $(TF_LITE_DIR)/downloads/gemmlowp \
 $(TF_LITE_DIR)/downloads/neon_2_sse \
 $(TF_LITE_DIR)/downloads/farmhash/src \
 $(TF_LITE_DIR)/downloads/flatbuffers/include


LOCAL_SRC_FILES := $(TFLITE_SRCS)
LOCAL_C_INCLUDES := $(TFLITE_INCLUDES)

LOCAL_CFLAGS += -O3 -DNDEBUG
LOCAL_CXXFLAGS += -std=c++11 -frtti -fexceptions -O3 -DNDEBUG

include $(BUILD_STATIC_LIBRARY)

include $(CLEAR_VARS)

# ---- Module 2: transfer_learning_fc executable linking the static tflite lib ----
# NOTE(review): -mcpu=cortex-a53 with APP_ABI=arm64-v8a and -std=c++11 inside
# LOCAL_CFLAGS (applied to C sources too) look suspicious — confirm intent.
LOCAL_ARM_NEON := true
LOCAL_CFLAGS += -std=c++11 -Ofast -mcpu=cortex-a53 -Ilz4-nougat/lib
LOCAL_LDFLAGS += -Llz4-nougat/lib/obj/local/arm64-v8a/
LOCAL_CXXFLAGS += -std=c++11
LOCAL_CFLAGS += -pthread -fopenmp
LOCAL_LDFLAGS += -fopenmp
LOCAL_MODULE_TAGS := optional
LOCAL_ARM_MODE := arm
LOCAL_MODULE := transfer_learning_fc

LOCAL_SRC_FILES := main.cpp matrix.cpp bitmap_helpers.cpp

LOCAL_STATIC_LIBRARIES := tensorflow-lite

LOCAL_C_INCLUDES += $(TFLITE_INCLUDES)

include $(BUILD_EXECUTABLE)
--- /dev/null
# Project-wide NDK settings: single 64-bit ABI, shared C++ runtime, API 24.
APP_ABI = arm64-v8a
APP_STL = c++_shared
APP_PLATFORM=android-24
--- /dev/null
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+
+#include <unistd.h> // NOLINT(build/include_order)
+
+#include "bitmap_helpers.h"
+
+#define LOG(x) std::cerr
+
+namespace tflite
+{
+ namespace label_image
+ {
+
+ uint8_t *decode_bmp (const uint8_t * input, int row_size,
+ uint8_t * const output, int width, int height,
+ int channels, bool top_down)
+ {
+ for (int i = 0; i < height; i++)
+ {
+ int src_pos;
+ int dst_pos;
+
+ for (int j = 0; j < width; j++)
+ {
+ if (!top_down) {
+ src_pos = ((height - 1 - i) * row_size) + j * channels;
+ } else
+ {
+ src_pos = i * row_size + j * channels;
+ }
+
+ dst_pos = (i * width + j) * channels;
+
+ switch (channels) {
+ case 1:
+ output[dst_pos] = input[src_pos];
+ break;
+ case 3:
+ // BGR -> RGB
+ output[dst_pos] = input[src_pos + 2];
+ output[dst_pos + 1] = input[src_pos + 1];
+ output[dst_pos + 2] = input[src_pos];
+ break;
+ case 4:
+ // BGRA -> RGBA
+ output[dst_pos] = input[src_pos + 2];
+ output[dst_pos + 1] = input[src_pos + 1];
+ output[dst_pos + 2] = input[src_pos];
+ output[dst_pos + 3] = input[src_pos + 3];
+ break;
+ default:
+ LOG (FATAL) << "Unexpected number of channels: " << channels;
+ break;
+ }
+ }
+ }
+
+ return output;
+ }
+
+ uint8_t *read_bmp (const std::string & input_bmp_name, int *width,
+ int *height, int *channels)
+ {
+ int begin, end;
+
+ std::ifstream file (input_bmp_name, std::ios::in | std::ios::binary);
+ if (!file) {
+ LOG (FATAL) << "input file " << input_bmp_name << " not found\n";
+ exit (-1);
+ }
+
+ begin = file.tellg ();
+ file.seekg (0, std::ios::end);
+ end = file.tellg ();
+ size_t len = end - begin;
+
+ const uint8_t *img_bytes = new uint8_t[len];
+ file.seekg (0, std::ios::beg);
+ file.read ((char *) img_bytes, len);
+ const int32_t header_size =
+ *(reinterpret_cast < const int32_t * >(img_bytes + 10));
+ *width = *(reinterpret_cast < const int32_t * >(img_bytes + 18));
+ *height = *(reinterpret_cast < const int32_t * >(img_bytes + 22));
+ const int32_t bpp =
+ *(reinterpret_cast < const int32_t * >(img_bytes + 28));
+ *channels = bpp / 8;
+
+ // there may be padding bytes when the width is not a multiple of 4 bytes
+ // 8 * channels == bits per pixel
+ const int row_size = (8 * *channels * *width + 31) / 32 * 4;
+
+ // if height is negative, data layout is top down
+ // otherwise, it's bottom up
+ bool top_down = (*height < 0);
+
+ // Decode image, allocating tensor once the image size is known
+ uint8_t *output = new uint8_t[abs (*height) * *width * *channels];
+ const uint8_t *bmp_pixels = &img_bytes[header_size];
+ return decode_bmp (bmp_pixels, row_size, output, *width, abs (*height),
+ *channels, top_down);
+ }
+
+ } // namespace label_image
+} // namespace tflite
--- /dev/null
+/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
#ifndef TENSORFLOW_CONTRIB_LITE_EXAMPLES_LABEL_IMAGE_BITMAP_HELPERS_H_
#define TENSORFLOW_CONTRIB_LITE_EXAMPLES_LABEL_IMAGE_BITMAP_HELPERS_H_

/* FIX: make the header self-contained — it uses uint8_t and std::string. */
#include <cstdint>
#include <string>

namespace tflite
{
  namespace label_image
  {
    /* Loads a BMP file; fills width/height/channels from the header and
     * returns a new[]-allocated pixel buffer owned by the caller. */
    uint8_t *read_bmp (const std::string & input_bmp_name, int *width,
                       int *height, int *channels);
  }                             // namespace label_image
}                               // namespace tflite

/* FIX: guard comment previously dropped the trailing underscore. */
#endif  // TENSORFLOW_CONTRIB_LITE_EXAMPLES_LABEL_IMAGE_BITMAP_HELPERS_H_
--- /dev/null
#include <algorithm>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <stdio.h>
#include "math.h"
#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/string_util.h"
#include "tensorflow/contrib/lite/string.h"
#include "tensorflow/contrib/lite/model.h"
#include "tensorflow/contrib/lite/tools/gen_op_registration.h"
#include "bitmap_helpers.h"
+
+#define TOTAL_DATA_SIZE 5
+#define TOTAL_LABEL_SIZE 3
+#define TOTAL_TEST_SIZE 8
+
/**
 * 1-nearest-neighbour classifier.
 * `out` holds 3 labels x 5 training samples x 128-dim feature vectors and
 * `test` is the 128-dim query vector. Returns the label index of the closest
 * training sample by Euclidean distance. Logs the running best label id
 * alongside each computed distance.
 * FIX: removed the dead `dist[15]`/`count` buffer (written, never read) and
 * renamed `max` -> `best_dist` (it tracks the minimum, not the maximum).
 */
int KNN(float out[3][5][128], float *test){
  const int num_labels = 3;   /* fixed by the signature (== TOTAL_LABEL_SIZE) */
  const int num_samples = 5;  /* fixed by the signature (== TOTAL_DATA_SIZE) */
  float best_dist = 100000.0; /* sentinel larger than any plausible distance */
  int best_id = 0;

  for (int i = 0; i < num_labels; i++) {
    for (int j = 0; j < num_samples; j++) {
      float sum = 0.0;
      for (int k = 0; k < 128; k++) {
        const float diff = out[i][j][k] - test[k];
        sum += diff * diff;
      }
      const float d = sqrt(sum);
      if (d < best_dist) {
        best_dist = d;
        best_id = i;
      }
      printf("id %d, dist %f\n", best_id, d);
    }
  }

  return best_id;
}
+
+
+int main(){
+ int tensor_size;
+ int node_size;
+ int input_size;
+ int output_size;
+ int *output_idx_list;
+ int *input_idx_list;
+ int inputDim[4];
+ int outputDim[4];
+ int input_idx_list_len=0;
+ int output_idx_list_len = 0;
+ float out[TOTAL_LABEL_SIZE][TOTAL_DATA_SIZE][128];
+
+ char *total_label[TOTAL_LABEL_SIZE]={"happy","sad","soso"};
+ char *data_path="/sdcard/Transfer-Learning/";
+
+ std::unique_ptr<tflite::FlatBufferModel> model= tflite::FlatBufferModel::BuildFromFile("/sdcard/Transfer-Learning/ssd_mobilenet_v2_coco_feature.tflite");
+
+ if(!model){
+ printf("Failed to mmap mdoel\n");
+ exit(0);
+ }
+
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+ std::unique_ptr<tflite::Interpreter> interpreter;
+ tflite::InterpreterBuilder(*model.get(), resolver)(&interpreter);
+
+ tensor_size = interpreter->tensors_size();
+ node_size = interpreter->nodes_size();
+ input_size = interpreter->inputs().size();
+ output_size = interpreter->outputs().size();
+
+ input_idx_list = new int[input_size];
+ output_idx_list = new int[output_size];
+
+ int t_size = interpreter->tensors_size();
+ for(int i=0;i<t_size;i++){
+ for(int j=0;j<input_size;j++){
+ if(strcmp(interpreter->tensor(i)->name, interpreter->GetInputName(j)) == 0)
+ input_idx_list[input_idx_list_len++]=i;
+ }
+ for(int j=0;j<output_size;j++){
+ if(strcmp(interpreter->tensor(i)->name, interpreter->GetOutputName(j))==0)
+ output_idx_list[output_idx_list_len++] = i;
+ }
+ }
+
+ for(int i=0;i<4;i++){
+ inputDim[i]=1;
+ outputDim[i]=1;
+ }
+
+ int len = interpreter->tensor(input_idx_list[0])->dims->size;
+ std::reverse_copy(interpreter->tensor(input_idx_list[0])->dims->data,
+ interpreter->tensor(input_idx_list[0])->dims->data+len, inputDim);
+
+ len = interpreter->tensor(output_idx_list[0])->dims->size;
+ std::reverse_copy(interpreter->tensor(output_idx_list[0])->dims->data,
+ interpreter->tensor(output_idx_list[0])->dims->data+len, outputDim);
+
+ printf("input %d %d %d %d\n",inputDim[0], inputDim[1], inputDim[2], inputDim[3]);
+ printf("output %d %d %d %d\n",outputDim[0], outputDim[1], outputDim[2], outputDim[3]);
+
+ int output_number_of_pixels = 1;
+
+ int wanted_channels = inputDim[0];
+ int wanted_height = inputDim[1];
+ int wanted_width = inputDim[2];
+
+ for(int k=0;k<4;k++)
+ output_number_of_pixels *= inputDim[k];
+
+ int input=interpreter->inputs()[0];
+
+ for(int i=0;i<TOTAL_LABEL_SIZE;i++){
+ std::string path = data_path;
+ path += total_label[i];
+ printf("\n[%s]\n", path.c_str());
+
+ for(int j=0;j<TOTAL_DATA_SIZE;j++){
+ std::string img=path+"/";
+ img += total_label[i]+std::to_string(j+1)+".bmp";
+ printf("%s\n",img.c_str());
+
+ uint8_t *in;
+ float *output;
+ in = tflite::label_image::read_bmp(img, &wanted_width, &wanted_height, &wanted_channels);
+
+ if(interpreter->AllocateTensors() != kTfLiteOk){
+ std::cout << "Failed to allocate tnesors!" <<std::endl;
+ return -2;
+ }
+
+ for(int l=0;l<output_number_of_pixels;l++){
+ (interpreter->typed_tensor<float>(input))[l] =
+ ((float) in[l]-127.5f)/127.5f;
+ }
+
+ if(interpreter->Invoke() != kTfLiteOk) {
+ std::cout << "Failed to invoke!" <<std::endl;
+ return -3;
+ }
+
+ output = interpreter->typed_output_tensor<float>(0);
+
+ std::copy(output, output+128, out[i][j]);
+
+ }
+ }
+
+ for(int i=0;i<TOTAL_LABEL_SIZE;i++){
+ for(int j=0;j<TOTAL_DATA_SIZE;j++){
+ std::string out_file="/sdcard/Transfer-Learning/";
+ out_file += total_label[i]+std::to_string(j+1)+".txt";
+ printf("%s\n",out_file.c_str());
+ std::ofstream writeFile(out_file.data());
+ if(writeFile.is_open()){
+ for(int k=0;k<128;k++)
+ writeFile << out[i][j][k]<<std::endl;
+ writeFile.close();
+ }
+ }
+ }
+
+
+ float testout[TOTAL_TEST_SIZE][128];
+
+ for(int i=0;i<TOTAL_TEST_SIZE;i++){
+ std::string path = data_path;
+ path += "testset";
+ printf("\n[%s]\n", path.c_str());
+
+ std::string img = path+"/";
+ img += "test" + std::to_string(i+1)+".bmp";
+ printf("%s\n", img.c_str());
+
+ uint8_t *in;
+ float *output;
+ in=tflite::label_image::read_bmp(img, &wanted_width, &wanted_height, &wanted_channels);
+
+ if(interpreter->AllocateTensors() != kTfLiteOk){
+ std::cout << "Failed to allocate tnesors!" <<std::endl;
+ return -2;
+ }
+
+ for(int l=0;l<output_number_of_pixels;l++){
+ (interpreter->typed_tensor<float>(input))[l] =
+ ((float) in[l]-127.5f)/127.5f;
+ }
+
+ if(interpreter->Invoke() != kTfLiteOk) {
+ std::cout << "Failed to invoke!" <<std::endl;
+ return -3;
+ }
+
+ output = interpreter->typed_output_tensor<float>(0);
+ std::copy(output, output+128, testout[i]);
+
+ int ret=0;
+
+ ret=KNN(out, testout[i]);
+ printf("class %d\n", ret);
+ }
+
+ delete[] input_idx_list;
+ delete[] output_idx_list;
+
+ return 0;
+}
--- /dev/null
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <stdlib.h>
#include <time.h>
#include "tensorflow/contrib/lite/interpreter.h"
#include "tensorflow/contrib/lite/kernels/register.h"
#include "tensorflow/contrib/lite/string_util.h"
#include "tensorflow/contrib/lite/model.h"
#include "tensorflow/contrib/lite/tools/gen_op_registration.h"
#include "bitmap_helpers.h"

#include "matrix.h"
+#define TOTAL_DATA_SIZE 5
+#define TOTAL_LABEL_SIZE 3
+#define TOTAL_TEST_SIZE 8
+#define ITERATION 300
+
+using namespace std;
+
+string data_path="/sdcard/Transfer-Learning/";
+
+Matrix X, W1, H, W2, Y, B1, B2, Y2, dJdB1, dJdB2, dJdW1, dJdW2;
+double learningRate;
+double loss = 0.0;
+
// Pseudo-random weight initialiser in [-0.4999, 0.5]; the argument is ignored
// (the signature is dictated by Matrix::applyFunction). Uses rand(), so the
// sequence is controlled by srand() in main().
double random(double x){
  const int draw = rand() % 10000 + 1;             // 1..10000
  return static_cast<double>(draw) / 10000 - 0.5;  // shift to be centred on 0
}
+
// Logistic sigmoid activation: maps any real x into (0, 1).
double sigmoid(double x){
  return 1.0 / (1.0 + exp(-x));
}
+
// Derivative of the logistic sigmoid, written directly in terms of x:
// s'(x) = e^-x / (1 + e^-x)^2.
double sigmoidePrime(double x){
  const double decay = exp(-x);
  return decay / (pow(1 + decay, 2));
}
+
// Soft thresholding of a network output: values above 0.9 saturate to 1,
// values below 0.1 saturate to 0, anything in between passes through unchanged
// (so an ambiguous activation stays visible in the printed result).
double stepFunction(double x){
  if (x > 0.9)
    return 1.0;
  if (x < 0.1)
    return 0.0;
  return x;
}
+
+
+void init(int inputNeuron, int hiddenNeuron, int outputNeuron, double rate){
+ learningRate = rate;
+ W1=Matrix(inputNeuron, hiddenNeuron);
+ W2=Matrix(hiddenNeuron, outputNeuron);
+ B1=Matrix(1, hiddenNeuron);
+ B2=Matrix(1, outputNeuron);
+
+ W1=W1.applyFunction(random);
+ W2=W2.applyFunction(random);
+ B1=B1.applyFunction(random);
+ B2=B2.applyFunction(random);
+
+}
+
// Forward pass of the MLP. Stores the input row in global X and the layer
// activations in globals H and Y (learn() reads them afterwards), then
// returns Y (1 x outputNeuron).
Matrix computeOutput(vector<double> input){
 X = Matrix({input}); // 1 x N row vector
 H=X.dot(W1).add(B1).applyFunction(sigmoid); // hidden activations
 Y=H.dot(W2).add(B2).applyFunction(sigmoid); // output activations
 return Y;
}
+
// One backpropagation/SGD step against `expectedOutput`.
// Relies on the globals X, H and Y left behind by the immediately preceding
// computeOutput() call; updates weights W1/W2 and biases B1/B2 in place, and
// records the largest per-sample loss of the current epoch in global `loss`.
void learn(vector<double> expectedOutput){
 Matrix Yt=Matrix({expectedOutput});
 // L2 loss of this sample; `loss` keeps the epoch maximum, not the mean.
 double l = sqrt((Yt.subtract(Y)).multiply(Yt.subtract(Y)).sum())*1.0/2.0;
 if(l > loss) loss = l;

 Y2=Matrix({expectedOutput});

 // Output-layer delta, hidden-layer delta (chain rule), then weight gradients.
 dJdB2=Y.subtract(Y2).multiply(H.dot(W2).add(B2).applyFunction(sigmoidePrime));
 dJdB1=dJdB2.dot(W2.transpose()).multiply(X.dot(W1).add(B1).applyFunction(sigmoidePrime));
 dJdW2=H.transpose().dot(dJdB2);
 dJdW1=X.transpose().dot(dJdB1);

 // Gradient-descent parameter update.
 W1=W1.subtract(dJdW1.multiply(learningRate));
 W2=W2.subtract(dJdW2.multiply(learningRate));
 B1=B1.subtract(dJdB1.multiply(learningRate));
 B2=B2.subtract(dJdB2.multiply(learningRate));
}
+
+void getFeature(const string filename, vector<double>&feature_input){
+ int tensor_size;
+ int node_size;
+ int input_size;
+ int output_size;
+ int *output_idx_list;
+ int *input_idx_list;
+ int inputDim[4];
+ int outputDim[4];
+ int input_idx_list_len=0;
+ int output_idx_list_len=0;
+
+ std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile("/sdcard/Transfer-Learning/ssd_mobilenet_v2_coco_feature.tflite");
+
+ assert(model != NULL);
+ tflite::ops::builtin::BuiltinOpResolver resolver;
+ std::unique_ptr<tflite::Interpreter> interpreter;
+ tflite::InterpreterBuilder(*model.get(), resolver)(&interpreter);
+
+ tensor_size=interpreter->tensors_size();
+ node_size = interpreter->nodes_size();
+ input_size = interpreter->inputs().size();
+ output_size = interpreter->outputs().size();
+
+ input_idx_list = new int[input_size];
+ output_idx_list = new int[output_size];
+
+ int t_size = interpreter->tensors_size();
+ for(int i=0;i<t_size;i++){
+ for(int j=0;j<input_size;j++){
+ if(strcmp(interpreter->tensor(i)->name, interpreter->GetInputName(j)) == 0)
+ input_idx_list[input_idx_list_len++]=i;
+ }
+ for(int j=0;j<output_size;j++){
+ if(strcmp(interpreter->tensor(i)->name, interpreter->GetOutputName(j)) == 0)
+ output_idx_list[output_idx_list_len++]=i;
+ }
+ }
+ for(int i=0;i<4;i++){
+ inputDim[i]=1;
+ outputDim[i]=1;
+ }
+
+ int len = interpreter->tensor(input_idx_list[0])->dims->size;
+ std::reverse_copy(interpreter->tensor(input_idx_list[0])->dims->data,
+ interpreter->tensor(input_idx_list[0])->dims->data+len, inputDim);
+ len = interpreter->tensor(output_idx_list[0])->dims->size;
+ std::reverse_copy(interpreter->tensor(output_idx_list[0])->dims->data,
+ interpreter->tensor(output_idx_list[0])->dims->data+len, outputDim);
+
+ int output_number_of_pixels=1;
+ int wanted_channels = inputDim[0];
+ int wanted_height=inputDim[1];
+ int wanted_width = inputDim[2];
+
+ for(int k=0;k<4;k++)
+ output_number_of_pixels *= inputDim[k];
+
+ int _input = interpreter->inputs()[0];
+
+ uint8_t *in;
+ float* output;
+ in=tflite::label_image::read_bmp(filename,&wanted_width, &wanted_height, &wanted_channels);
+ if(interpreter->AllocateTensors() != kTfLiteOk){
+ std::cout << "Failed to allocate tensors!"<<std::endl;
+ exit(0);
+ }
+
+ for(int l=0;l<output_number_of_pixels;l++){
+ (interpreter->typed_tensor<float>(_input))[l] =
+ ((float) in[l]-127.5f)/127.5f;
+ }
+
+ if(interpreter->Invoke()!=kTfLiteOk){
+ std::cout <<"Failed to invoke!"<<std::endl;
+ exit(0);
+ }
+
+ output = interpreter->typed_output_tensor<float>(0);
+
+ for(int l=0;l<128;l++){
+ feature_input[l]=output[l];
+ }
+
+ delete[] input_idx_list;
+ delete[] output_idx_list;
+}
+
+void ExtractFeatures(const char* path, vector<vector<double>>&feature_input, vector<vector<double>>&feature_output){
+ string total_label[TOTAL_LABEL_SIZE]={"happy", "sad", "soso"};
+
+
+ int trainingSize = TOTAL_LABEL_SIZE * TOTAL_DATA_SIZE;
+
+ feature_input.resize(trainingSize);
+ feature_output.resize(trainingSize);
+
+ int count=0;
+
+ for(int i=0;i<TOTAL_LABEL_SIZE;i++){
+ std::string path = data_path;
+ path += total_label[i];
+
+ for(int j=0;j<TOTAL_DATA_SIZE;j++){
+ std::string img = path+"/";
+ img += total_label[i]+std::to_string(j+1)+".bmp";
+ printf("%s\n",img.c_str());
+
+ feature_input[count].resize(128);
+
+ getFeature(img, feature_input[count]);
+ feature_output[count].resize(TOTAL_LABEL_SIZE);
+ feature_output[count][i]=1;
+ count++;
+ }
+ }
+
+}
+
// Entry point: extracts features for all training images, trains the small
// MLP classifier for ITERATION epochs, then prints the thresholded network
// output for each of the TOTAL_TEST_SIZE test images.
int main(int argc, char*argv[]){

 srand(time(NULL)); // seed the random weight initialisation

 // 15 training feature vectors and their one-hot labels.
 std::vector<std::vector<double>> inputVector, outputVector;
 ExtractFeatures("/sdcard/Transfer-Learning/",inputVector, outputVector);

 // 128 inputs -> 20 hidden units -> 3 classes, learning rate 0.7.
 init(128,20,TOTAL_LABEL_SIZE,0.7);

 // Plain SGD: one forward + backward pass per sample, per epoch.
 for(int i=0;i<ITERATION;i++){
 for(int j=0; j<inputVector.size();j++){
 computeOutput(inputVector[j]);
 learn(outputVector[j]);
 }
 cout<<"#"<<i+1<<"/"<<ITERATION<< " - Loss : "<< loss<< endl; // max per-sample loss this epoch
 loss = 0.0;
 }

 // Evaluate: print the (thresholded) class scores for each test image.
 for(int i=0;i<TOTAL_TEST_SIZE;i++){
 std::string path = data_path;
 path += "testset";
 printf("\n[%s]\n", path.c_str());
 std::string img=path+"/";
 img += "test" + std::to_string(i+1)+".bmp";
 printf("%s\n",img.c_str());

 std::vector<double> featureVector, resultVector;
 featureVector.resize(128);
 getFeature(img, featureVector);
 cout << computeOutput(featureVector).applyFunction(stepFunction)<<endl;
 }
}
--- /dev/null
+#include "matrix.h"
+#include <assert.h>
+#include <sstream>
+
+Matrix::Matrix(){}
+
+Matrix::Matrix(int height, int width){
+ this->height = height;
+ this->width = width;
+ this->array = std::vector <std::vector<double>>(height, std::vector<double>(width));
+}
+
+Matrix::Matrix(std::vector<std::vector<double>>const &array)
+{
+ assert (array.size()!=0);
+ this->height = array.size();
+ this->width = array[0].size();
+ this->array = array;
+}
+
+Matrix Matrix::multiply(double const &value){
+ Matrix result(height,width);
+ int i,j;
+
+ for(i =0;i<height; i++){
+ for(j=0;j<width;j++){
+ result.array[i][j]=array[i][j]*value;
+ }
+ }
+
+ return result;
+}
+
+Matrix Matrix::add(Matrix const &m) const {
+ assert(height = m.height && width = m.width);
+
+ Matrix result(height, width);
+ int i,j;
+ for(i=0;i<height;i++){
+ for(j=0;j<width;j++){
+ result.array[i][j] = array[i][j] + m.array[i][j];
+ }
+ }
+ return result;
+}
+
+Matrix Matrix::subtract(Matrix const &m) const{
+ assert(height=m.height && width=m.width);
+ Matrix result(height,width);
+ int i,j;
+
+ for(i=0;i<height;i++){
+ for(j=0;j<width;j++){
+ result.array[i][j] = array[i][j] - m.array[i][j];
+ }
+ }
+
+ return result;
+}
+
+Matrix Matrix::multiply(Matrix const &m)const{
+ assert(height=m.height && width=m.width);
+ Matrix result (height, width);
+
+ int i,j;
+
+ for(i=0;i<height;i++){
+ for(j=0;j<width;j++){
+ result.array[i][j] = array[i][j] * m.array[i][j];
+ }
+ }
+
+ return result;
+}
+
+double Matrix::sum() const{
+ int i,j;
+ double ret=0.0;
+ for(i=0;i<height;i++){
+ for(j=0;j<width;j++){
+ ret += array[i][j];
+ }
+ }
+ return ret;
+}
+
+Matrix Matrix::dot(Matrix const &m) const{
+ assert(width=m.height);
+ int i,j,h, mwidth = m.width;
+ double w=0;
+
+ Matrix result(height,mwidth);
+
+ for(i=0;i<height;i++){
+ for(j=0;j<mwidth;j++){
+ for(h=0;h<width;h++){
+ w += array[i][h]*m.array[h][j];
+ }
+ result.array[i][j] = w;
+ w=0;
+ }
+ }
+
+ return result;
+}
+
+Matrix Matrix::transpose()const{
+ Matrix result(width, height);
+ int i,j;
+ for(i=0;i<width;i++){
+ for(j=0;j<height;j++){
+ result.array[i][j] = array[j][i];
+ }
+ }
+ return result;
+}
+
+Matrix Matrix::applyFunction(double(*function)(double))const{
+ Matrix result(height, width);
+ int i,j;
+
+ for(i=0;i<height;i++){
+ for(j=0;j<width;j++){
+ result.array[i][j] = (*function)(array[i][j]);
+ }
+ }
+
+ return result;
+}
+
+void Matrix::print(std::ostream &flux) const{
+ int i,j;
+ int maxLength[width];
+ std::stringstream ss;
+
+ for(i=0;i<width;i++){
+ maxLength[i]=0;
+ }
+
+ for(i=0;i<height;i++){
+ for(j=0;j<width;j++){
+ ss<<array[i][j];
+ if(maxLength[j] < ss.str().size()){
+ maxLength[j]=ss.str().size();
+ }
+ ss.str(std::string());
+ }
+ }
+
+
+ for(i=0;i<height;i++){
+ for(j=0;j<width;j++){
+ flux<<array[i][j];
+ ss<<array[i][j];
+
+ for(int k=0; k<maxLength[j]-ss.str().size()+1;k++){
+ flux<<" ";
+ }
+ ss.str(std::string());
+ }
+ flux<<std::endl;
+ }
+}
+
// Stream insertion: pretty-prints the matrix via Matrix::print().
std::ostream& operator<<(std::ostream & flux, Matrix const &m){
 m.print(flux);
 return flux;
}
+
+
+
+
+
--- /dev/null
#ifndef MATRIX_H
#define MATRIX_H


#include <vector>
#include <iostream>

/* Minimal dense 2-D matrix of doubles used by the toy neural network.
 * Every operation returns a new matrix; nothing is modified in place. */
class Matrix{
 public:
 /* Constructors: empty 0x0, zeroed height x width, or wrap a 2-D vector. */
 Matrix();
 Matrix(int height, int width);
 Matrix(std::vector<std::vector<double>> const &array);

 /* Scalar multiplication. */
 Matrix multiply(double const &value);

 /* Element-wise operations; operands must share dimensions. */
 Matrix add(Matrix const &m) const;
 Matrix subtract(Matrix const &m) const;
 Matrix multiply(Matrix const &m) const;


 /* Matrix product (width must equal m.height), transpose, and entry sum. */
 Matrix dot(Matrix const &m) const;
 Matrix transpose() const;
 double sum() const;

 /* Applies `function` to every entry, returning the mapped matrix. */
 Matrix applyFunction(double (*function)(double)) const;

 /* Aligned, human-readable dump to `flux`. */
 void print(std::ostream &flux) const;

 private:
 std::vector<std::vector<double>> array; // row-major storage
 int height; // number of rows
 int width;  // number of columns
};

/* Stream insertion via Matrix::print(). */
std::ostream& operator<<(std::ostream &flux, Matrix const &m);

#endif
--- /dev/null
#!/usr/bin/env bash
# Downloads and prepares the TensorFlow sources needed to build the tflite
# static library (see Android.mk). Currently we are using tensorflow 1.9.0.
# FIX: `echo "...\n"` printed a literal backslash-n (bash echo does not
# interpret escapes without -e); the stray \n sequences are removed.
VERSION="1.9.0"

# Get tensorflow: fetch the release tarball once, then unpack it.
if [ ! -d "tensorflow-${VERSION}" ]; then
  if [ ! -f "v${VERSION}.tar.gz" ]; then
    echo "[TENSORFLOW-LITE] Download tensorflow-${VERSION}"
    wget "https://github.com/tensorflow/tensorflow/archive/v${VERSION}.tar.gz"
    echo "[TENSORFLOW-LITE] Finish Downloading tensorflow-${VERSION}"
    echo "[TENSORFLOW-LITE] untar tensorflow-${VERSION}"
  fi
  tar xf "v${VERSION}.tar.gz"
fi

if [ ! -d "tensorflow-${VERSION}/tensorflow/contrib/lite/downloads" ]; then
  # Download dependencies; pin flatbuffers to v1.8.0 instead of the moving
  # master.zip so the build is reproducible.
  pushd "tensorflow-${VERSION}"
  echo "[TENSORFLOW-LITE] Download external libraries of tensorflow-${VERSION}"
  sed -i "s|flatbuffers/archive/master.zip|flatbuffers/archive/v1.8.0.zip|g" tensorflow/contrib/lite/download_dependencies.sh
  ./tensorflow/contrib/lite/download_dependencies.sh
  popd
fi