[Application] Add Simple Layers NNtrainer example(FC, LSTM, Conv)
authorDongHak Park <donghak.park@samsung.com>
Wed, 1 Feb 2023 05:41:29 +0000 (14:41 +0900)
committerJijoong Moon <jijoong.moon@samsung.com>
Tue, 7 Feb 2023 22:45:17 +0000 (07:45 +0900)
Add Simple Layers NNtrainer example (FC, LSTM, Conv) with dummy data

Add Single Layer example
- Linear(Fully-Connected)
- Conv
- LSTM
- Model_A_Linear
- Model_A_Conv
- Model_C_Linear
- Model_C_Conv

We conduct memory & latency benchmark tests based on this code.
The loss may show inf because the dataset is a dummy one; if you want to get an actual loss, set your own dataset.
- It supports training only; if users want, they can set their own dataset, validation loss, test loss, etc.

Signed-off-by: DongHak Park <donghak.park@samsung.com>
14 files changed:
Applications/Layers/README.md [new file with mode: 0644]
Applications/Layers/jni/Android.mk [new file with mode: 0644]
Applications/Layers/jni/Application.mk [new file with mode: 0644]
Applications/Layers/jni/main.cpp [new file with mode: 0644]
Applications/Layers/jni/meson.build [new file with mode: 0644]
Applications/Layers/res/Conv.ini [new file with mode: 0644]
Applications/Layers/res/LSTM.ini [new file with mode: 0644]
Applications/Layers/res/Linear.ini [new file with mode: 0644]
Applications/Layers/res/Model_A_Conv.ini [new file with mode: 0644]
Applications/Layers/res/Model_A_Linear.ini [new file with mode: 0644]
Applications/Layers/res/Model_C_Conv.ini [new file with mode: 0644]
Applications/Layers/res/Model_C_Linear.ini [new file with mode: 0644]
Applications/README.md
Applications/meson.build

diff --git a/Applications/Layers/README.md b/Applications/Layers/README.md
new file mode 100644 (file)
index 0000000..bf16cac
--- /dev/null
@@ -0,0 +1,46 @@
+# Layers
+
+In this example, we demonstrate training single-layer models with a dummy dataset.  
+You can run a simple layer example with an ```.ini``` file. (It automatically configures the input & output sizes.)  
+If you want to modify the input & output sizes, just edit the ```.ini``` file.
+
+
+## Layer List
+
+| No  | Layer Type              | Model Summary                 |
+| --- | ----------------------- | ----------------------------- |
+| 1   | Linear(Fully connected) | -> FC ->                      |
+| 2   | Convolution             | -> Conv ->                    |
+| 3   | LSTM                    | -> LSTM ->                    |
+| 4   | Model_A_Linear          | -> FC -> FC -> FC ->          |
+| 5   | Model_A_Conv            | -> Conv -> Conv -> Conv ->    |
+| 6   | Model_C_Linear          | -> FC -> RELU -> Flatten ->   |
+| 7   | Model_C_Conv            | -> Conv -> RELU -> Flatten -> |
+
+## How to Run
+
+### 1. NNTrainer
+Build with meson, ninja
+
+In "nntrainer dir"
+```.bash
+meson build
+```
+
+In "nntrainer/build dir"
+```.bash
+ninja
+```
+
+In "nntrainer/build dir"
+```.bash
+./Applications/Layers/jni/nntrainer_Layers ../Applications/Layers/res/{ini file}
+```
+
+### 2. Pytorch, Tensorflow
+
+We provide PyTorch and TensorFlow examples with the same model code. You can test the models in the ```./PyTorch``` and ```./Tensorflow``` directories and run them with
+```.bash
+python3 ./PyTorch/{LayerName}.py
+python3 ./Tensorflow/{LayerName}.py
+```
diff --git a/Applications/Layers/jni/Android.mk b/Applications/Layers/jni/Android.mk
new file mode 100644 (file)
index 0000000..bef36d8
--- /dev/null
@@ -0,0 +1,61 @@
+# Android NDK build script for the Layers example executable.
+# Links the example against prebuilt nntrainer / ccapi-nntrainer shared
+# libraries located under $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI).
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+# ndk path
+ifndef ANDROID_NDK
+$(error ANDROID_NDK is not defined!)
+endif
+
+# Default to the repository root (three levels up from this jni/ dir) when
+# NNTRAINER_ROOT is not provided by the caller.
+ifndef NNTRAINER_ROOT
+NNTRAINER_ROOT := $(LOCAL_PATH)/../../..
+endif
+
+# Header search paths for nntrainer internals and the C/C++ API headers.
+# NOTE(review): the leading space before ML_API_COMMON_INCLUDES looks
+# accidental (GNU make tolerates it for assignments) — confirm.
+ ML_API_COMMON_INCLUDES := ${NNTRAINER_ROOT}/ml_api_common/include
+NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer \
+       $(NNTRAINER_ROOT)/nntrainer/dataset \
+       $(NNTRAINER_ROOT)/nntrainer/models \
+       $(NNTRAINER_ROOT)/nntrainer/graph \
+       $(NNTRAINER_ROOT)/nntrainer/layers \
+       $(NNTRAINER_ROOT)/nntrainer/compiler \
+       $(NNTRAINER_ROOT)/nntrainer/optimizers \
+       $(NNTRAINER_ROOT)/nntrainer/tensor \
+       $(NNTRAINER_ROOT)/nntrainer/utils \
+       $(NNTRAINER_ROOT)/api \
+       $(NNTRAINER_ROOT)/api/ccapi/include \
+       ${ML_API_COMMON_INCLUDES} 
+
+# Prebuilt core nntrainer shared library (built separately per ABI).
+LOCAL_MODULE := nntrainer
+LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libnntrainer.so
+
+include $(PREBUILT_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+# Prebuilt C++ API (ccapi) shared library.
+LOCAL_MODULE := ccapi-nntrainer
+LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libccapi-nntrainer.so
+
+include $(PREBUILT_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+# Shared dummy/CIFAR data loader sources used by this example.
+CIFARDIR = ../../utils/datagen/cifar
+
+LOCAL_ARM_NEON := true
+LOCAL_CFLAGS += -std=c++17 -Ofast -mcpu=cortex-a53 -Ilz4-nougat/lib
+LOCAL_LDFLAGS += -Llz4-nougat/lib/obj/local/$(TARGET_ARCH_ABI)/
+LOCAL_CXXFLAGS += -std=c++17 -frtti
+LOCAL_CFLAGS += -pthread -fexceptions -fopenmp
+LOCAL_LDFLAGS += -fexceptions
+LOCAL_MODULE_TAGS := optional
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE := nntrainer_Layers
+LOCAL_LDLIBS := -llog -landroid -fopenmp
+
+LOCAL_SRC_FILES := main.cpp $(CIFARDIR)/cifar_dataloader.cpp
+
+LOCAL_SHARED_LIBRARIES := nntrainer ccapi-nntrainer
+
+LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES) $(CIFARDIR)
+
+include $(BUILD_EXECUTABLE)
diff --git a/Applications/Layers/jni/Application.mk b/Applications/Layers/jni/Application.mk
new file mode 100644 (file)
index 0000000..659caaf
--- /dev/null
@@ -0,0 +1,3 @@
+# NDK application settings: 64-bit ARM only, shared C++ STL, API level 29.
+APP_ABI := arm64-v8a
+APP_STL := c++_shared
+APP_PLATFORM := android-29
diff --git a/Applications/Layers/jni/main.cpp b/Applications/Layers/jni/main.cpp
new file mode 100644 (file)
index 0000000..54e87a5
--- /dev/null
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2023 DongHak Park <donghak.park@samsung.com>
+ *
+ * @file   main.cpp
+ * @date   26 Jan 2023
+ * @see    https://github.com/nnstreamer/nntrainer
+ * @author Donghak Park <donghak.park@samsung.com>
+ * @bug           No known bugs except for NYI items
+ * @brief  This is Layer Example with ini file
+ *
+ */
+
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+#include <stdlib.h>
+#include <vector>
+
+#include <cifar_dataloader.h>
+#include <layer.h>
+#include <model.h>
+#include <optimizer.h>
+
+using LayerHandle = std::shared_ptr<ml::train::Layer>;
+using ModelHandle = std::unique_ptr<ml::train::Model>;
+using UserDataType = std::unique_ptr<nntrainer::util::DataLoader>;
+
+// Globals filled in by main() from the loaded model's dimensions and read by
+// createFakeDataGenerator(); shape arrays hold {channel, height, width}.
+unsigned int DATA_SIZE;
+unsigned int BATCH_SIZE;
+unsigned int INPUT_SHAPE[3];
+unsigned int OUTPUT_SHAPE[3];
+// RNG seed, set from time(NULL) in main().
+unsigned int seed;
+
+// Loss values captured after training completes.
+float training_loss = 0.0;
+float last_batch_loss = 0.0;
+
+/**
+ * @brief Dataset generator callback handed to ml::train::createDataset().
+ *        Fills one batch of input/label buffers from the DataLoader passed
+ *        via user_data; *last is set by the loader when data is exhausted.
+ * @return 0 (always reports success)
+ */
+int trainData_cb(float **input, float **label, bool *last, void *user_data) {
+  auto data = reinterpret_cast<nntrainer::util::DataLoader *>(user_data);
+  data->next(input, label, last);
+  return 0;
+}
+
+/**
+ * @brief Create a random (dummy) training data loader. Tensor shapes come
+ *        from the global INPUT_SHAPE / OUTPUT_SHAPE arrays, which main()
+ *        fills in from the compiled model before calling this.
+ * @param batch_size batch size of the generated tensors
+ * @return one-element array holding the training data loader
+ */
+std::array<UserDataType, 1> createFakeDataGenerator(unsigned int batch_size) {
+
+  UserDataType train_data(new nntrainer::util::RandomDataLoader(
+    {{batch_size, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]}},
+    {{batch_size, OUTPUT_SHAPE[0], OUTPUT_SHAPE[1], OUTPUT_SHAPE[2]}},
+    DATA_SIZE));
+
+  return {std::move(train_data)};
+}
+
+/**
+ * @brief Entry point. Loads a model described by an .ini file, attaches a
+ *        dummy (random) training dataset whose tensor shapes are derived
+ *        from the compiled model itself, and trains it.
+ * @param argv argv[1] must be the path to the model .ini configuration
+ * @return 0 on success, non-zero on any failure
+ */
+int main(int argc, char *argv[]) {
+  int status = 0;
+  // Seed the C RNG used by the random data loader.
+  seed = time(NULL);
+  srand(seed);
+
+  if (argc < 2) {
+    std::cout << "Usage: " << argv[0] << " <model_config>" << std::endl;
+    return -1;
+  }
+
+  auto config = argv[1];
+
+  std::unique_ptr<ml::train::Model> model;
+
+  try {
+    model = createModel(ml::train::ModelType::NEURAL_NET);
+  } catch (std::exception &e) {
+    std::cerr << "Error while creating model! details: " << e.what()
+              << std::endl;
+    return 1;
+  }
+
+  // Read layers and hyper-parameters from the user-supplied .ini file.
+  try {
+    model->load(config, ml::train::ModelFormat::MODEL_FORMAT_INI);
+  } catch (std::exception &e) {
+    std::cerr << "Error while loading model! details: " << e.what()
+              << std::endl;
+    return 1;
+  }
+
+  try {
+    model->compile();
+  } catch (std::exception &e) {
+    std::cerr << "Error while compiling model! details: " << e.what()
+              << std::endl;
+    return 1;
+  }
+
+  try {
+    model->initialize();
+  } catch (std::exception &e) {
+    std::cerr << "Error while initializing model! details: " << e.what()
+              << std::endl;
+    return 1;
+  }
+
+  // Derive dummy-dataset shapes from the compiled model so the generator
+  // always matches whatever the .ini file configured.
+  auto input_dim = model->getInputDimension();
+  auto output_dim = model->getOutputDimension();
+
+  INPUT_SHAPE[0] = input_dim[0].channel();
+  INPUT_SHAPE[1] = input_dim[0].height();
+  INPUT_SHAPE[2] = input_dim[0].width();
+  OUTPUT_SHAPE[0] = output_dim[0].channel();
+  OUTPUT_SHAPE[1] = output_dim[0].height();
+  OUTPUT_SHAPE[2] = output_dim[0].width();
+  // NOTE(review): DATA_SIZE is set to the batch size, i.e. the dummy loader
+  // appears to yield one batch worth of samples per epoch — confirm intended.
+  DATA_SIZE = input_dim[0].batch();
+  BATCH_SIZE = input_dim[0].batch();
+
+  std::array<UserDataType, 1> user_datas;
+
+  try {
+    user_datas = createFakeDataGenerator(DATA_SIZE);
+  } catch (std::exception &e) {
+    std::cerr << "uncaught error while creating data generator! details: "
+              << e.what() << std::endl;
+    return 1;
+  }
+
+  auto &[train_user_data] = user_datas;
+
+  // Wrap the dummy loader in a GENERATOR dataset driven by trainData_cb.
+  std::unique_ptr<ml::train::Dataset> dataset_train;
+  try {
+    dataset_train = ml::train::createDataset(
+      ml::train::DatasetType::GENERATOR, trainData_cb, train_user_data.get());
+  } catch (std::exception &e) {
+    std::cerr << "uncaught error while creating dataset! details: " << e.what()
+              << std::endl;
+    return 1;
+  }
+
+  try {
+    model->setDataset(ml::train::DatasetModeType::MODE_TRAIN,
+                      std::move(dataset_train));
+  } catch (std::exception &e) {
+    std::cerr << "uncaught error while setting dataset! details: " << e.what()
+              << std::endl;
+    return 1;
+  }
+  try {
+    model->train();
+    // Loss values may be inf when training on dummy data (see README).
+    training_loss = model->getTrainingLoss();
+    last_batch_loss = model->getLoss();
+
+  } catch (std::exception &e) {
+    std::cerr << "uncaught error while training! details: " << e.what()
+              << std::endl;
+    return 1;
+  }
+
+  std::cout << "Training is finished" << std::endl;
+
+  return status;
+}
diff --git a/Applications/Layers/jni/meson.build b/Applications/Layers/jni/meson.build
new file mode 100644 (file)
index 0000000..7e1b0c4
--- /dev/null
@@ -0,0 +1,18 @@
+# Build definition for the Layers example executable.
+build_root = meson.build_root()
+res_path = meson.current_source_dir() / '..' / 'res'
+
+# Copy the example's .ini resources into the build tree so the binary can be
+# run from there. NOTE(review): the run_command result is not checked, so a
+# failed copy would go unnoticed — confirm this is acceptable.
+nntr_Layers_resdir = nntr_app_resdir / 'Layers'
+run_command('cp', '-lr', res_path, nntr_Layers_resdir)
+
+# Example entry point plus the shared dummy/CIFAR data loader.
+Layers_sources = [
+  'main.cpp',
+  cifar_path / 'cifar_dataloader.cpp'
+]
+
+executable('nntrainer_Layers',
+  Layers_sources,
+  dependencies: [iniparser_dep, nntrainer_dep, nntrainer_ccapi_dep, app_utils_dep],
+  include_directories: [include_directories('.'), cifar_include_dir],
+  install: get_option('install-app'),
+  install_dir: application_install_dir
+)
diff --git a/Applications/Layers/res/Conv.ini b/Applications/Layers/res/Conv.ini
new file mode 100644 (file)
index 0000000..7d38462
--- /dev/null
@@ -0,0 +1,24 @@
+
+# Single Conv2D layer model: input (3x224x224) -> conv2d(3 filters, 3x3,
+# stride 2, same padding). MSE loss, SGD, constant LR 1e-4, batch 64.
+[Model]
+Type = NeuralNetwork               
+Epochs = 100
+Loss = mse                  
+batch_size = 64 
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4
+
+[inputlayer]
+Type = input
+Input_Shape = 3:224:224
+
+[conv2d_c1_layer]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
diff --git a/Applications/Layers/res/LSTM.ini b/Applications/Layers/res/LSTM.ini
new file mode 100644 (file)
index 0000000..7f48348
--- /dev/null
@@ -0,0 +1,22 @@
+
+# Single LSTM layer model: input (1x1x150528) -> lstm(10 units).
+# MSE loss, SGD, constant LR 1e-4, batch 64.
+[Model]
+Type = NeuralNetwork 
+Epochs = 100         
+Loss = mse       
+batch_size = 64   
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4  
+
+[inputlayer]
+Type = input
+Input_Shape = 1:1:150528
+
+[LSTMLayer]
+Type = lstm
+input_layers = inputlayer
+Unit = 10
diff --git a/Applications/Layers/res/Linear.ini b/Applications/Layers/res/Linear.ini
new file mode 100644 (file)
index 0000000..4525cdc
--- /dev/null
@@ -0,0 +1,22 @@
+
+# Single fully-connected layer model: input (1:1:150528) -> FC(300).
+# MSE loss, SGD, constant LR 1e-4, batch 64.
+[Model]
+Type = NeuralNetwork 
+Epochs = 10
+Loss = mse
+batch_size = 64
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4 
+
+[inputlayer]
+Type = input
+Input_Shape = 1:1:150528
+
+[outputlayer]
+Type = fully_connected
+input_layers=inputlayer
+Unit = 300 
diff --git a/Applications/Layers/res/Model_A_Conv.ini b/Applications/Layers/res/Model_A_Conv.ini
new file mode 100644 (file)
index 0000000..d9f1e9d
--- /dev/null
@@ -0,0 +1,37 @@
+# Model A (Conv): input (3x224x224) -> conv2d -> conv2d -> conv2d.
+# MSE loss, SGD, constant LR 1e-4, batch 64.
+[Model]
+Type = NeuralNetwork 
+Epochs = 100  
+Loss = mse   
+batch_size = 64
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4  
+
+[inputlayer]
+Type = input
+Input_Shape = 3:224:224
+
+[conv2d_c1_layer]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
+
+# NOTE(review): section names "Cov2"/"Cov3" look like typos for
+# "Conv2"/"Conv3" (section names are arbitrary, so this is cosmetic).
+[Cov2]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
+
+[Cov3]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
diff --git a/Applications/Layers/res/Model_A_Linear.ini b/Applications/Layers/res/Model_A_Linear.ini
new file mode 100644 (file)
index 0000000..782c58c
--- /dev/null
@@ -0,0 +1,24 @@
+# Model A (Linear): input (1:1:784) -> FC(4096) -> FC(2048) -> FC(100).
+# MSE loss, SGD, batch 2048.
+# NOTE(review): unlike the sibling configs, there is no
+# [LearningRateScheduler] section here — confirm the default LR is intended.
+[Model]
+Type = NeuralNetwork   
+Epochs = 10    
+Loss = mse   
+batch_size = 2048   
+
+[Optimizer]
+Type = sgd
+
+[inputlayer]
+Type = input
+Input_Shape = 1:1:784
+
+[FC1]
+Type = fully_connected
+Unit = 4096
+
+[FC2]
+Type = fully_connected
+Unit = 2048
+
+[outputlayer]
+Type = fully_connected
+Unit = 100       
diff --git a/Applications/Layers/res/Model_C_Conv.ini b/Applications/Layers/res/Model_C_Conv.ini
new file mode 100644 (file)
index 0000000..f33b204
--- /dev/null
@@ -0,0 +1,27 @@
+# Model C (Conv): input (3x224x224) -> conv2d(+relu) -> flatten.
+# MSE loss, SGD, constant LR 1e-4, batch 64.
+[Model]
+Type = NeuralNetwork     
+Epochs = 100        
+Loss = mse            
+batch_size = 64      
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4         
+
+[inputlayer]
+Type = input
+Input_Shape = 3:224:224
+
+[conv2d_c1_layer]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
+Activation = relu
+
+# NOTE(review): despite the name, "Cov2" is a Flatten layer, not a conv —
+# consider renaming (cosmetic; section names are arbitrary).
+[Cov2]
+Type = Flatten
diff --git a/Applications/Layers/res/Model_C_Linear.ini b/Applications/Layers/res/Model_C_Linear.ini
new file mode 100644 (file)
index 0000000..23e3b99
--- /dev/null
@@ -0,0 +1,25 @@
+
+# Model C (Linear): input (1:1:150528) -> FC(10, relu) -> flatten.
+# MSE loss, SGD, constant LR 1e-4, batch 64.
+[Model]
+Type = NeuralNetwork      
+Epochs = 100        
+Loss = mse            
+batch_size = 64           
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4     
+
+[inputlayer]
+Type = input
+Input_Shape = 1:1:150528
+
+[FC1]
+Type = fully_connected
+Activation = relu
+Unit = 10
+
+[outputlayer]
+Type = Flatten
index 3a74496..2ed107f 100644 (file)
@@ -60,3 +60,7 @@ A logistic regression example using NNTrainer.
 #### [Alexnet Example](https://github.com/nnstreamer/nntrainer/tree/main/Applications/AlexNet)
 
 An example to train Alexnet(Fused) network with CIFAR100 Dataset
+
+#### [Layers Example](https://github.com/nnstreamer/nntrainer/tree/main/Applications/Layers)
+
+An example to train Single Layers with Dummy Dataset
index cf50a84..58c9931 100644 (file)
@@ -17,6 +17,7 @@ endif
 subdir('Custom')
 subdir('ProductRatings/jni')
 subdir('AlexNet/jni')
+subdir('Layers/jni')
 if get_option('enable-tflite-backbone')
   subdir('SimpleShot')
 endif