--- /dev/null
+# Layers
+
+In this example, we demonstrate how to train a model consisting of a single layer with a dummy dataset.
+You can run each layer example with its ```.ini``` file (the input and output sizes are configured automatically).
+If you want to change the input or output size, edit the corresponding ```.ini``` file, as in the snippet below.
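+
+For example, the following excerpt (in the same format as ```res/Conv.ini```) shows the property to edit; ```Input_Shape``` is given as ```channel:height:width```:
+
+```ini
+[inputlayer]
+Type = input
+# change 3:224:224 to e.g. 3:112:112 to halve the input resolution
+Input_Shape = 3:224:224
+```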
+
+
+## Layer List
+
+| No | Layer Type | Model Summary |
+| --- | ----------------------- | ----------------------------- |
+| 1 | Linear (fully connected) | -> FC -> |
+| 2 | Convolution | -> Conv -> |
+| 3 | LSTM | -> LSTM -> |
+| 4 | Model_A_Linear | -> FC -> FC -> FC -> |
+| 5 | Model_A_Conv | -> Conv -> Conv -> Conv -> |
+| 6 | Model_C_Linear | -> FC -> RELU -> Flatten -> |
+| 7 | Model_C_Conv | -> Conv -> RELU -> Flatten -> |
+
+## How to Run
+
+### 1. NNTrainer
+Build with meson and ninja.
+
+In the nntrainer directory:
+```bash
+meson build
+```
+
+In the nntrainer/build directory:
+```bash
+ninja
+```
+
+In the nntrainer/build directory:
+```bash
+./Applications/Layers/jni/nntrainer_Layers ../Applications/Layers/res/{ini file}
+```
+
+### 2. Pytorch, Tensorflow
+
+We provide PyTorch and TensorFlow examples with the same model code. You can test the models in the ```./PyTorch``` and ```./Tensorflow``` directories and run them with:
+```bash
+python3 ./PyTorch/{LayerName}.py
+python3 ./Tensorflow/{LayerName}.py
+```
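+
+For reference, below is a minimal sketch of what the PyTorch counterpart of the Linear example looks like, assuming the shapes from ```res/Linear.ini``` (input ```1:1:150528```, 300 units, SGD with MSE loss) and the ```./PyTorch/Linear.py``` naming convention; the provided scripts may differ in detail:
+
+```python
+# Minimal sketch only; see ./PyTorch/Linear.py for the provided script.
+import torch
+import torch.nn as nn
+
+model = nn.Linear(150528, 300)                             # FC layer, Unit = 300
+criterion = nn.MSELoss()                                   # Loss = mse
+optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)   # sgd, Learning_rate = 1e-4
+
+x = torch.randn(64, 150528)  # one dummy input batch (batch_size = 64)
+y = torch.randn(64, 300)     # dummy labels
+
+optimizer.zero_grad()
+loss = criterion(model(x), y)  # forward pass and loss
+loss.backward()                # backward pass
+optimizer.step()               # SGD update
+print(loss.item())
+```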
--- /dev/null
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+# ndk path
+ifndef ANDROID_NDK
+$(error ANDROID_NDK is not defined!)
+endif
+
+ifndef NNTRAINER_ROOT
+NNTRAINER_ROOT := $(LOCAL_PATH)/../../..
+endif
+
+ML_API_COMMON_INCLUDES := $(NNTRAINER_ROOT)/ml_api_common/include
+NNTRAINER_INCLUDES := $(NNTRAINER_ROOT)/nntrainer \
+ $(NNTRAINER_ROOT)/nntrainer/dataset \
+ $(NNTRAINER_ROOT)/nntrainer/models \
+ $(NNTRAINER_ROOT)/nntrainer/graph \
+ $(NNTRAINER_ROOT)/nntrainer/layers \
+ $(NNTRAINER_ROOT)/nntrainer/compiler \
+ $(NNTRAINER_ROOT)/nntrainer/optimizers \
+ $(NNTRAINER_ROOT)/nntrainer/tensor \
+ $(NNTRAINER_ROOT)/nntrainer/utils \
+ $(NNTRAINER_ROOT)/api \
+ $(NNTRAINER_ROOT)/api/ccapi/include \
+ $(ML_API_COMMON_INCLUDES)
+
+# prebuilt nntrainer core shared library
+LOCAL_MODULE := nntrainer
+LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libnntrainer.so
+
+include $(PREBUILT_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+# prebuilt nntrainer C++ API shared library
+LOCAL_MODULE := ccapi-nntrainer
+LOCAL_SRC_FILES := $(NNTRAINER_ROOT)/libs/$(TARGET_ARCH_ABI)/libccapi-nntrainer.so
+
+include $(PREBUILT_SHARED_LIBRARY)
+
+include $(CLEAR_VARS)
+
+# dummy/CIFAR data generator sources shared with the other examples
+CIFARDIR := ../../utils/datagen/cifar
+
+LOCAL_ARM_NEON := true
+LOCAL_CFLAGS += -std=c++17 -Ofast -mcpu=cortex-a53 -Ilz4-nougat/lib
+LOCAL_LDFLAGS += -Llz4-nougat/lib/obj/local/$(TARGET_ARCH_ABI)/
+LOCAL_CXXFLAGS += -std=c++17 -frtti
+LOCAL_CFLAGS += -pthread -fexceptions -fopenmp
+LOCAL_LDFLAGS += -fexceptions
+LOCAL_MODULE_TAGS := optional
+LOCAL_ARM_MODE := arm
+LOCAL_MODULE := nntrainer_Layers
+LOCAL_LDLIBS := -llog -landroid -fopenmp
+
+LOCAL_SRC_FILES := main.cpp $(CIFARDIR)/cifar_dataloader.cpp
+
+LOCAL_SHARED_LIBRARIES := nntrainer ccapi-nntrainer
+
+LOCAL_C_INCLUDES += $(NNTRAINER_INCLUDES) $(CIFARDIR)
+
+include $(BUILD_EXECUTABLE)
--- /dev/null
+APP_ABI := arm64-v8a
+APP_STL := c++_shared
+APP_PLATFORM := android-29
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2023 DongHak Park <donghak.park@samsung.com>
+ *
+ * @file main.cpp
+ * @date 26 Jan 2023
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Donghak Park <donghak.park@samsung.com>
+ * @bug No known bugs except for NYI items
+ * @brief This is a single-layer training example configured with an ini file
+ *
+ */
+
+#include <array>
+#include <ctime>
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <sstream>
+#include <stdlib.h>
+#include <vector>
+
+#include <cifar_dataloader.h>
+#include <layer.h>
+#include <model.h>
+#include <optimizer.h>
+
+using LayerHandle = std::shared_ptr<ml::train::Layer>;
+using ModelHandle = std::unique_ptr<ml::train::Model>;
+using UserDataType = std::unique_ptr<nntrainer::util::DataLoader>;
+
+unsigned int DATA_SIZE;       /**< number of dummy samples per epoch */
+unsigned int BATCH_SIZE;      /**< batch size read from the ini file */
+unsigned int INPUT_SHAPE[3];  /**< channel:height:width of the model input */
+unsigned int OUTPUT_SHAPE[3]; /**< channel:height:width of the model output */
+unsigned int seed;            /**< random seed for the dummy data */
+
+float training_loss = 0.0;
+float last_batch_loss = 0.0;
+
+/**
+ * Dataset callback: copies the next dummy batch from the RandomDataLoader
+ * into the buffers provided by nntrainer.
+ */
+int trainData_cb(float **input, float **label, bool *last, void *user_data) {
+ auto data = reinterpret_cast<nntrainer::util::DataLoader *>(user_data);
+ data->next(input, label, last);
+ return 0;
+}
+
+/**
+ * Creates a RandomDataLoader whose tensors match the model's input/output
+ * dimensions (filled in from the ini file after model->initialize()).
+ */
+std::array<UserDataType, 1> createFakeDataGenerator(unsigned int batch_size) {
+
+ UserDataType train_data(new nntrainer::util::RandomDataLoader(
+ {{batch_size, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2]}},
+ {{batch_size, OUTPUT_SHAPE[0], OUTPUT_SHAPE[1], OUTPUT_SHAPE[2]}},
+ DATA_SIZE));
+
+ return {std::move(train_data)};
+}
+
+int main(int argc, char *argv[]) {
+ int status = 0;
+ seed = time(NULL);
+ srand(seed);
+
+ if (argc < 2) {
+ std::cout << "Usage: " << argv[0] << " <model_config>" << std::endl;
+ return -1;
+ }
+
+ auto config = argv[1];
+
+ std::unique_ptr<ml::train::Model> model;
+
+ try {
+    model = ml::train::createModel(ml::train::ModelType::NEURAL_NET);
+ } catch (std::exception &e) {
+ std::cerr << "Error while creating model! details: " << e.what()
+ << std::endl;
+ return 1;
+ }
+
+ try {
+ model->load(config, ml::train::ModelFormat::MODEL_FORMAT_INI);
+ } catch (std::exception &e) {
+ std::cerr << "Error while loading model! details: " << e.what()
+ << std::endl;
+ return 1;
+ }
+
+ try {
+ model->compile();
+ } catch (std::exception &e) {
+ std::cerr << "Error while compiling model! details: " << e.what()
+ << std::endl;
+ return 1;
+ }
+
+ try {
+ model->initialize();
+ } catch (std::exception &e) {
+ std::cerr << "Error while initializing model! details: " << e.what()
+ << std::endl;
+ return 1;
+ }
+
+  // Query the dimensions resolved from the ini file so the dummy data
+  // generator produces tensors of matching shape.
+  auto input_dim = model->getInputDimension();
+ auto output_dim = model->getOutputDimension();
+
+ INPUT_SHAPE[0] = input_dim[0].channel();
+ INPUT_SHAPE[1] = input_dim[0].height();
+ INPUT_SHAPE[2] = input_dim[0].width();
+ OUTPUT_SHAPE[0] = output_dim[0].channel();
+ OUTPUT_SHAPE[1] = output_dim[0].height();
+ OUTPUT_SHAPE[2] = output_dim[0].width();
+ DATA_SIZE = input_dim[0].batch();
+ BATCH_SIZE = input_dim[0].batch();
+
+ std::array<UserDataType, 1> user_datas;
+
+ try {
+ user_datas = createFakeDataGenerator(DATA_SIZE);
+ } catch (std::exception &e) {
+ std::cerr << "uncaught error while creating data generator! details: "
+ << e.what() << std::endl;
+ return 1;
+ }
+
+ auto &[train_user_data] = user_datas;
+
+ std::unique_ptr<ml::train::Dataset> dataset_train;
+ try {
+ dataset_train = ml::train::createDataset(
+ ml::train::DatasetType::GENERATOR, trainData_cb, train_user_data.get());
+ } catch (std::exception &e) {
+ std::cerr << "uncaught error while creating dataset! details: " << e.what()
+ << std::endl;
+ return 1;
+ }
+
+ try {
+ model->setDataset(ml::train::DatasetModeType::MODE_TRAIN,
+ std::move(dataset_train));
+ } catch (std::exception &e) {
+ std::cerr << "uncaught error while setting dataset! details: " << e.what()
+ << std::endl;
+ return 1;
+ }
+ try {
+ model->train();
+ training_loss = model->getTrainingLoss();
+ last_batch_loss = model->getLoss();
+ } catch (std::exception &e) {
+ std::cerr << "uncaught error while training! details: " << e.what()
+ << std::endl;
+ return 1;
+ }
+
+  std::cout << "Training is finished" << std::endl;
+  std::cout << "training_loss: " << training_loss
+            << " last_batch_loss: " << last_batch_loss << std::endl;
+
+ return status;
+}
--- /dev/null
+build_root = meson.build_root()
+res_path = meson.current_source_dir() / '..' / 'res'
+
+nntr_Layers_resdir = nntr_app_resdir / 'Layers'
+run_command('cp', '-lr', res_path, nntr_Layers_resdir)
+
+Layers_sources = [
+ 'main.cpp',
+ cifar_path / 'cifar_dataloader.cpp'
+]
+
+executable('nntrainer_Layers',
+ Layers_sources,
+ dependencies: [iniparser_dep, nntrainer_dep, nntrainer_ccapi_dep, app_utils_dep],
+ include_directories: [include_directories('.'), cifar_include_dir],
+ install: get_option('install-app'),
+ install_dir: application_install_dir
+)
--- /dev/null
+[Model]
+Type = NeuralNetwork
+Epochs = 100
+Loss = mse
+batch_size = 64
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4
+
+[inputlayer]
+Type = input
+Input_Shape = 3:224:224
+
+[conv2d_c1_layer]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
--- /dev/null
+[Model]
+Type = NeuralNetwork
+Epochs = 100
+Loss = mse
+batch_size = 64
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4
+
+[inputlayer]
+Type = input
+Input_Shape = 1:1:150528
+
+[LSTMLayer]
+Type = lstm
+input_layers = inputlayer
+Unit = 10
--- /dev/null
+[Model]
+Type = NeuralNetwork
+Epochs = 10
+Loss = mse
+batch_size = 64
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4
+
+[inputlayer]
+Type = input
+Input_Shape = 1:1:150528
+
+[outputlayer]
+Type = fully_connected
+input_layers=inputlayer
+Unit = 300
--- /dev/null
+[Model]
+Type = NeuralNetwork
+Epochs = 100
+Loss = mse
+batch_size = 64
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4
+
+[inputlayer]
+Type = input
+Input_Shape = 3:224:224
+
+[conv2d_c1_layer]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
+
+[Conv2]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
+
+[Conv3]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
--- /dev/null
+[Model]
+Type = NeuralNetwork
+Epochs = 10
+Loss = mse
+batch_size = 2048
+
+[Optimizer]
+Type = sgd
+
+[inputlayer]
+Type = input
+Input_Shape = 1:1:784
+
+[FC1]
+Type = fully_connected
+Unit = 4096
+
+[FC2]
+Type = fully_connected
+Unit = 2048
+
+[outputlayer]
+Type = fully_connected
+Unit = 100
--- /dev/null
+[Model]
+Type = NeuralNetwork
+Epochs = 100
+Loss = mse
+batch_size = 64
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4
+
+[inputlayer]
+Type = input
+Input_Shape = 3:224:224
+
+[conv2d_c1_layer]
+Type = conv2d
+kernel_size = 3,3
+filters = 3
+stride = 2,2
+padding = "same"
+Activation = relu
+
+[flatten_layer]
+Type = Flatten
--- /dev/null
+[Model]
+Type = NeuralNetwork
+Epochs = 100
+Loss = mse
+batch_size = 64
+
+[Optimizer]
+Type = sgd
+
+[LearningRateScheduler]
+type=constant
+Learning_rate = 1e-4
+
+[inputlayer]
+Type = input
+Input_Shape = 1:1:150528
+
+[FC1]
+Type = fully_connected
+Activation = relu
+Unit = 10
+
+[outputlayer]
+Type = Flatten
#### [Alexnet Example](https://github.com/nnstreamer/nntrainer/tree/main/Applications/AlexNet)
An example to train Alexnet(Fused) network with CIFAR100 Dataset
+
+#### [Layers Example](https://github.com/nnstreamer/nntrainer/tree/main/Applications/Layers)
+
+An example to train single layers with a dummy dataset
subdir('Custom')
subdir('ProductRatings/jni')
subdir('AlexNet/jni')
+subdir('Layers/jni')
if get_option('enable-tflite-backbone')
subdir('SimpleShot')
endif