[nnstreamer] Create nnstreamer tensor_trainer subplugin
author: hyunil park <hyunil46.park@samsung.com>
Fri, 2 Dec 2022 06:37:33 +0000 (15:37 +0900)
committer: Jijoong Moon <jijoong.moon@samsung.com>
Wed, 22 Mar 2023 10:17:14 +0000 (19:17 +0900)
- Create nnstreamer tensor_trainer subplugin
- Create libnnstreamer_trainer_nntrainer.so

The subplugin receives GstTensorTrainerProperties from the nnstreamer tensor_trainer
to create a dataset and model. It receives tensor data from tensor_trainer
and trains the model.
The subplugin is created by using GstTensorTrainerFramework, and tensor_trainer
calls create, destroy, train and invoke_NN.

**Self evaluation:**
1. Build test: [x]Passed []Failed []Skipped
2. Run test: [x]Passed []Failed []Skipped

Signed-off-by: hyunil park <hyunil46.park@samsung.com>
Applications/TransferLearning/Draw_Classification/jni/meson.build
debian/rules
meson.build
meson_options.txt
nnstreamer/tensor_filter/meson.build
nnstreamer/tensor_trainer/meson.build [new file with mode: 0644]
nnstreamer/tensor_trainer/tensor_trainer_nntrainer.cc [new file with mode: 0644]
nnstreamer/tensor_trainer/tensor_trainer_nntrainer.hh [new file with mode: 0644]
packaging/nntrainer.spec
test/meson.build

index f2757de..8ee3707 100644 (file)
@@ -30,7 +30,7 @@ nntrainer_filter_env = environment()
 nntrainer_filter_env.set('GTEST_OUTPUT', 'xml:@0@/@1@.xml'.format(meson.build_root(), 'app_draw_classification'))
 nntrainer_filter_env.set('NNSTREAMER_FILTERS', meson.build_root() / 'nnstreamer' / 'tensor_filter')
 
-if get_option('enable-nnstreamer-tensor-filter')
+if get_option('enable-nnstreamer-tensor-filter').enabled()
    test('app_draw_classification', e, env: nntrainer_filter_env,
      args: [nntr_draw_resdir / 'Training.ini', nntr_draw_resdir], timeout: 150)
 endif
index 31deec7..eb80f70 100755 (executable)
@@ -37,7 +37,8 @@ override_dh_auto_configure:
                --includedir=include -Dinstall-app=true \
                -Dreduce-tolerance=$(ENABLE_REDUCE_TOLERANCE) \
                -Denable-debug=$(ENABLE_DEBUG) \
-               -Dml-api-support=enabled -Denable-nnstreamer-tensor-filter=true \
+               -Dml-api-support=enabled -Denable-nnstreamer-tensor-filter=enabled \
+                               -Denable-nnstreamer-tensor-trainer=enabled \
                 -Denable-nnstreamer-backbone=true \
                 -Dcapi-ml-common-actual=capi-ml-common \
                 -Dcapi-ml-inference-actual=capi-ml-inference \
index d0ba3a6..6cf660d 100644 (file)
@@ -367,13 +367,24 @@ if get_option('enable-app')
   endif
 endif
 
-if get_option('enable-nnstreamer-tensor-filter')
-  if get_option('platform') == 'android'
-    warning('android nnstreamer-filter is not yet supported, building nnstreamer-filter skipped')
-  else
-    nnstreamer_dep = dependency('nnstreamer', required: true)
-    subdir('nnstreamer/tensor_filter')
-  endif
+if get_option('enable-nnstreamer-tensor-filter').enabled()
+       if get_option('platform') == 'android'
+               warning('android nnstreamer-filter is not yet supported, building nnstreamer-filter skipped')
+       else
+    nnstreamer_filter_dep = dependency('nnstreamer')
+               subdir('nnstreamer/tensor_filter')
+               extra_defines += '-DSUPPORT_NNSREAMER_PLUGIN_FILTER=1'
+       endif
+endif
+
+if get_option('enable-nnstreamer-tensor-trainer').enabled()
+       if get_option('platform') == 'android'
+               warning('android nnstreamer-trainer is not yet supported, building nnstreamer-trainer skipped')
+       else
+    nnstreamer_trainer_dep = dependency('nnstreamer')
+               subdir('nnstreamer/tensor_trainer')
+               extra_defines += '-DSUPPORT_NNSREAMER_PLUGIN_TRAINER=1'
+       endif
 endif
 
 if get_option('platform') == 'android'
index 992b85a..ab57b5a 100644 (file)
@@ -41,5 +41,6 @@ option('enable-openmp', type: 'boolean', value: true)
 # If this is disabled, related options (capi-ml-*) are ignored.
 option('ml-api-support', type: 'feature', value: 'auto')
 # @todo : make them use 'feature' and depend on ml-api-support
-option('enable-nnstreamer-tensor-filter', type: 'boolean', value: false)
+option('enable-nnstreamer-tensor-filter', type: 'feature', value: 'auto')
+option('enable-nnstreamer-tensor-trainer', type: 'feature', value: 'auto')
 option('nnstreamer-subplugin-install-path', type: 'string', value: '/usr/lib/nnstreamer') # where nnstreamer subplugin should be installed
index 9e19da9..36fe4d1 100644 (file)
@@ -13,7 +13,7 @@ gst_dep = dependency('gstreamer-'+gst_api_version)
 
 nntrainer_prefix = get_option('prefix')
 
-nnstreamer_filter_nntrainer_deps = [glib_dep, gmodule_dep, gst_dep, nntrainer_ccapi_dep, nnstreamer_dep]
+nnstreamer_filter_nntrainer_deps = [glib_dep, gmodule_dep, gst_dep, nntrainer_ccapi_dep, nnstreamer_filter_dep]
 
 nnstreamer_libdir = nntrainer_prefix / get_option('libdir')
 subplugin_install_prefix = get_option('nnstreamer-subplugin-install-path')
diff --git a/nnstreamer/tensor_trainer/meson.build b/nnstreamer/tensor_trainer/meson.build
new file mode 100644 (file)
index 0000000..7cf7be9
--- /dev/null
@@ -0,0 +1,36 @@
+trainer_subplugin_sources = ['tensor_trainer_nntrainer.cc']
+
+nnstreamer_trainer_nntrainer_sources = []
+foreach s : trainer_subplugin_sources
+  nnstreamer_trainer_nntrainer_sources += meson.current_source_dir() / s
+endforeach
+
+# TODO: remove gstreamer dependency by updating nnstreamer_plugin_api.h
+gst_api_version = '1.0'
+glib_dep = dependency('glib-2.0')
+gmodule_dep = dependency('gmodule-2.0')
+gst_dep = dependency('gstreamer-'+gst_api_version)
+
+nntrainer_prefix = get_option('prefix')
+
+nnstreamer_trainer_nntrainer_deps = [glib_dep, gmodule_dep, gst_dep, nntrainer_ccapi_dep, nnstreamer_trainer_dep]
+
+nnstreamer_libdir = nntrainer_prefix / get_option('libdir')
+subplugin_install_prefix = get_option('nnstreamer-subplugin-install-path')
+trainer_subplugin_install_dir = subplugin_install_prefix / 'trainers'
+
+shared_library('nnstreamer_trainer_nntrainer',
+  nnstreamer_trainer_nntrainer_sources,
+  dependencies: nnstreamer_trainer_nntrainer_deps,
+  include_directories: [nntrainer_inc, '.'], # '.' shouldn't be installed
+  install: true,
+  install_dir: trainer_subplugin_install_dir
+)
+
+static_library('nnstreamer_trainer_nntrainer',
+  nnstreamer_trainer_nntrainer_sources,
+  dependencies: nnstreamer_trainer_nntrainer_deps,
+  include_directories: [nntrainer_inc, '.'], # '.' shouldn't be installed
+  install: true,
+  install_dir: nnstreamer_libdir
+)
diff --git a/nnstreamer/tensor_trainer/tensor_trainer_nntrainer.cc b/nnstreamer/tensor_trainer/tensor_trainer_nntrainer.cc
new file mode 100644 (file)
index 0000000..130afcb
--- /dev/null
@@ -0,0 +1,458 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * NNStreamer tensor_trainer subplugin for nntrainer
+ * Copyright (C) 2022 Hyunil Park <hyunil46.park@samsung.com>
+ */
+/**
+ * @file   tensor_trainer_nntrainer.cc
+ * @date   02 Dec 2022
+ * @brief  NNStreamer tensor_trainer subplugin
+ * @see    http://github.com/nnstreamer/nnstreamer
+ * @author Hyunil Park <hyunil46.park@samsung.com>
+ * @bug    No known bugs except for NYI items
+ */
+#include "tensor_trainer_nntrainer.hh"
+#include <cstring>
+#include <iostream>
+#include <nntrainer_log.h>
+#include <pthread.h>
+#include <random>
+#include <sys/syscall.h>
+#include <thread>
+#include <unistd.h>
+
+#define UNUSED(expr) \
+  do {               \
+    (void)(expr);    \
+  } while (0)
+
+/**
+ * @brief startup constructor
+ */
+void init_subplugin_nntrainer(void) __attribute__((constructor));
+
+/**
+ * @brief startdown destructor
+ */
+void fini_subplugin_nntrainer(void) __attribute__((destructor));
+
+void nntrainer_thread_func(NNTrainer::NNTrainerTrain *nntrainer) {
+  nntrainer->trainModel();
+}
+
+static int nntrainer_model_invoke(const GstTensorTrainerFramework *fw,
+                                  const GstTensorTrainerProperties *prop,
+                                  void *private_data,
+                                  const GstTensorMemory *input) {
+  NNTrainer::InputTensorsInfo *data = nullptr;
+  NNTrainer::NNTrainerTrain *nntrainer =
+    reinterpret_cast<NNTrainer::NNTrainerTrain *>(private_data);
+  UNUSED(fw);
+  ml_logd("<called>");
+
+  if (!nntrainer) {
+    ml_loge("Failed get nntrainer");
+    return -1;
+  }
+  UNUSED(prop);
+
+  if (nntrainer->train_data->push_count == nntrainer->num_train_samples &&
+      nntrainer->valid_data->push_count == nntrainer->num_valid_samples) {
+    ml_logd("data is full");
+    return 0;
+  }
+
+  if (nntrainer->train_data->push_count < nntrainer->num_train_samples) {
+    data = nntrainer->train_data.get();
+    ml_logd("#### T-Data ####");
+  } else if (nntrainer->valid_data->push_count < nntrainer->num_valid_samples) {
+    data = nntrainer->valid_data.get();
+    ml_logd("#### V-Data ####");
+  }
+
+  ml_logd("number of inputs(%d) and labels(%d)", nntrainer->num_inputs,
+          nntrainer->num_labels);
+
+  NNTrainer::TensorData tensor_data;
+  int64_t idx = 0, i = 0;
+  char *p_data = nullptr;
+  for (i = 0; i < data->num_inputs; i++) {
+    ml_logd("input[%d]:%p, size:%zd\n", i, input[i].data, input[i].size);
+    p_data = new char[input[idx].size];
+    std::memcpy(p_data, input[idx].data, input[idx].size);
+    tensor_data.inputs.emplace_back(p_data);
+    ml_logd("input[%d].data = p %p\n", idx, (input[idx].data));
+    ml_logd("tensor_data.inputs[%d] = %p\n", idx, tensor_data.inputs[idx]);
+    idx++;
+  }
+  for (i = 0; i < data->num_labels; i++) {
+    p_data = new char[input[idx].size];
+    std::memcpy(p_data, input[idx].data, input[idx].size);
+    tensor_data.labels.emplace_back(p_data);
+    idx++;
+  }
+
+  data->tensor_data.emplace_back(tensor_data);
+  data->push_count++;
+
+  ml_logd("(pop/push: %d/%d)", data->pop_count, data->push_count);
+
+#if 0
+  for (auto data : data->tensor_data) {
+    for (auto inputs : data.inputs) {
+      ml_logd("##I addr:%p", inputs);
+    }
+    for (auto labels : data.labels) {
+      ml_logd("##L addr:%p", labels);
+    }
+  }
+#endif
+
+  if (data->is_mutex_locked && data->push_count > data->pop_count) {
+    pthread_mutex_lock(&data->mutex);
+    ml_logd("send signal");
+    pthread_cond_signal(&data->cond);
+    pthread_mutex_unlock(&data->mutex);
+  }
+
+  ml_logd("T-pushed:%d/%d, V-pushed:%d/%d\n", nntrainer->train_data->push_count,
+          nntrainer->num_train_samples, nntrainer->valid_data->push_count,
+          nntrainer->num_valid_samples);
+
+  ml_logd("<leaved>");
+  return 0;
+}
+
+int getSample(float **input, float **label, bool *last, void *user_data) {
+
+  auto data = reinterpret_cast<NNTrainer::InputTensorsInfo *>(user_data);
+
+  ml_logd("<called>");
+  ml_logd("(pop/push: %d/%d)", data->pop_count, data->push_count);
+  pid_t pid = getpid();
+  pid_t tid = syscall(SYS_gettid);
+
+  ml_logd("<called>");
+  ml_logd("pid[%d], tid[%d]", pid, tid);
+
+  if (data->push_count <= data->pop_count) {
+    pthread_mutex_lock(&data->mutex);
+    data->is_mutex_locked = TRUE;
+    ml_logd("locked, need to wait for more data");
+    pthread_cond_wait(&data->cond, &data->mutex);
+    ml_logd("unlocked, get data");
+    pthread_mutex_unlock(&data->mutex);
+    data->is_mutex_locked = FALSE;
+  }
+
+  ml_logd("num_inputs: %d, num_labels: %d", data->num_inputs, data->num_labels);
+
+  int64_t i = 0;
+  int idx = data->pop_count;
+  ml_logd("pop idx: %d", idx);
+
+  for (i = 0; i < data->num_inputs; i++) {
+    ml_logd("memcpy Addr %p, %p, size=%d\n", *(input + i),
+            data->tensor_data[idx].inputs[i], data->input_size[i]);
+    std::memcpy(*(input + i), data->tensor_data[idx].inputs[i],
+                data->input_size[i]);
+  }
+  for (i = 0; i < data->num_labels; i++) {
+    ml_logd("memcpy Addr %p, %p, size=%d", *(label + i),
+            data->tensor_data[idx].labels[i], data->label_size[i]);
+    std::memcpy(*(label + i), data->tensor_data[idx].labels[i],
+                data->label_size[i]);
+  }
+
+  data->pop_count++;
+
+  ml_logd("(pop/push: %d/%d)", data->pop_count, data->push_count);
+
+  if (data->pop_count < data->num_samples) {
+    *last = false;
+  } else {
+    *last = true;
+    data->pop_count = 0;
+
+    std::random_device rd;
+    std::mt19937 g(rd());
+    std::shuffle(data->tensor_data.begin(), data->tensor_data.end(), g);
+  }
+
+  ml_logd("<leave>");
+
+  return 0;
+}
+
+void NNTrainer::NNTrainerTrain::createDataset() {
+
+  ml_logd("<called>");
+
+  train_data = std::make_unique<NNTrainer::InputTensorsInfo>(
+    num_train_samples, num_inputs, num_labels, tensors_inputsize);
+  valid_data = std::make_unique<NNTrainer::InputTensorsInfo>(
+    num_valid_samples, num_inputs, num_labels, tensors_inputsize);
+
+  if (num_train_samples) {
+    dataset_train = ml::train::createDataset(ml::train::DatasetType::GENERATOR,
+                                             getSample, train_data.get());
+  }
+  if (num_valid_samples) {
+    dataset_valid = ml::train::createDataset(ml::train::DatasetType::GENERATOR,
+                                             getSample, valid_data.get());
+  }
+  ml_logd("<leave>");
+}
+
+NNTrainer::InputTensorsInfo::InputTensorsInfo(int64_t _num_samples,
+                                              int64_t _num_inputs,
+                                              int64_t _num_labels,
+                                              int64_t _tensors_inputsize[]) :
+  is_mutex_locked(0),
+  push_count(0),
+  pop_count(0),
+  num_samples(_num_samples),
+  num_inputs(_num_inputs),
+  num_labels(_num_labels) {
+
+  ml_logd("<called>");
+
+  tensor_data.reserve(_num_samples);
+  pthread_mutex_init(&mutex, NULL);
+  pthread_cond_init(&cond, NULL);
+
+  int64_t idx = 0, i = 0;
+  for (i = 0; i < num_inputs; i++) {
+    input_size[i] = _tensors_inputsize[idx++];
+    ml_logd("input_size[%d]=%d", i, input_size[i]);
+  }
+  for (i = 0; i < num_labels; i++) {
+    label_size[i] = _tensors_inputsize[idx++];
+    ml_logd("label_size[%d]=%d", i, label_size[i]);
+  }
+
+  ml_logd("<leave>");
+}
+
+NNTrainer::InputTensorsInfo::~InputTensorsInfo() {
+  g_print("%s:%d:%s: <called>\n", __FILE__, __LINE__, __func__);
+
+  for (auto data : tensor_data) {
+    for (auto inputs : data.inputs) {
+      ml_logd("free: ##I addr:%p", inputs);
+      delete inputs;
+    }
+    for (auto labels : data.labels) {
+      ml_logd("free: ##L addr:%p", labels);
+      delete labels;
+    }
+  }
+}
+
+void NNTrainer::NNTrainerTrain::getNNStreamerProperties(
+  const GstTensorTrainerProperties *prop) {
+
+  int64_t i;
+  ml_logd("<called>");
+
+  num_tensors = prop->input_meta.num_tensors;
+  ml_logd("num_tensors: %d", num_tensors);
+
+  for (i = 0; i < num_tensors; i++) {
+    tensors_inputsize[i] = gst_tensor_info_get_size(&prop->input_meta.info[i]);
+    ml_logd("tensors_inputsize[%d]:%d", i, tensors_inputsize[i]);
+  }
+  // for mnist test
+#if 0
+  tensors_inputsize[1] = 40;
+  // tensors_inputsize[0] = 3686400; // 3:640:480:1 float32, 3x640x480x1x4
+  // tensors_inputsize[1] = 40;      // 1:1:10:1 uint8, 1x1x10x1x4
+  ml_logd("for Test: tensors_inputsize[1]:%d", tensors_inputsize[1]);
+#endif
+  num_inputs = prop->num_inputs;
+  num_labels = prop->num_labels;
+  num_train_samples = prop->num_train_samples;
+  num_valid_samples = prop->num_valid_samples;
+  model_save_path = prop->model_save_path;
+  train_complete_cond = prop->train_complete_cond;
+
+  ml_logd("num_inputs: %d", num_inputs);
+  ml_logd("num_labels: %d", num_labels);
+  ml_logd("num_train_samples: %d", num_train_samples);
+  ml_logd("num_valid_samples: %d", num_valid_samples);
+  ml_logd("model_config: %s", model_config.c_str());
+  ml_logd("model_config: %s", model_save_path.c_str());
+  ml_logd("<leave>");
+}
+
+static int nntrainer_model_destructor(const GstTensorTrainerFramework *fw,
+                                      const GstTensorTrainerProperties *prop,
+                                      void **private_data) {
+  NNTrainer::NNTrainerTrain *nntrainer =
+    static_cast<NNTrainer::NNTrainerTrain *>(*private_data);
+  UNUSED(fw);
+  ml_logd("<called>");
+
+  if (!nntrainer)
+    return -1;
+
+  delete nntrainer;
+  *private_data = NULL;
+  ml_logd("<leave>");
+
+  return 0;
+}
+
+static int nntrainer_model_train(const GstTensorTrainerFramework *fw,
+                                 const GstTensorTrainerProperties *prop,
+                                 void *private_data) {
+  NNTrainer::NNTrainerTrain *nntrainer =
+    reinterpret_cast<NNTrainer::NNTrainerTrain *>(private_data);
+  UNUSED(fw);
+
+  ml_logd("<called>");
+  if (!nntrainer) {
+    ml_loge("Failed get nntrainer");
+  }
+  try {
+    std::thread train_thread(nntrainer_thread_func, nntrainer);
+    train_thread.detach();
+  } catch (const std::exception &e) {
+    ml_loge("Error %s, %s", typeid(e).name(), e.what());
+    return -1;
+  }
+  ml_logd("<leave>");
+  return 0;
+}
+
+void NNTrainer::NNTrainerTrain::trainModel() {
+  pid_t pid = getpid();
+  pid_t tid = syscall(SYS_gettid);
+
+  ml_logd("<called>");
+  ml_logd("pid[%d], tid[%d]", pid, tid);
+
+  try {
+    model->train();
+    training_loss = model->getTrainingLoss();
+    validation_loss = model->getValidationLoss();
+  } catch (const std::exception &e) {
+    ml_loge("Error %s, %s", typeid(e).name(), e.what());
+    return;
+  }
+  ml_logd("training_loss: %f, validation_loss: %f", training_loss,
+          validation_loss);
+  try {
+    ml_logd("Save_model: %s", model_save_path.c_str());
+    model->save(model_save_path, ml::train::ModelFormat::MODEL_FORMAT_BIN);
+    ml_logd("send train_complete_cond signal");
+    g_cond_signal(train_complete_cond);
+
+  } catch (const std::exception &e) {
+    ml_loge("Error %s, %s", typeid(e).name(), e.what());
+    return;
+  }
+  ml_logd("<leave>");
+}
+
+void NNTrainer::NNTrainerTrain::createModel() {
+  ml_logd("<called>");
+  try {
+    model = ml::train::createModel(ml::train::ModelType::NEURAL_NET);
+  } catch (const std::exception &e) {
+    ml_loge("Error %s, %s", typeid(e).name(), e.what());
+    return;
+  }
+  try {
+
+    model->load(model_config,
+                ml::train::ModelFormat::MODEL_FORMAT_INI_WITH_BIN);
+  } catch (const std::exception &e) {
+    ml_loge("Error %s, %s", typeid(e).name(), e.what());
+    return;
+  }
+  try {
+    model->compile();
+    model->initialize();
+    model->setDataset(ml::train::DatasetModeType::MODE_TRAIN, dataset_train);
+    model->setDataset(ml::train::DatasetModeType::MODE_VALID, dataset_valid);
+  } catch (const std::exception &e) {
+    ml_loge("Error %s, %s", typeid(e).name(), e.what());
+    return;
+  }
+  ml_logd("<leave>");
+}
+
+NNTrainer::NNTrainerTrain::NNTrainerTrain(
+  const GstTensorTrainerProperties *prop, const std::string &_model_config) :
+  model_config(_model_config) {
+  ml_logd("<called>");
+  getNNStreamerProperties(prop);
+  createDataset();
+  createModel();
+  ml_logd("<leave>");
+}
+
+static int
+nntrainer_model_construct_with_conf(const GstTensorTrainerFramework *fw,
+                                    const GstTensorTrainerProperties *prop,
+                                    void **private_data) {
+  NNTrainer::NNTrainerTrain *nntrainer =
+    static_cast<NNTrainer::NNTrainerTrain *>(*private_data);
+  ml_logd("<called>");
+  if (nntrainer)
+    nntrainer_model_destructor(fw, prop, private_data);
+
+  try {
+    nntrainer = new NNTrainer::NNTrainerTrain(prop, prop->model_config);
+  } catch (const std::exception &e) {
+    ml_loge("Error %s, %s", typeid(e).name(), e.what());
+    return -1;
+  }
+
+  *private_data = nntrainer;
+
+  ml_logd("<leave>");
+  return 0;
+}
+
+static int nntrainer_model_construct(const GstTensorTrainerFramework *fw,
+                                     const GstTensorTrainerProperties *prop,
+                                     void **private_data) {
+  ml_logd("<called>");
+  int status = nntrainer_model_construct_with_conf(fw, prop, private_data);
+
+  ml_logd("<leave>");
+  return status;
+}
+
+static int nntrainer_getFrameworkInfo(const GstTensorTrainerFramework *fw,
+                                      const GstTensorTrainerProperties *prop,
+                                      void *private_data,
+                                      GstTensorTrainerFrameworkInfo *fw_info) {
+  static gchar subplugin_name[] = "nntrainer";
+  ml_logd("<called>");
+  UNUSED(fw);
+  UNUSED(prop);
+  UNUSED(private_data);
+
+  fw_info->name = subplugin_name;
+  ml_logd("<leave>");
+  return 0;
+}
+
+static GstTensorTrainerFramework NNS_Trainer_support_nntrainer = {
+  .version = GST_TENSOR_TRAINER_FRAMEWORK_V1,
+  .create = nntrainer_model_construct,
+  .destroy = nntrainer_model_destructor,
+  .train = nntrainer_model_train,
+  .invoke = nntrainer_model_invoke,
+  .getFrameworkInfo = nntrainer_getFrameworkInfo};
+
+void init_subplugin_nntrainer(void) {
+  nnstreamer_trainer_probe(&NNS_Trainer_support_nntrainer);
+}
+
+void fini_subplugin_nntrainer(void) {
+  nnstreamer_trainer_exit(&NNS_Trainer_support_nntrainer);
+}
diff --git a/nnstreamer/tensor_trainer/tensor_trainer_nntrainer.hh b/nnstreamer/tensor_trainer/tensor_trainer_nntrainer.hh
new file mode 100644 (file)
index 0000000..77a28f6
--- /dev/null
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: Apache-2.0 */
+/**
+ * NNStreamer tensor_trainer subplugin for nntrainer
+ * Copyright (C) 2022 Hyunil Park <hyunil46.park@samsung.com>
+ */
+/**
+ * @file   tensor_trainer_nntrainer.hh
+ * @date   02 Dec 2022
+ * @brief  NNStreamer tensor_trainer subplugin header
+ * @see    http://github.com/nnstreamer/nnstreamer
+ * @author Hyunil Park <hyunil46.park@samsung.com>
+ * @bug    No known bugs except for NYI items
+ */
+
+#include <model.h>
+#include <nnstreamer_plugin_api.h>
+#include <nnstreamer_plugin_api_trainer.h>
+#include <vector>
+
+namespace NNTrainer {
+
+/**
+ * @brief Manage multiple inputs and labels data
+ */
+struct TensorData {
+  std::vector<char *> inputs;
+  std::vector<char *> labels;
+};
+
+class InputTensorsInfo;
+
+/**
+ * @brief NNTrainer interface for nnstreamer trainer subplugin
+ */
+class NNTrainerTrain {
+public:
+  /**
+   * @brief Construct a new NNTrainerTrain object
+   * @param prop tensor trainer subplugin properties
+   * @param _model_config model configuration file path
+   */
+  NNTrainerTrain(const GstTensorTrainerProperties *prop,
+                 const std::string &_model_config);
+
+  /**
+   * @brief Destroy the NNTrainerTrain object
+   */
+  ~NNTrainerTrain() = default;
+
+  /**
+   * @brief Create model
+   */
+  void createModel();
+
+  /**
+   * @brief Train model
+   */
+  void trainModel();
+
+  /**
+   * @brief Create dataset
+   */
+  void createDataset();
+
+  /**
+   * @brief Get NNStreamer tensor_trainer properties
+   * @param prop Tensor trainer subplugin properties
+   */
+  void getNNStreamerProperties(const GstTensorTrainerProperties *prop);
+
+  /**
+   * @brief Manage sample data
+   */
+  std::unique_ptr<NNTrainer::InputTensorsInfo> train_data, valid_data;
+  /**
+   * @brief Nntrainer dataset
+   */
+  std::shared_ptr<ml::train::Dataset> dataset_train, dataset_valid;
+  float training_loss, validation_loss;
+
+  int64_t tensors_inputsize[NNS_TENSOR_SIZE_LIMIT];
+  int64_t num_tensors;
+  int64_t num_inputs;
+  int64_t num_labels;
+  int64_t num_train_samples;
+  int64_t num_valid_samples;
+  std::string model_config;
+  std::string model_save_path;
+
+  GCond *train_complete_cond;
+
+private:
+  std::unique_ptr<ml::train::Model> model;
+};
+
+/**
+ * @brief Manage input tensors data and information
+ */
+class InputTensorsInfo {
+public:
+  /**
+   * @brief Construct a new InputTensorsInfo object
+   * @param _num_samples number of samples
+   * @param _num_inputs number of inputs
+   * @param _num_labels number of labels
+   * @param _tensors_inputsize[] input tensors size
+   */
+  InputTensorsInfo(int64_t _num_samples, int64_t _num_inputs,
+                   int64_t _num_labels, int64_t _tensors_inputsize[]);
+
+  /**
+   * @brief Destroy the InputTensorsInfo object
+   */
+  ~InputTensorsInfo();
+
+  bool is_mutex_locked;
+  int64_t push_count;
+  int64_t pop_count;
+  int64_t input_size[NNS_TENSOR_SIZE_LIMIT]; // feature size * data type
+  int64_t label_size[NNS_TENSOR_SIZE_LIMIT];
+  int64_t num_samples;
+  int64_t num_inputs;
+  int64_t num_labels;
+
+  std::vector<TensorData> tensor_data;
+  pthread_mutex_t mutex;
+  pthread_cond_t cond;
+};
+} // namespace NNTrainer
index c271ca7..fa38858 100644 (file)
@@ -1,6 +1,7 @@
 # Execute gbs with --define "testcoverage 1" in case that you must get unittest coverage statistics
 %define         use_cblas 1
 %define         nnstreamer_filter 1
+%define         nnstreamer_trainer 1
 %define         nnstreamer_subplugin_path /usr/lib/nnstreamer
 %define         use_gym 0
 %define         support_ccapi 1
@@ -115,6 +116,7 @@ BuildRequires: tensorflow2-lite-devel
 %endif # support_tflite_interpreter
 
 %define enable_nnstreamer_tensor_filter -Denable-nnstreamer-tensor-filter=false
+%define enable_nnstreamer_tensor_trainer -Denable-nnstreamer-tensor-trainer=false
 
 %if  0%{?nnstreamer_filter}
 BuildRequires: nnstreamer-devel
@@ -126,15 +128,25 @@ BuildRequires:    nnstreamer-test-devel
 %endif
 BuildRequires: gst-plugins-good-extra
 BuildRequires: python
-%endif #unit_test
-%endif #nnstreamer_filter
-%endif  # tizen
+%endif # unit_test
+%endif # nnstreamer_filter
+
+%if  0%{?nnstreamer_trainer}
+BuildRequires: nnstreamer-devel
+%define enable_nnstreamer_tensor_trainer -Denable-nnstreamer-tensor-trainer=true
+%endif # nnstreamer_trainer
+%endif # tizen
 
 Requires:      nntrainer-core = %{version}-%{release}
 
 %if 0%{?nnstreamer_filter}
 Requires:      nnstreamer-nntrainer = %{version}-%{release}
 %endif #nnstreamer_filter
+
+%if 0%{?nnstreamer_trainer}
+Requires:      nnstreamer-nntrainer = %{version}-%{release}
+%endif #nnstreamer_trainer
+
 %if %{with tizen}
 Requires:      capi-machine-learning-training = %{version}-%{release}
 %endif #tizen
@@ -266,21 +278,37 @@ Static library of ccapi-machine-learning-training-devel package.
 %endif
 
 %if 0%{?nnstreamer_filter}
-%package -n nnstreamer-nntrainer
+%package -n nnstreamer-nntrainer-filter
 Summary: NNStreamer NNTrainer support
 Requires: %{name} = %{version}-%{release}
 Requires:      nnstreamer
-%description -n nnstreamer-nntrainer
+%description -n nnstreamer-nntrainer-filter
 NNSteamer tensor filter for nntrainer to support inference.
 
-%package -n nnstreamer-nntrainer-devel-static
+%package -n nnstreamer-nntrainer-filter-devel-static
 Summary: NNStreamer NNTrainer support
 Requires: nntrainer-devel-static = %{version}-%{release}
-Requires:      nnstreamer-nntrainer = %{version}-%{release}
-%description -n nnstreamer-nntrainer-devel-static
+Requires:      nnstreamer-nntrainer-filter = %{version}-%{release}
+%description -n nnstreamer-nntrainer-filter-devel-static
 NNSteamer tensor filter static package for nntrainer to support inference.
 %endif #nnstreamer_filter
 
+%if 0%{?nnstreamer_trainer}
+%package -n nnstreamer-nntrainer-trainer
+Summary: NNStreamer NNTrainer support
+Requires: %{name} = %{version}-%{release}
+Requires:      nnstreamer
+%description -n nnstreamer-nntrainer-trainer
+NNSteamer tensor trainer for nntrainer to support inference.
+
+%package -n nnstreamer-nntrainer-trainer-devel-static
+Summary: NNStreamer NNTrainer support
+Requires: nntrainer-devel-static = %{version}-%{release}
+Requires:      nnstreamer-nntrainer-trainer = %{version}-%{release}
+%description -n nnstreamer-nntrainer-trainer-devel-static
+NNSteamer tensor trainer static package for nntrainer to support inference.
+%endif #nnstreamer_trainer
+
 %endif #tizen
 
 ## Define build options
@@ -367,12 +395,12 @@ meson --buildtype=plain --prefix=%{_prefix} --sysconfdir=%{_sysconfdir} \
       --libdir=%{_libdir} --bindir=%{nntrainerapplicationdir} \
       --includedir=%{_includedir} %{install_app} %{platform} \
       %{enable_tizen_feature_check} %{enable_cblas} %{enable_ccapi} \
-      %{enable_gym} %{enable_nnstreamer_tensor_filter} %{enable_profile} \
-      %{enable_nnstreamer_backbone} %{enable_tflite_backbone} \
+      %{enable_gym} %{enable_nnstreamer_tensor_filter} %{enable_nnstreamer_tensor_trainer} \
+      %{enable_profile} %{enable_nnstreamer_backbone} %{enable_tflite_backbone} \
       %{enable_tflite_interpreter} %{capi_ml_pkg_dep_resolution} \
       %{enable_reduce_tolerance} %{configure_subplugin_install_path} %{enable_debug} \
-      -Dml-api-support=enabled -Denable-nnstreamer-tensor-filter=true \
-      -Denable-capi=enabled \
+      -Dml-api-support=enabled -Denable-nnstreamer-tensor-filter=enabled \
+      -Denable-nnstreamer-tensor-trainer=enabled -Denable-capi=enabled \
       build
 
 ninja -C build %{?_smp_mflags}
@@ -380,6 +408,7 @@ ninja -C build %{?_smp_mflags}
 %if 0%{?unit_test}
 export NNSTREAMER_CONF=$(pwd)/test/nnstreamer/nnstreamer-test.ini
 export NNSTREAMER_FILTERS=$(pwd)/build/nnstreamer/tensor_filter
+export NNSTREAMER_TRAINERS=$(pwd)/build/nnstreamer/tensor_trainer
 meson test -C build -t 2.0 --print-errorlogs
 
 # unittest for nntrainer plugin for nnstreamer
@@ -389,6 +418,11 @@ pushd test/nnstreamer
 ssat
 popd
 %endif #nnstreamer_filter
+%if 0%{?nnstreamer_trainer}
+pushd test/nnstreamer
+ssat
+popd
+%endif #nnstreamer_trainer
 %endif #unit_test
 
 %if 0%{?gcov:1}
@@ -544,19 +578,35 @@ cp -r result %{buildroot}%{_datadir}/nntrainer/unittest/
 %endif # support_ccapi
 
 %if 0%{?nnstreamer_filter}
-%files -n nnstreamer-nntrainer
+%files -n nnstreamer-nntrainer-filter
 %manifest nntrainer.manifest
 %defattr(-,root,root,-)
 %license LICENSE
 %{nnstreamer_subplugin_path}/filters/libnnstreamer_filter_nntrainer.so
 
-%files -n nnstreamer-nntrainer-devel-static
+%files -n nnstreamer-nntrainer-filter-devel-static
 %manifest nntrainer.manifest
 %defattr(-,root,root,-)
 %license LICENSE
 %{_libdir}/libnnstreamer_filter_nntrainer.a
 
 %endif #nnstreamer_filter
+
+%if 0%{?nnstreamer_trainer}
+%files -n nnstreamer-nntrainer-trainer
+%manifest nntrainer.manifest
+%defattr(-,root,root,-)
+%license LICENSE
+%{nnstreamer_subplugin_path}/trainers/libnnstreamer_trainer_nntrainer.so
+
+%files -n nnstreamer-nntrainer-trainer-devel-static
+%manifest nntrainer.manifest
+%defattr(-,root,root,-)
+%license LICENSE
+%{_libdir}/libnnstreamer_trainer_nntrainer.a
+
+%endif #nnstreamer_trainer
+
 %endif #tizen
 
 %files applications
index 095b908..5a9d491 100644 (file)
@@ -42,11 +42,11 @@ endif
 
 nnstreamer_test_dep = dependency('nnstreamer-test-devel', required: false)
 
-if get_option('enable-nnstreamer-tensor-filter') and nnstreamer_test_dep.found()
+if get_option('enable-nnstreamer-tensor-filter').enabled() and nnstreamer_test_dep.found()
   subdir('nnstreamer')
 endif
 
-if get_option('enable-tflite-interpreter') or get_option('enable-nnstreamer-tensor-filter')
+if get_option('enable-tflite-interpreter') or get_option('enable-nnstreamer-tensor-filter').enabled()
   run_command('cp','-lr',
     meson.current_source_dir() / 'test_models/',
     nntrainer_test_resdir