[TRIx-Engine] Add TRIx-Engine plugin
authorDongju Chae <dongju.chae@samsung.com>
Wed, 12 Jan 2022 06:33:14 +0000 (15:33 +0900)
committerMyungJoo Ham <myungjoo.ham@samsung.com>
Fri, 21 Jan 2022 05:12:58 +0000 (14:12 +0900)
This patch adds TRIx-Engine tensor filter subplugin implementation.
It requires TRIV2 NPU HW and devel packages from the upstream tizen repo.

Signed-off-by: Dongju Chae <dongju.chae@samsung.com>
ext/nnstreamer/tensor_filter/meson.build
ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.cc [new file with mode: 0644]
ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.hh [new file with mode: 0644]
meson.build
meson_options.txt
packaging/nnstreamer.spec

index 71a092f..077f91e 100644 (file)
@@ -687,5 +687,29 @@ if tvm_support_is_available
     install: true,
     install_dir: nnstreamer_libdir
   )
+endif
+
+if trix_engine_support_is_available
+  # trix-engine subplugin needs glib/gstreamer/nnstreamer on top of npu-engine deps
+  nnstreamer_filter_trix_engine_deps = trix_engine_support_deps + [glib_dep, gst_dep, nnstreamer_dep]
+
+  filter_sub_trix_engine_sources = ['tensor_filter_trix_engine.cc']
+
+  # use absolute source paths so the list is usable outside this directory
+  nnstreamer_filter_trix_engine_sources = []
+  foreach s : filter_sub_trix_engine_sources
+    nnstreamer_filter_trix_engine_sources += join_paths(meson.current_source_dir(), s)
+  endforeach
+
+  # runtime subplugin (.so), installed into the tensor-filter subplugin dir
+  shared_library('nnstreamer_filter_trix-engine',
+    nnstreamer_filter_trix_engine_sources,
+    dependencies: nnstreamer_filter_trix_engine_deps,
+    install: true,
+    install_dir: filter_subplugin_install_dir
+  )
 
+  # static archive, installed next to the other nnstreamer libraries
+  # (same shared+static pattern as the tvm section above)
+  static_library('nnstreamer_filter_trix-engine',
+    nnstreamer_filter_trix_engine_sources,
+    dependencies: nnstreamer_filter_trix_engine_deps,
+    install: true,
+    install_dir: nnstreamer_libdir
+  )
 endif
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.cc b/ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.cc
new file mode 100644 (file)
index 0000000..f1d867c
--- /dev/null
@@ -0,0 +1,349 @@
+/* SPDX-License-Identifier: LGPL-2.1-only */
+/**
+ * Copyright (C) 2020 Samsung Electronics
+ * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
+ */
+/**
+ * @file tensor_filter_trix_engine.cc
+ * @date 20 Jan 2020
+ * @brief NNStreamer tensor-filter subplugin for TRIx devices
+ * @see http://github.com/nnstreamer/nnstreamer
+ * @author Dongju Chae <dongju.chae@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include <tensor_filter_trix_engine.hh>
+
+using namespace std; /* NOTE(review): file-scope using-directive; tolerable in a .cc, never move to the header */
+
+namespace nnstreamer {
+
+/* auto-registration hooks, run at shared-object load/unload */
+void init_filter_trix_engine (void) __attribute__ ((constructor));
+void fini_filter_trix_engine (void) __attribute__ ((destructor));
+
+/* definitions of the static members declared in tensor_filter_trix_engine.hh */
+TensorFilterTRIxEngine *TensorFilterTRIxEngine::registered = nullptr;
+const char *TensorFilterTRIxEngine::name = "trix-engine";
+const accl_hw TensorFilterTRIxEngine::hw_list[] = {ACCL_NPU_SR};
+const int TensorFilterTRIxEngine::num_hw = 1;
+
+/**
+ * @brief Construct a new TRIx-Engine subplugin instance
+ * @note Members are set to "not configured" defaults; the actual device and
+ *       model setup happens later in configure_instance().
+ */
+TensorFilterTRIxEngine::TensorFilterTRIxEngine ()
+    : dev_type_ (NPUCOND_CONN_UNKNOWN),
+      dev_ (nullptr),
+      model_path_ (nullptr),
+      model_meta_ (nullptr),
+      model_id_ (0) {
+  /* qualify addressof consistently with the destructor (std::addressof) */
+  gst_tensors_info_init (std::addressof (nns_in_info_));
+  gst_tensors_info_init (std::addressof (nns_out_info_));
+}
+
+/**
+ * @brief Destruct the TRIx-Engine subplugin instance
+ * @note Safe even when configure_instance() never ran: g_free() is NULL-safe,
+ *       the tensors-info members were initialized in the constructor, and the
+ *       device handle is checked before release.
+ */
+TensorFilterTRIxEngine::~TensorFilterTRIxEngine () {
+  g_free (model_path_);
+  g_free (model_meta_);
+
+  gst_tensors_info_free (std::addressof (nns_in_info_));
+  gst_tensors_info_free (std::addressof (nns_out_info_));
+
+  /* unregister every model registered on this device, then return the device */
+  if (dev_ != nullptr) {
+    unregisterNPUmodel_all (dev_);
+    putNPUdevice (dev_);
+  }
+}
+
+/**
+ * @brief Get a fresh, unconfigured subplugin object
+ * @return reference to a heap-allocated instance; ownership passes to the
+ *         tensor-filter framework
+ */
+tensor_filter_subplugin &
+TensorFilterTRIxEngine::getEmptyInstance () {
+  TensorFilterTRIxEngine *instance = new TensorFilterTRIxEngine ();
+  return *instance;
+}
+
+/**
+ * @brief Configure TRIx-Engine instance
+ */
+void
+TensorFilterTRIxEngine::configure_instance (const GstTensorFilterProperties *prop) {
+  if (!prop->model_files[0] || prop->model_files[0][0] == '\0') {
+    nns_loge ("Unable to find a model filepath given\n");
+    throw invalid_argument ("Unable to find a model filepath given");
+  }
+
+  model_path_ = g_strdup (prop->model_files[0]);
+  model_meta_ = getNPUmodel_metadata (model_path_, false);
+  if (model_meta_ == nullptr) {
+    nns_loge ("Unable to extract the model metadata\n");
+    throw runtime_error ("Unable to extract the model metadata");
+  }
+
+  int status = -ENOENT;
+  for (int i = 0; i < prop->num_hw; i++) {
+    /* TRIV2 alias for now */
+    if (prop->hw_list[i] == ACCL_NPU_SR) {
+      status = getNPUdeviceByTypeAny (&dev_, NPUCOND_TRIV2_CONN_SOCIP, 2);
+      if (status == 0)
+        break;
+    }
+  }
+
+  if (status != 0) {
+    nns_loge ("Unable to find a proper NPU device\n");
+    throw runtime_error ("Unable to find a proper NPU device");
+  }
+
+  generic_buffer model_file;
+  model_file.filepath = model_path_;
+  model_file.size = model_meta_->size;
+  model_file.type = BUFFER_FILE;
+
+  if (registerNPUmodel (dev_, &model_file, &model_id_) != 0) {
+    nns_loge ("Unable to register the model\n");
+    throw runtime_error ("Unable to register the model");
+  }
+
+  /* check user-provided input tensor info */
+  if (prop->input_meta.num_tensors == 0) {
+    nns_in_info_.num_tensors = model_meta_->input_seg_num;
+    for (uint32_t i = 0; i < nns_in_info_.num_tensors; i++) {
+      nns_in_info_.info[i].type = _NNS_UINT8;
+      for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
+        nns_in_info_.info[i].dimension[j] =
+            model_meta_->input_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
+    }
+  } else {
+    gst_tensors_info_copy (&nns_in_info_, &prop->input_meta);
+  }
+
+  /* check user-provided output tensor info */
+  if (prop->input_meta.num_tensors == 0) {
+    nns_out_info_.num_tensors = model_meta_->output_seg_num;
+    for (uint32_t i = 0; i < nns_out_info_.num_tensors; i++) {
+      nns_out_info_.info[i].type = _NNS_UINT8;
+      for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
+        nns_out_info_.info[i].dimension[j] =
+            model_meta_->output_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
+    }
+  } else {
+    gst_tensors_info_copy (&nns_out_info_, &prop->output_meta);
+  }
+
+  set_data_info (prop);
+}
+
+/**
+ * @brief Convert data layout (from NNStreamer to TRIx-Engine)
+ * @param[in] layout NNStreamer tensor layout
+ * @return matching TRIx-Engine layout; DATA_LAYOUT_MODEL (use the model's
+ *         own layout) when there is no direct counterpart
+ */
+data_layout
+TensorFilterTRIxEngine::convert_data_layout (const tensor_layout &layout) {
+  if (layout == _NNS_LAYOUT_NHWC)
+    return DATA_LAYOUT_NHWC;
+
+  if (layout == _NNS_LAYOUT_NCHW)
+    return DATA_LAYOUT_NCHW;
+
+  return DATA_LAYOUT_MODEL;
+}
+
+/**
+ * @brief Convert data type (from NNStreamer to TRIx-Engine)
+ */
+data_type
+TensorFilterTRIxEngine::convert_data_type (const tensor_type &type) {
+  switch (type) {
+    case _NNS_INT32:
+      return DATA_TYPE_INT32;
+    case _NNS_UINT32:
+      return DATA_TYPE_UINT32;
+    case _NNS_INT16:
+      return DATA_TYPE_INT16;
+    case _NNS_UINT16:
+      return DATA_TYPE_UINT16;
+    case _NNS_INT8:
+      return DATA_TYPE_INT8;
+    case _NNS_UINT8:
+      return DATA_TYPE_UINT8;
+    case _NNS_FLOAT64:
+      return DATA_TYPE_FLOAT64;
+    case _NNS_FLOAT32:
+      return DATA_TYPE_FLOAT32;
+    case _NNS_INT64:
+      return DATA_TYPE_INT64;
+    case _NNS_UINT64:
+      return DATA_TYPE_UINT64;
+    default:
+      return DATA_TYPE_MODEL;
+  }
+}
+
+/**
+ * @brief Set data info of input/output tensors using metadata
+ * @param[in] prop tensor-filter properties supplying per-tensor layouts
+ * @note Buffer counts come from the model metadata while types come from
+ *       nns_in/out_info_; assumes prop->input/output_layout provides an entry
+ *       for every model segment — TODO confirm for models with many segments.
+ */
+void
+TensorFilterTRIxEngine::set_data_info (const GstTensorFilterProperties *prop) {
+  const tensor_layout *input_layout = &(prop->input_layout[0]);
+  const tensor_layout *output_layout = &(prop->output_layout[0]);
+
+  trix_in_info_.num_info = model_meta_->input_seg_num;
+
+  for (uint32_t idx = 0; idx < trix_in_info_.num_info; ++idx) {
+    trix_in_info_.info[idx].layout = convert_data_layout (input_layout[idx]);
+    trix_in_info_.info[idx].type = convert_data_type (nns_in_info_.info[idx].type);
+  }
+
+  trix_out_info_.num_info = model_meta_->output_seg_num;
+
+  for (uint32_t idx = 0; idx < trix_out_info_.num_info; ++idx) {
+    trix_out_info_.info[idx].layout = convert_data_layout (output_layout[idx]);
+    trix_out_info_.info[idx].type = convert_data_type (nns_out_info_.info[idx].type);
+  }
+}
+
+/**
+ * @brief Feed the tensor data to input buffers before invoke()
+ * @param[in] input array of input tensor memories from the framework
+ * @param[out] input_buf npu-engine input buffer descriptors to fill
+ */
+void
+TensorFilterTRIxEngine::feed_input_data (const GstTensorMemory *input, input_buffers *input_buf) {
+  const uint32_t num_bufs = model_meta_->input_seg_num;
+
+  input_buf->num_buffers = num_bufs;
+  for (uint32_t i = 0; i < num_bufs; ++i) {
+    auto &buf = input_buf->bufs[i];
+
+    buf.addr = input[i].data;
+    buf.size = input[i].size;
+    buf.type = BUFFER_MAPPED;
+  }
+}
+
+/**
+ * @brief Extract the tensor data from output buffers after invoke()
+ */
+void
+TensorFilterTRIxEngine::extract_output_data (const output_buffers *output_buf,
+                                             GstTensorMemory *output) {
+  /* internal logic error */
+  assert (output_buf->num_buffers == model_meta_->output_seg_num);
+
+  for (uint32_t idx = 0; idx < output_buf->num_buffers; ++idx) {
+    output[idx].data = output_buf->bufs[idx].addr;
+    output[idx].size = output_buf->bufs[idx].size;
+  }
+}
+
+/**
+ * @brief Invoke TRIxEngine using input tensors
+ */
+void
+TensorFilterTRIxEngine::invoke (const GstTensorMemory *input, GstTensorMemory *output) {
+  int req_id;
+  int status;
+
+  status = createNPU_request (dev_, model_id_, &req_id);
+  if (status != 0) {
+    nns_loge ("Unable to create NPU request with model id (%u): %d", model_id_, status);
+    return;
+  }
+
+  input_buffers input_buf = {0};
+  output_buffers output_buf = {0};
+  /* feed input data to npu-engine */
+  feed_input_data (input, &input_buf);
+
+  status =
+      setNPU_requestData (dev_, req_id, &input_buf, &trix_in_info_, &output_buf, &trix_out_info_);
+  if (status != 0) {
+    nns_loge ("Unable to create NPU request for model %u", model_id_);
+    return;
+  }
+
+  status = submitNPU_request (dev_, req_id);
+  if (status != 0) {
+    nns_loge ("Unable to submit NPU request with id (%u): %d", req_id, status);
+    return;
+  }
+  /* extract output data from npu-engine */
+  extract_output_data (&output_buf, output);
+
+  status = removeNPU_request (dev_, req_id);
+  if (status != 0) {
+    nns_loge ("Unable to remove NPU request with id (%u): %d", req_id, status);
+    return;
+  }
+}
+
+/**
+ * @brief Get TRIxEngine framework info.
+ * @param[out] info framework info structure to fill
+ */
+void
+TensorFilterTRIxEngine::getFrameworkInfo (GstTensorFilterFrameworkInfo &info) {
+  /* identity and supported accelerators */
+  info.name = name;
+  info.hw_list = hw_list;
+  info.num_hw = num_hw;
+
+  /* behavioral flags: a model file is mandatory, output buffers are
+   * allocated inside invoke() by the engine, in-place is not supported */
+  info.run_without_model = FALSE;
+  info.verify_model_path = TRUE;
+  info.allow_in_place = FALSE;
+  info.allocate_in_invoke = TRUE;
+}
+
+/**
+ * @brief Get TRIxEngine model info.
+ * @param[in] ops requested operation; only GET_IN_OUT_INFO is supported
+ * @param[out] in_info filled with the configured input tensors info
+ * @param[out] out_info filled with the configured output tensors info
+ * @return 0 on success, -ENOENT for unsupported operations
+ */
+int
+TensorFilterTRIxEngine::getModelInfo (model_info_ops ops, GstTensorsInfo &in_info,
+                                      GstTensorsInfo &out_info) {
+  if (ops == GET_IN_OUT_INFO) {
+    gst_tensors_info_copy (std::addressof (in_info), std::addressof (nns_in_info_));
+    gst_tensors_info_copy (std::addressof (out_info), std::addressof (nns_out_info_));
+    return 0;
+  }
+
+  return -ENOENT;
+}
+
+/**
+ * @brief Method to handle the event
+ * @param[in] ops event operation
+ * @param[in] data event data accompanying the operation
+ * @return -ENOENT; no events are handled by this subplugin yet
+ */
+int
+TensorFilterTRIxEngine::eventHandler (event_ops ops, GstTensorFilterFrameworkEventData &data) {
+  UNUSED (ops);
+  UNUSED (data);
+  return -ENOENT;
+}
+
+/**
+ * @brief Register the subplugin to the tensor-filter framework
+ */
+void
+TensorFilterTRIxEngine::init_filter_trix_engine () {
+  registered = tensor_filter_subplugin::register_subplugin<TensorFilterTRIxEngine> ();
+}
+
+/**
+ * @brief Unregister the subplugin from the tensor-filter framework
+ */
+void
+TensorFilterTRIxEngine::fini_filter_trix_engine () {
+  /* internal logic error: fini must not run without a prior successful init */
+  assert (registered != nullptr);
+  tensor_filter_subplugin::unregister_subplugin (registered);
+}
+
+/**
+ * @brief Subplugin initializer, run automatically at shared-object load
+ *        (see the __attribute__ ((constructor)) declaration above)
+ */
+void
+init_filter_trix_engine () {
+  TensorFilterTRIxEngine::init_filter_trix_engine ();
+}
+
+/**
+ * @brief Subplugin finalizer, run automatically at shared-object unload
+ *        (see the __attribute__ ((destructor)) declaration above)
+ */
+void
+fini_filter_trix_engine () {
+  TensorFilterTRIxEngine::fini_filter_trix_engine ();
+}
+
+} /* namespace nnstreamer */
diff --git a/ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.hh b/ext/nnstreamer/tensor_filter/tensor_filter_trix_engine.hh
new file mode 100644 (file)
index 0000000..e515d32
--- /dev/null
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: LGPL-2.1-only */
+/**
+ * Copyright (C) 2020 Samsung Electronics
+ * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
+ */
+/**
+ * @file    tensor_filter_trix_engine.hh
+ * @date    20 Jan 2020
+ * @brief   NNStreamer tensor-filter subplugin trix_engine header
+ * @see     https://github.com/nnstreamer/nnstreamer
+ * @author  Dongju Chae <dongju.chae@samsung.com>
+ * @bug     No known bugs
+ */
+
+#ifndef __TENSOR_FILTER_SUBPLUGIN_TRIxEngine_H__
+#define __TENSOR_FILTER_SUBPLUGIN_TRIxEngine_H__
+
+/* npu-engine headers */
+#include <npubinfmt.h>
+#include <libnpuhost.h>
+
+/* nnstreamer plugin api headers */
+#include <nnstreamer_plugin_api.h>
+#include <nnstreamer_cppplugin_api_filter.hh>
+#include <nnstreamer_util.h>
+#include <nnstreamer_log.h>
+
+namespace nnstreamer {
+
+/**
+ * @brief Class for TRIx-Engine subplugin
+ */
+class TensorFilterTRIxEngine : public tensor_filter_subplugin {
+ public:
+  TensorFilterTRIxEngine ();
+  ~TensorFilterTRIxEngine ();
+
+  /* mandatory methods (tensor_filter_subplugin interface) */
+  tensor_filter_subplugin &getEmptyInstance ();
+  void configure_instance (const GstTensorFilterProperties *prop);
+  void invoke (const GstTensorMemory *input, GstTensorMemory *output);
+  void getFrameworkInfo (GstTensorFilterFrameworkInfo &info);
+  int getModelInfo (model_info_ops ops, GstTensorsInfo &in_info, GstTensorsInfo &out_info);
+  int eventHandler (event_ops ops, GstTensorFilterFrameworkEventData &data);
+
+  /* static methods used by the constructor/destructor registration hooks */
+  static void init_filter_trix_engine ();
+  static void fini_filter_trix_engine ();
+
+ private:
+  /* NNStreamer-to-TRIx-Engine enum conversion helpers */
+  static data_layout convert_data_layout (const tensor_layout &layout);
+  static data_type convert_data_type (const tensor_type &type);
+
+  static TensorFilterTRIxEngine *registered; /* handle used to unregister on unload */
+  static const char *name;                   /* subplugin name: "trix-engine" */
+  static const accl_hw hw_list[];            /* supported accelerators */
+  static const int num_hw;
+
+  void set_data_info (const GstTensorFilterProperties *prop);
+  void feed_input_data (const GstTensorMemory *input, input_buffers *input_buf);
+  void extract_output_data (const output_buffers *output_buf, GstTensorMemory *output);
+
+  /* trix-engine vars */
+  dev_type dev_type_;          /* NPU connection type; NPUCOND_CONN_UNKNOWN until configured */
+  npudev_h dev_;               /* NPU device handle; owned, released in the destructor */
+  gchar *model_path_;          /* model filepath; owned (g_free) */
+  npubin_meta *model_meta_;    /* model metadata; owned */
+  uint32_t model_id_;          /* id returned by registerNPUmodel() */
+  tensors_data_info trix_in_info_;
+  tensors_data_info trix_out_info_;
+
+  /* nnstreamer vars */
+  GstTensorsInfo nns_in_info_;
+  GstTensorsInfo nns_out_info_;
+};
+
+} /* namespace nnstreamer */
+
+#endif /* __TENSOR_FILTER_SUBPLUGIN_TRIxEngine_H__ */
index 76773cf..6af3eb4 100644 (file)
@@ -295,6 +295,11 @@ if not get_option('armnn-support').disabled()
   endif
 endif
 
+trix_engine_dep = dependency('', required: false)
+if not get_option('trix-engine-support').disabled()
+  trix_engine_dep = dependency('npu-engine', required: false)
+endif
+
 # features registration to be controlled
 #
 # register feature as follows
@@ -384,6 +389,10 @@ features = {
   'query-hybrid-support': {
     'target': 'nnsquery',
     'project_args': { 'ENABLE_QUERY_HYBRID': 1 }
+  },
+  'trix-engine-support': {
+    'extra_deps': [ trix_engine_dep ],
+    'project_args': { 'ENABLE_TRIX_ENGINE': 1 }
   }
 }
 
index b7c6b73..ec86f96 100644 (file)
@@ -21,6 +21,7 @@ option('lua-support', type: 'feature', value: 'auto')
 option('mqtt-support', type: 'feature', value: 'auto')
 option('tvm-support', type: 'feature', value: 'auto')
 option('query-hybrid-support', type: 'feature', value: 'auto')
+option('trix-engine-support', type: 'feature', value: 'auto')
 
 # booleans & other options
 option('enable-test', type: 'boolean', value: true)
index 9c44eac..f1a49e7 100644 (file)
@@ -35,6 +35,7 @@
 %define                lua_support 1
 %define                tvm_support 1
 %define                snpe_support 1
+%define                trix_engine_support 1
 
 %define                check_test 1
 %define                release_test 1
@@ -66,6 +67,7 @@
 %if ( 0%{?tizen_version_major} == 6 && 0%{?tizen_version_minor} < 5 ) || 0%{?tizen_version_major} < 6
 %define                grpc_support 0
 %define                tensorflow2_lite_support 0
+%define                trix_engine_support 0
 %endif
 
 # Disable e-TPU if it's not 64bit system
 %define                lua_support 0
 %define                mqtt_support 0
 %define                tvm_support 0
+%define                trix_engine_support 0
 %endif
 
 # Release unit test suite as a subpackage only if check_test is enabled.
@@ -285,6 +288,10 @@ BuildRequires:     tvm-runtime-devel
 BuildRequires: snpe-devel
 %endif
 
+%if 0%{?trix_engine_support}
+BuildRequires: npu-engine-devel
+%endif
+
 # Unit Testing Uses SSAT (hhtps://github.com/myungjoo/SSAT.git)
 %if 0%{?unit_test}
 BuildRequires: ssat >= 1.1.0
@@ -459,6 +466,16 @@ Requires:  snpe
 NNStreamer's tensor_fliter subplugin of snpe
 %endif
 
+# for trix-engine
+%if 0%{?trix_engine_support}
+%package trix-engine
+Summary:       NNStreamer TRIx-Engine support
+Requires:      nnstreamer = %{version}-%{release}
+Requires:      trix-engine
+%description trix-engine
+NNStreamer's tensor_filter subplugin of trix-engine
+%endif
+
 %package devel
 Summary:       Development package for custom tensor operator developers (tensor_filter/custom)
 Requires:      nnstreamer = %{version}-%{release}
@@ -739,6 +756,13 @@ Provides additional gstreamer plugins for nnstreamer pipelines
 %define enable_tvm -Dtvm-support=disabled
 %endif
 
+# Support trix-engine
+%if 0%{?trix_engine_support}
+%define enable_trix_engine -Dtrix-engine-support=enabled
+%else
+%define enable_trix_engine -Dtrix-engine-support=disabled
+%endif
+
 # Framework priority for each file extension
 %define fw_priority_bin ''
 %define fw_priority_nb ''
@@ -781,7 +805,8 @@ meson --buildtype=plain --prefix=%{_prefix} --sysconfdir=%{_sysconfdir} --libdir
        --bindir=%{nnstbindir} --includedir=include -Dsubplugindir=%{_prefix}/lib/nnstreamer \
        %{enable_tizen} %{element_restriction} %{fw_priority} -Denable-env-var=false -Denable-symbolic-link=false \
        %{enable_tf_lite} %{enable_tf2_lite} %{enable_tf} %{enable_pytorch} %{enable_caffe2} %{enable_python3} \
-       %{enable_nnfw_runtime} %{enable_mvncsdk2} %{enable_openvino} %{enable_armnn} %{enable_edgetpu}  %{enable_vivante} %{enable_flatbuf} \
+       %{enable_nnfw_runtime} %{enable_mvncsdk2} %{enable_openvino} %{enable_armnn} %{enable_edgetpu}  %{enable_vivante} \
+       %{enable_flatbuf} %{enable_trix_engine} \
        %{enable_tizen_sensor} %{enable_mqtt} %{enable_lua} %{enable_tvm} %{enable_test} %{enable_test_coverage} %{install_test} \
        build
 
@@ -1007,6 +1032,14 @@ cp -r result %{buildroot}%{_datadir}/nnstreamer/unittest/
 %defattr(-,root,root,-)
 %endif
 
+# for trix-engine
+%if 0%{?trix_engine_support}
+%files trix-engine
+%manifest nnstreamer.manifest
+%defattr(-,root,root,-)
+%{_prefix}/lib/nnstreamer/filters/libnnstreamer_filter_trix-engine.so
+%endif
+
 %files devel
 %{_includedir}/nnstreamer/tensor_if.h
 %{_includedir}/nnstreamer/tensor_typedef.h