This patch renames the NNStreamer subplugin from srnpu to trix-engine.
Signed-off-by: Dongju Chae <dongju.chae@samsung.com>
The mandatory packages to be installed are as follows.
- `libmrpsim`: NPU Simulator ([Repo](https://github.sec.samsung.net/AIP/SIM_TrinityVision2))
- `npu-engine`: NPU User Library ([Repo](https://github.sec.samsung.net/AIP/NPU_SystemService))
-- `nnstreamer-srnpu`: NNStreamer Tensor Filter Subplugin for SRNPU ([Repo](https://github.sec.samsung.net/AIP/NPU_SystemService/tree/tizen/plugins/nnstreamer))
+- `nnstreamer-trix-engine`: NNStreamer Tensor Filter Subplugin for TRIx Engine ([Repo](https://github.sec.samsung.net/AIP/NPU_SystemService/tree/tizen/plugins/nnstreamer))
Note that before such packages are shipped to official emulator images, you should download the pre-built packages from [here](https://art.sec.samsung.net/artifactory/aip_generic/RPMS_FOR_TIZEN_EMUL/) and customize your emulator image. Please refer to [here](https://github.sec.samsung.net/AIP/TRIV2_TIZEN_SAMPLE) for details.
List of devices attached
emulator-26101 device m-0405-1
$ ls -1 RPMS
-libmrpsim-3.8.67-0.i686.rpm
-nnstreamer-srnpu-2.3.15-0.i686.rpm
-npu-engine-2.3.15-0.i686.rpm
+libmrpsim-3.8.71-0.i686.rpm
+nnstreamer-trix-engine-2.3.16-0.i686.rpm
+npu-engine-2.3.16-0.i686.rpm
+npu-engine-utils-2.3.16-0.i686.rpm
$ bash install.sh
...
```
%endif
%if 0%{?nns_plugin}
-%package -n nnstreamer-srnpu
+%package -n nnstreamer-trix-engine
-Summary: NNStreamer subplugin for SR-NPU device family
+Summary: NNStreamer subplugin for SR TRIx device family
Requires: npu-engine
Requires: nnstreamer
-%description -n nnstreamer-srnpu
-Reference implementation of NNStreamer filter subplugin for SR-NPU (for now, TRIV2 only).
-%files -n nnstreamer-srnpu
+%description -n nnstreamer-trix-engine
+Reference implementation of NNStreamer filter subplugin for SR TRIx devices (for now, TRIV2 only).
+%files -n nnstreamer-trix-engine
%manifest npu-engine.manifest
-%{_prefix}/lib/nnstreamer/filters/libnnstreamer_filter_srnpu.so
+%{_prefix}/lib/nnstreamer/filters/libnnstreamer_filter_trix-engine.so
%endif
%changelog
gst_dep = dependency('gstreamer-' + gst_api_verision)
nnstreamer_dep = dependency('nnstreamer')
-srnpu_inc = include_directories('.')
-srnpu_sources = [
- 'tensor_filter_srnpu.cc'
+trix_engine_inc = include_directories('.')
+trix_engine_sources = [
+ 'tensor_filter_trix_engine.cc'
]
base_deps = [
base_deps += dependency('dlog')
endif
-srnpu_shared = shared_library('nnstreamer_filter_srnpu',
- sources : srnpu_sources,
- include_directories : [ne_common_inc, ne_host_inc, srnpu_inc],
+trix_engine_shared = shared_library('nnstreamer_filter_trix-engine',
+ sources : trix_engine_sources,
+ include_directories : [ne_common_inc, ne_host_inc, trix_engine_inc],
dependencies : [base_deps, iniparser_dep],
link_with : ne_library_shared,
install: true,
+++ /dev/null
-/**
- * Proprietary
- * Copyright (C) 2020 Samsung Electronics
- * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
- */
-/**
- * @file tensor_filter_srnpu.cc
- * @date 20 Jan 2020
- * @brief NNStreamer tensor-filter subplugin for srnpu devices
- * @see http://github.com/nnstreamer/nnstreamer
- * @see https://github.sec.samsung.net/AIP/NPU_SystemService
- * @author Dongju Chae <dongju.chae@samsung.com>
- * @bug No known bugs except for NYI items
- */
-
-#include <tensor_filter_srnpu.h>
-
-using namespace std;
-
-namespace nnstreamer {
-
-void init_filter_srnpu (void) __attribute__ ((constructor));
-void fini_filter_srnpu (void) __attribute__ ((destructor));
-
-TensorFilterSRNPU *TensorFilterSRNPU::registered = nullptr;
-const char *TensorFilterSRNPU::name = "srnpu";
-const accl_hw TensorFilterSRNPU::hw_list[] = {ACCL_NPU_SR};
-const int TensorFilterSRNPU::num_hw = 1;
-
-TensorFilterSRNPU::TensorFilterSRNPU ()
- : dev_type_ (NPUCOND_CONN_UNKNOWN),
- dev_ (nullptr),
- model_path_ (nullptr),
- model_meta_ (nullptr),
- model_id_ (0),
- input_layout_ (nullptr),
- output_layout_ (nullptr) {
- gst_tensors_info_init (addressof (input_info_));
- gst_tensors_info_init (addressof (output_info_));
-}
-
-TensorFilterSRNPU::~TensorFilterSRNPU () {
- g_free (model_path_);
- g_free (model_meta_);
-
- gst_tensors_info_free (std::addressof (input_info_));
- gst_tensors_info_free (std::addressof (output_info_));
-
- if (dev_ != nullptr) {
- unregisterNPUmodel_all (dev_);
- putNPUdevice (dev_);
- }
-}
-
-tensor_filter_subplugin &
-TensorFilterSRNPU::getEmptyInstance () {
- return *(new TensorFilterSRNPU ());
-}
-
-void
-TensorFilterSRNPU::configure_instance (const GstTensorFilterProperties *prop) {
- if (!prop->model_files[0] || prop->model_files[0][0] == '\0') {
- ml_loge ("Unable to find a model filepath given\n");
- throw invalid_argument ("Unable to find a model filepath given");
- }
-
- model_path_ = g_strdup (prop->model_files[0]);
- model_meta_ = getNPUmodel_metadata (model_path_, false);
- if (model_meta_ == nullptr) {
- ml_loge ("Unable to extract the model metadata\n");
- throw runtime_error ("Unable to extract the model metadata");
- }
-
- int status = -ENOENT;
- for (int i = 0; i < prop->num_hw; i++) {
- /* TRIV2 alias for now */
- if (prop->hw_list[i] == ACCL_NPU_SR) {
- status = getNPUdeviceByTypeAny (&dev_, NPUCOND_TRIV2_CONN_SOCIP, 2);
- if (status == 0)
- break;
- }
- }
-
- if (status != 0) {
- ml_loge ("Unable to find a proper NPU device\n");
- throw runtime_error ("Unable to find a proper NPU device");
- }
-
- generic_buffer model_file;
- model_file.filepath = model_path_;
- model_file.size = model_meta_->size;
- model_file.type = BUFFER_FILE;
-
- if (registerNPUmodel (dev_, &model_file, &model_id_) != 0) {
- ml_loge ("Unable to register the model\n");
- throw runtime_error ("Unable to register the model");
- }
-
- /* check user-provided input tensor info */
- if (prop->input_meta.num_tensors == 0) {
- input_info_.num_tensors = model_meta_->input_seg_num;
- for (uint32_t i = 0; i < input_info_.num_tensors; i++) {
- input_info_.info[i].type = _NNS_UINT8;
- for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
- input_info_.info[i].dimension[j] =
- model_meta_->input_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
- }
- } else {
- gst_tensors_info_copy (&input_info_, &prop->input_meta);
- }
-
- /* check user-provided output tensor info */
- if (prop->input_meta.num_tensors == 0) {
- output_info_.num_tensors = model_meta_->output_seg_num;
- for (uint32_t i = 0; i < output_info_.num_tensors; i++) {
- output_info_.info[i].type = _NNS_UINT8;
- for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
- output_info_.info[i].dimension[j] =
- model_meta_->output_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
- }
- } else {
- gst_tensors_info_copy (&output_info_, &prop->output_meta);
- }
-
- input_layout_ = &(prop->input_layout[0]);
- output_layout_ = &(prop->output_layout[0]);
-}
-
-static data_layout
-convert_data_layout (const tensor_layout &layout) {
- switch (layout) {
- case _NNS_LAYOUT_NHWC:
- return DATA_LAYOUT_NHWC;
- case _NNS_LAYOUT_NCHW:
- return DATA_LAYOUT_NCHW;
- default:
- return DATA_LAYOUT_MODEL;
- }
-}
-
-static data_type
-convert_data_type (const tensor_type &type) {
- switch (type) {
- case _NNS_INT32:
- return DATA_TYPE_INT32;
- case _NNS_UINT32:
- return DATA_TYPE_UINT32;
- case _NNS_INT16:
- return DATA_TYPE_INT16;
- case _NNS_UINT16:
- return DATA_TYPE_UINT16;
- case _NNS_INT8:
- return DATA_TYPE_INT8;
- case _NNS_UINT8:
- return DATA_TYPE_UINT8;
- case _NNS_FLOAT64:
- return DATA_TYPE_FLOAT64;
- case _NNS_FLOAT32:
- return DATA_TYPE_FLOAT32;
- case _NNS_INT64:
- return DATA_TYPE_INT64;
- case _NNS_UINT64:
- return DATA_TYPE_UINT64;
- default:
- return DATA_TYPE_MODEL;
- }
-}
-
-void
-TensorFilterSRNPU::set_data_info (tensors_data_info *in_info, tensors_data_info *out_info) {
- in_info->num_info = model_meta_->input_seg_num;
-
- for (uint32_t idx = 0; idx < in_info->num_info; ++idx) {
- in_info->info[idx].layout = convert_data_layout (input_layout_[idx]);
- in_info->info[idx].type = convert_data_type (input_info_.info[idx].type);
- }
-
- out_info->num_info = model_meta_->output_seg_num;
-
- for (uint32_t idx = 0; idx < out_info->num_info; ++idx) {
- out_info->info[idx].layout = convert_data_layout (output_layout_[idx]);
- out_info->info[idx].type = convert_data_type (output_info_.info[idx].type);
- }
-}
-
-void
-TensorFilterSRNPU::feed_input_data (const GstTensorMemory *input, input_buffers *input_buf) {
- input_buf->num_buffers = model_meta_->input_seg_num;
-
- for (uint32_t idx = 0; idx < input_buf->num_buffers; ++idx) {
- input_buf->bufs[idx].addr = input[idx].data;
- input_buf->bufs[idx].size = input[idx].size;
- input_buf->bufs[idx].type = BUFFER_MAPPED;
- }
-}
-
-void
-TensorFilterSRNPU::extract_output_data (const output_buffers *output_buf, GstTensorMemory *output) {
- /* internal logic error */
- assert (output_buf->num_buffers == model_meta_->output_seg_num);
-
- for (uint32_t idx = 0; idx < output_buf->num_buffers; ++idx) {
- output[idx].data = output_buf->bufs[idx].addr;
- output[idx].size = output_buf->bufs[idx].size;
- }
-}
-
-void
-TensorFilterSRNPU::invoke (const GstTensorMemory *input, GstTensorMemory *output) {
- int req_id;
- int status;
-
- status = createNPU_request (dev_, model_id_, &req_id);
- if (status != 0) {
- ml_loge ("Unable to create NPU request with model id (%u): %d", model_id_, status);
- return;
- }
-
- tensors_data_info in_info;
- tensors_data_info out_info;
- /* set data info using metadata */
- set_data_info (&in_info, &out_info);
-
- input_buffers input_buf = {0};
- output_buffers output_buf = {0};
- /* feed input data to npu-engine */
- feed_input_data (input, &input_buf);
-
- status = setNPU_requestData (dev_, req_id, &input_buf, &in_info, &output_buf, &out_info);
- if (status != 0) {
- ml_loge ("Unable to create NPU request for model %u", model_id_);
- return;
- }
-
- status = submitNPU_request (dev_, req_id);
- if (status != 0) {
- ml_loge ("Unable to submit NPU request with id (%u): %d", req_id, status);
- return;
- }
- /* extract output data from npu-engine */
- extract_output_data (&output_buf, output);
-
- status = removeNPU_request (dev_, req_id);
- if (status != 0) {
- ml_loge ("Unable to remove NPU request with id (%u): %d", req_id, status);
- return;
- }
-}
-
-void
-TensorFilterSRNPU::getFrameworkInfo (GstTensorFilterFrameworkInfo &info) {
- info.name = name;
- info.allow_in_place = FALSE;
- info.allocate_in_invoke = TRUE;
- info.run_without_model = FALSE;
- info.verify_model_path = TRUE;
- info.hw_list = hw_list;
- info.num_hw = num_hw;
-}
-
-int
-TensorFilterSRNPU::getModelInfo (model_info_ops ops, GstTensorsInfo &in_info,
- GstTensorsInfo &out_info) {
- if (ops != GET_IN_OUT_INFO) {
- return -ENOENT;
- }
-
- gst_tensors_info_copy (addressof (in_info), addressof (input_info_));
- gst_tensors_info_copy (addressof (out_info), addressof (output_info_));
- return 0;
-}
-
-int
-TensorFilterSRNPU::eventHandler (event_ops ops, GstTensorFilterFrameworkEventData &data) {
- return -ENOENT;
-}
-
-void
-TensorFilterSRNPU::init_filter_srnpu () {
- registered = tensor_filter_subplugin::register_subplugin<TensorFilterSRNPU> ();
-}
-
-void
-TensorFilterSRNPU::fini_filter_srnpu () {
- /* internal logic error */
- assert (registered != nullptr);
- tensor_filter_subplugin::unregister_subplugin (registered);
-}
-
-void
-init_filter_srnpu () {
- TensorFilterSRNPU::init_filter_srnpu ();
-}
-
-void
-fini_filter_srnpu () {
- TensorFilterSRNPU::fini_filter_srnpu ();
-}
-
-} /* namespace nnstreamer */
+++ /dev/null
-/**
- * Proprietary
- * Copyright (C) 2020 Samsung Electronics
- * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
- */
-/**
- * @file tensor_filter_subplugin_srnpu.h
- * @date 20 Jan 2020
- * @brief NNStreamer tensor-filter subplugin srnpu header
- * @see https://github.com/nnsuite/nnstreamer
- * @see https://github.sec.samsung.net/AIP/NPU_SystemService
- * @author Dongju Chae <dongju.chae@samsung.com>
- * @bug No known bugs
- */
-
-#ifndef __TENSOR_FILTER_SUBPLUGIN_SRNPU_H__
-
-/* npu-engine headers */
-#include <npubinfmt.h>
-#include <libnpuhost.h>
-
-/* nnstreamer plugin api headers */
-#include <nnstreamer_plugin_api.h>
-#include <nnstreamer_cppplugin_api_filter.hh>
-
-#if defined(__TIZEN__)
-#include <dlog.h>
-#define TAG_NAME "nnstreamer_srnpu"
-#define ml_logi(...) dlog_print (DLOG_INFO, TAG_NAME, __VA_ARGS__)
-#define ml_logw(...) dlog_print (DLOG_WARN, TAG_NAME, __VA_ARGS__)
-#define ml_loge(...) dlog_print (DLOG_ERROR, TAG_NAME, __VA_ARGS__)
-#define ml_logd(...) dlog_print (DLOG_DEBUG, TAG_NAME, __VA_ARGS__)
-#define ml_logf(...) dlog_print (DLOG_FATAL, TAG_NAME, __VA_ARGS__)
-#else
-#define ml_logi g_info
-#define ml_logw g_warning
-#define ml_loge g_critical
-#define ml_logd g_debug
-#define ml_logf g_error
-#endif
-
-namespace nnstreamer {
-
-class TensorFilterSRNPU : public tensor_filter_subplugin {
- public:
- TensorFilterSRNPU ();
- ~TensorFilterSRNPU ();
-
- /* mandatory methods */
- tensor_filter_subplugin &getEmptyInstance ();
- void configure_instance (const GstTensorFilterProperties *prop);
- void invoke (const GstTensorMemory *input, GstTensorMemory *output);
- void getFrameworkInfo (GstTensorFilterFrameworkInfo &info);
- int getModelInfo (model_info_ops ops, GstTensorsInfo &in_info, GstTensorsInfo &out_info);
- int eventHandler (event_ops ops, GstTensorFilterFrameworkEventData &data);
-
- /* static methods */
- static void init_filter_srnpu ();
- static void fini_filter_srnpu ();
-
- private:
- void set_data_info (tensors_data_info *in_info, tensors_data_info *out_info);
- void feed_input_data (const GstTensorMemory *input, input_buffers *input_buf);
- void extract_output_data (const output_buffers *output_buf, GstTensorMemory *output);
-
- static TensorFilterSRNPU *registered;
- static const char *name;
- static const accl_hw hw_list[];
- static const int num_hw;
-
- /* npu-engine */
- dev_type dev_type_;
- npudev_h dev_;
- gchar *model_path_;
- npubin_meta *model_meta_;
- uint32_t model_id_;
-
- /* nnstreamer */
- const tensor_layout *input_layout_; /**< The data layout of input tensors */
- const tensor_layout *output_layout_; /**< The data layout of output tensors */
- GstTensorsInfo input_info_; /**< The data info of input tensors */
- GstTensorsInfo output_info_; /**< The data info of output tensors */
-};
-
-} /* namespace nnstreamer */
-
-#endif /* __TENSOR_FILTER_SUBPLUGIN_H_SRNPU_ */
--- /dev/null
+/**
+ * Proprietary
+ * Copyright (C) 2020 Samsung Electronics
+ * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
+ */
+/**
+ * @file tensor_filter_trix_engine.cc
+ * @date 20 Jan 2020
+ * @brief NNStreamer tensor-filter subplugin for TRIx devices
+ * @see http://github.com/nnstreamer/nnstreamer
+ * @see https://github.sec.samsung.net/AIP/NPU_SystemService
+ * @author Dongju Chae <dongju.chae@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include <tensor_filter_trix_engine.h>
+
+using namespace std;
+
+namespace nnstreamer {
+
+void init_filter_trix_engine (void) __attribute__ ((constructor));
+void fini_filter_trix_engine (void) __attribute__ ((destructor));
+
+TensorFilterTRIxEngine *TensorFilterTRIxEngine::registered = nullptr;
+const char *TensorFilterTRIxEngine::name = "trix-engine";
+const accl_hw TensorFilterTRIxEngine::hw_list[] = {ACCL_NPU_SR};
+const int TensorFilterTRIxEngine::num_hw = 1;
+
+TensorFilterTRIxEngine::TensorFilterTRIxEngine ()
+ : dev_type_ (NPUCOND_CONN_UNKNOWN),
+ dev_ (nullptr),
+ model_path_ (nullptr),
+ model_meta_ (nullptr),
+ model_id_ (0),
+ input_layout_ (nullptr),
+ output_layout_ (nullptr) {
+ gst_tensors_info_init (addressof (input_info_));
+ gst_tensors_info_init (addressof (output_info_));
+}
+
+TensorFilterTRIxEngine::~TensorFilterTRIxEngine () {
+ g_free (model_path_);
+ g_free (model_meta_);
+
+ gst_tensors_info_free (std::addressof (input_info_));
+ gst_tensors_info_free (std::addressof (output_info_));
+
+ if (dev_ != nullptr) {
+ unregisterNPUmodel_all (dev_);
+ putNPUdevice (dev_);
+ }
+}
+
+tensor_filter_subplugin &
+TensorFilterTRIxEngine::getEmptyInstance () {
+ return *(new TensorFilterTRIxEngine ());
+}
+
+void
+TensorFilterTRIxEngine::configure_instance (const GstTensorFilterProperties *prop) {
+ if (!prop->model_files[0] || prop->model_files[0][0] == '\0') {
+ ml_loge ("Unable to find a model filepath given\n");
+ throw invalid_argument ("Unable to find a model filepath given");
+ }
+
+ model_path_ = g_strdup (prop->model_files[0]);
+ model_meta_ = getNPUmodel_metadata (model_path_, false);
+ if (model_meta_ == nullptr) {
+ ml_loge ("Unable to extract the model metadata\n");
+ throw runtime_error ("Unable to extract the model metadata");
+ }
+
+ int status = -ENOENT;
+ for (int i = 0; i < prop->num_hw; i++) {
+ /* TRIV2 alias for now */
+ if (prop->hw_list[i] == ACCL_NPU_SR) {
+ status = getNPUdeviceByTypeAny (&dev_, NPUCOND_TRIV2_CONN_SOCIP, 2);
+ if (status == 0)
+ break;
+ }
+ }
+
+ if (status != 0) {
+ ml_loge ("Unable to find a proper NPU device\n");
+ throw runtime_error ("Unable to find a proper NPU device");
+ }
+
+ generic_buffer model_file;
+ model_file.filepath = model_path_;
+ model_file.size = model_meta_->size;
+ model_file.type = BUFFER_FILE;
+
+ if (registerNPUmodel (dev_, &model_file, &model_id_) != 0) {
+ ml_loge ("Unable to register the model\n");
+ throw runtime_error ("Unable to register the model");
+ }
+
+ /* check user-provided input tensor info */
+ if (prop->input_meta.num_tensors == 0) {
+ input_info_.num_tensors = model_meta_->input_seg_num;
+ for (uint32_t i = 0; i < input_info_.num_tensors; i++) {
+ input_info_.info[i].type = _NNS_UINT8;
+ for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
+ input_info_.info[i].dimension[j] =
+ model_meta_->input_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
+ }
+ } else {
+ gst_tensors_info_copy (&input_info_, &prop->input_meta);
+ }
+
+ /* check user-provided output tensor info */
+  if (prop->output_meta.num_tensors == 0) {
+ output_info_.num_tensors = model_meta_->output_seg_num;
+ for (uint32_t i = 0; i < output_info_.num_tensors; i++) {
+ output_info_.info[i].type = _NNS_UINT8;
+ for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
+ output_info_.info[i].dimension[j] =
+ model_meta_->output_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
+ }
+ } else {
+ gst_tensors_info_copy (&output_info_, &prop->output_meta);
+ }
+
+ input_layout_ = &(prop->input_layout[0]);
+ output_layout_ = &(prop->output_layout[0]);
+}
+
+static data_layout
+convert_data_layout (const tensor_layout &layout) {
+ switch (layout) {
+ case _NNS_LAYOUT_NHWC:
+ return DATA_LAYOUT_NHWC;
+ case _NNS_LAYOUT_NCHW:
+ return DATA_LAYOUT_NCHW;
+ default:
+ return DATA_LAYOUT_MODEL;
+ }
+}
+
+static data_type
+convert_data_type (const tensor_type &type) {
+ switch (type) {
+ case _NNS_INT32:
+ return DATA_TYPE_INT32;
+ case _NNS_UINT32:
+ return DATA_TYPE_UINT32;
+ case _NNS_INT16:
+ return DATA_TYPE_INT16;
+ case _NNS_UINT16:
+ return DATA_TYPE_UINT16;
+ case _NNS_INT8:
+ return DATA_TYPE_INT8;
+ case _NNS_UINT8:
+ return DATA_TYPE_UINT8;
+ case _NNS_FLOAT64:
+ return DATA_TYPE_FLOAT64;
+ case _NNS_FLOAT32:
+ return DATA_TYPE_FLOAT32;
+ case _NNS_INT64:
+ return DATA_TYPE_INT64;
+ case _NNS_UINT64:
+ return DATA_TYPE_UINT64;
+ default:
+ return DATA_TYPE_MODEL;
+ }
+}
+
+void
+TensorFilterTRIxEngine::set_data_info (tensors_data_info *in_info, tensors_data_info *out_info) {
+ in_info->num_info = model_meta_->input_seg_num;
+
+ for (uint32_t idx = 0; idx < in_info->num_info; ++idx) {
+ in_info->info[idx].layout = convert_data_layout (input_layout_[idx]);
+ in_info->info[idx].type = convert_data_type (input_info_.info[idx].type);
+ }
+
+ out_info->num_info = model_meta_->output_seg_num;
+
+ for (uint32_t idx = 0; idx < out_info->num_info; ++idx) {
+ out_info->info[idx].layout = convert_data_layout (output_layout_[idx]);
+ out_info->info[idx].type = convert_data_type (output_info_.info[idx].type);
+ }
+}
+
+void
+TensorFilterTRIxEngine::feed_input_data (const GstTensorMemory *input, input_buffers *input_buf) {
+ input_buf->num_buffers = model_meta_->input_seg_num;
+
+ for (uint32_t idx = 0; idx < input_buf->num_buffers; ++idx) {
+ input_buf->bufs[idx].addr = input[idx].data;
+ input_buf->bufs[idx].size = input[idx].size;
+ input_buf->bufs[idx].type = BUFFER_MAPPED;
+ }
+}
+
+void
+TensorFilterTRIxEngine::extract_output_data (const output_buffers *output_buf,
+ GstTensorMemory *output) {
+ /* internal logic error */
+ assert (output_buf->num_buffers == model_meta_->output_seg_num);
+
+ for (uint32_t idx = 0; idx < output_buf->num_buffers; ++idx) {
+ output[idx].data = output_buf->bufs[idx].addr;
+ output[idx].size = output_buf->bufs[idx].size;
+ }
+}
+
+void
+TensorFilterTRIxEngine::invoke (const GstTensorMemory *input, GstTensorMemory *output) {
+ int req_id;
+ int status;
+
+ status = createNPU_request (dev_, model_id_, &req_id);
+ if (status != 0) {
+ ml_loge ("Unable to create NPU request with model id (%u): %d", model_id_, status);
+ return;
+ }
+
+ tensors_data_info in_info;
+ tensors_data_info out_info;
+ /* set data info using metadata */
+ set_data_info (&in_info, &out_info);
+
+ input_buffers input_buf = {0};
+ output_buffers output_buf = {0};
+ /* feed input data to npu-engine */
+ feed_input_data (input, &input_buf);
+
+ status = setNPU_requestData (dev_, req_id, &input_buf, &in_info, &output_buf, &out_info);
+  if (status != 0) {
+    ml_loge ("Unable to set NPU request data for model %u: %d", model_id_, status);
+    removeNPU_request (dev_, req_id);
+    return;
+  }
+
+ status = submitNPU_request (dev_, req_id);
+  if (status != 0) {
+    ml_loge ("Unable to submit NPU request with id (%u): %d", req_id, status);
+    removeNPU_request (dev_, req_id);
+    return;
+  }
+ /* extract output data from npu-engine */
+ extract_output_data (&output_buf, output);
+
+ status = removeNPU_request (dev_, req_id);
+ if (status != 0) {
+ ml_loge ("Unable to remove NPU request with id (%u): %d", req_id, status);
+ return;
+ }
+}
+
+void
+TensorFilterTRIxEngine::getFrameworkInfo (GstTensorFilterFrameworkInfo &info) {
+ info.name = name;
+ info.allow_in_place = FALSE;
+ info.allocate_in_invoke = TRUE;
+ info.run_without_model = FALSE;
+ info.verify_model_path = TRUE;
+ info.hw_list = hw_list;
+ info.num_hw = num_hw;
+}
+
+int
+TensorFilterTRIxEngine::getModelInfo (model_info_ops ops, GstTensorsInfo &in_info,
+ GstTensorsInfo &out_info) {
+ if (ops != GET_IN_OUT_INFO) {
+ return -ENOENT;
+ }
+
+ gst_tensors_info_copy (addressof (in_info), addressof (input_info_));
+ gst_tensors_info_copy (addressof (out_info), addressof (output_info_));
+ return 0;
+}
+
+int
+TensorFilterTRIxEngine::eventHandler (event_ops ops, GstTensorFilterFrameworkEventData &data) {
+ return -ENOENT;
+}
+
+void
+TensorFilterTRIxEngine::init_filter_trix_engine () {
+ registered = tensor_filter_subplugin::register_subplugin<TensorFilterTRIxEngine> ();
+}
+
+void
+TensorFilterTRIxEngine::fini_filter_trix_engine () {
+ /* internal logic error */
+ assert (registered != nullptr);
+ tensor_filter_subplugin::unregister_subplugin (registered);
+}
+
+void
+init_filter_trix_engine () {
+ TensorFilterTRIxEngine::init_filter_trix_engine ();
+}
+
+void
+fini_filter_trix_engine () {
+ TensorFilterTRIxEngine::fini_filter_trix_engine ();
+}
+
+} /* namespace nnstreamer */
--- /dev/null
+/**
+ * Proprietary
+ * Copyright (C) 2020 Samsung Electronics
+ * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
+ */
+/**
+ * @file tensor_filter_trix_engine.h
+ * @date 20 Jan 2020
+ * @brief NNStreamer tensor-filter subplugin trix_engine header
+ * @see https://github.com/nnstreamer/nnstreamer
+ * @see https://github.sec.samsung.net/AIP/NPU_SystemService
+ * @author Dongju Chae <dongju.chae@samsung.com>
+ * @bug No known bugs
+ */
+
+#ifndef __TENSOR_FILTER_SUBPLUGIN_TRIxEngine_H__
+#define __TENSOR_FILTER_SUBPLUGIN_TRIxEngine_H__
+
+/* npu-engine headers */
+#include <npubinfmt.h>
+#include <libnpuhost.h>
+
+/* nnstreamer plugin api headers */
+#include <nnstreamer_plugin_api.h>
+#include <nnstreamer_cppplugin_api_filter.hh>
+
+#if defined(__TIZEN__)
+#include <dlog.h>
+#define TAG_NAME "nnstreamer_trix_engine"
+#define ml_logi(...) dlog_print (DLOG_INFO, TAG_NAME, __VA_ARGS__)
+#define ml_logw(...) dlog_print (DLOG_WARN, TAG_NAME, __VA_ARGS__)
+#define ml_loge(...) dlog_print (DLOG_ERROR, TAG_NAME, __VA_ARGS__)
+#define ml_logd(...) dlog_print (DLOG_DEBUG, TAG_NAME, __VA_ARGS__)
+#define ml_logf(...) dlog_print (DLOG_FATAL, TAG_NAME, __VA_ARGS__)
+#else
+#define ml_logi g_info
+#define ml_logw g_warning
+#define ml_loge g_critical
+#define ml_logd g_debug
+#define ml_logf g_error
+#endif
+
+namespace nnstreamer {
+
+class TensorFilterTRIxEngine : public tensor_filter_subplugin {
+ public:
+ TensorFilterTRIxEngine ();
+ ~TensorFilterTRIxEngine ();
+
+ /* mandatory methods */
+ tensor_filter_subplugin &getEmptyInstance ();
+ void configure_instance (const GstTensorFilterProperties *prop);
+ void invoke (const GstTensorMemory *input, GstTensorMemory *output);
+ void getFrameworkInfo (GstTensorFilterFrameworkInfo &info);
+ int getModelInfo (model_info_ops ops, GstTensorsInfo &in_info, GstTensorsInfo &out_info);
+ int eventHandler (event_ops ops, GstTensorFilterFrameworkEventData &data);
+
+ /* static methods */
+ static void init_filter_trix_engine ();
+ static void fini_filter_trix_engine ();
+
+ private:
+ void set_data_info (tensors_data_info *in_info, tensors_data_info *out_info);
+ void feed_input_data (const GstTensorMemory *input, input_buffers *input_buf);
+ void extract_output_data (const output_buffers *output_buf, GstTensorMemory *output);
+
+ static TensorFilterTRIxEngine *registered;
+ static const char *name;
+ static const accl_hw hw_list[];
+ static const int num_hw;
+
+ /* npu-engine */
+ dev_type dev_type_;
+ npudev_h dev_;
+ gchar *model_path_;
+ npubin_meta *model_meta_;
+ uint32_t model_id_;
+
+ /* nnstreamer */
+ const tensor_layout *input_layout_; /**< The data layout of input tensors */
+ const tensor_layout *output_layout_; /**< The data layout of output tensors */
+ GstTensorsInfo input_info_; /**< The data info of input tensors */
+ GstTensorsInfo output_info_; /**< The data info of output tensors */
+};
+
+} /* namespace nnstreamer */
+
+#endif /* __TENSOR_FILTER_SUBPLUGIN_TRIxEngine_H__ */