This patch revises the nnstreamer sub-plugin code to use the C++ sub-plugin APIs (nnstreamer_cppplugin_api_filter.hh), replacing the old C-style TRIV2 filter with a single 'srnpu' sub-plugin.
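
For reference, the C++ sub-plugin contract adopted here boils down to the
sketch below (based on nnstreamer_cppplugin_api_filter.hh as used in this
patch; the class name and method bodies are illustrative only):

    #include <nnstreamer_cppplugin_api_filter.hh>
    #include <cerrno>

    using namespace nnstreamer;

    class TensorFilterExample : public tensor_filter_subplugin {
     public:
      tensor_filter_subplugin &getEmptyInstance () {
        return *(new TensorFilterExample ());
      }
      void configure_instance (const GstTensorFilterProperties *prop) {
        /* open the device and load prop->model_files[0] here */
      }
      void invoke (const GstTensorMemory *input, GstTensorMemory *output) {
        /* run a single inference */
      }
      void getFrameworkInfo (GstTensorFilterFrameworkInfo &info) {
        info.name = "example";
      }
      int getModelInfo (model_info_ops ops, GstTensorsInfo &in_info,
                        GstTensorsInfo &out_info) {
        return -ENOENT; /* no model info reported in this sketch */
      }
      int eventHandler (event_ops ops,
                        GstTensorFilterFrameworkEventData &data) {
        return -ENOENT; /* no events handled */
      }
      static void register_self () {
        registered =
            tensor_filter_subplugin::register_subplugin<TensorFilterExample> ();
      }
      static void unregister_self () {
        tensor_filter_subplugin::unregister_subplugin (registered);
      }
     private:
      static TensorFilterExample *registered;
    };

    TensorFilterExample *TensorFilterExample::registered = nullptr;

register_self()/unregister_self() are typically wired to library
constructor/destructor functions, as done below for the srnpu filter.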
Signed-off-by: Dongju Chae <dongju.chae@samsung.com>
gst_dep = dependency('gstreamer-' + gst_api_verision)
nnstreamer_dep = dependency('nnstreamer')
-common_inc = include_directories('.')
-common_sources = [
- 'tensor_filter_subplugin.cc'
-]
-triv2_sources = [
- 'tensor_filter_subplugin_triv2.cc'
+srnpu_inc = include_directories('.')
+srnpu_sources = [
+ 'tensor_filter_srnpu.cc'
]
base_deps = [
base_deps += dependency('dlog')
endif
-subplugin_common_dep = declare_dependency(
- sources : common_sources,
- link_with : ne_library_shared,
+srnpu_shared = shared_library('nnstreamer_filter_srnpu',
+ sources : srnpu_sources,
+ include_directories : [ne_common_inc, ne_host_inc, srnpu_inc],
dependencies : [base_deps, iniparser_dep],
- include_directories : [common_inc, ne_common_inc, ne_host_inc]
-)
-
-triv2_shared = shared_library('nnstreamer_filter_srnpu',
- triv2_sources,
link_with : ne_library_shared,
- dependencies: subplugin_common_dep,
install: true,
install_dir: join_paths(get_option('prefix'), 'lib', 'nnstreamer', 'filters')
)
--- /dev/null
+/**
+ * Proprietary
+ * Copyright (C) 2020 Samsung Electronics
+ * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
+ */
+/**
+ * @file tensor_filter_srnpu.cc
+ * @date 20 Jan 2020
+ * @brief NNStreamer tensor-filter subplugin for srnpu devices
+ * @see http://github.com/nnstreamer/nnstreamer
+ * @see https://github.sec.samsung.net/AIP/NPU_SystemService
+ * @author Dongju Chae <dongju.chae@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+
+#include <tensor_filter_srnpu.h>
+
+/* assert() and the E* error codes are used below */
+#include <cassert>
+#include <cerrno>
+
+using namespace std;
+
+namespace nnstreamer {
+
+void init_filter_srnpu (void) __attribute__ ((constructor));
+void fini_filter_srnpu (void) __attribute__ ((destructor));
+
+TensorFilterSRNPU *TensorFilterSRNPU::registered = nullptr;
+const char *TensorFilterSRNPU::name = "srnpu";
+const accl_hw TensorFilterSRNPU::hw_list[] = {ACCL_NPU_SR};
+const int TensorFilterSRNPU::num_hw = 1;
+
+TensorFilterSRNPU::TensorFilterSRNPU ()
+ : dev_type_ (NPUCOND_CONN_UNKNOWN),
+ dev_ (nullptr),
+ model_path_ (nullptr),
+ model_meta_ (nullptr),
+ model_id_ (0),
+ input_layout_ (nullptr),
+ output_layout_ (nullptr) {
+ gst_tensors_info_init (addressof (input_info_));
+ gst_tensors_info_init (addressof (output_info_));
+}
+
+TensorFilterSRNPU::~TensorFilterSRNPU () {
+ g_free (model_path_);
+ g_free (model_meta_);
+
+ gst_tensors_info_free (std::addressof (input_info_));
+ gst_tensors_info_free (std::addressof (output_info_));
+
+ if (dev_ != nullptr) {
+ unregisterNPUmodel_all (dev_);
+ putNPUdevice (dev_);
+ }
+}
+
+tensor_filter_subplugin &
+TensorFilterSRNPU::getEmptyInstance () {
+ return *(new TensorFilterSRNPU ());
+}
+
+void
+TensorFilterSRNPU::configure_instance (const GstTensorFilterProperties *prop) {
+  if (prop->num_models < 1 || !prop->model_files[0] ||
+      prop->model_files[0][0] == '\0') {
+    ml_loge ("No model file path was given\n");
+    throw invalid_argument ("No model file path was given");
+  }
+
+ model_path_ = g_strdup (prop->model_files[0]);
+ model_meta_ = getNPUmodel_metadata (model_path_, false);
+ if (model_meta_ == nullptr) {
+ ml_loge ("Unable to extract the model metadata\n");
+ throw runtime_error ("Unable to extract the model metadata");
+ }
+
+ int status = -ENOENT;
+ for (int i = 0; i < prop->num_hw; i++) {
+ /* TRIV2 alias for now */
+ if (prop->hw_list[i] == ACCL_NPU_SR) {
+ status = getNPUdeviceByTypeAny (&dev_, NPUCOND_TRIV2_CONN_SOCIP, 2);
+ if (status == 0)
+ break;
+ }
+ }
+
+ if (status != 0) {
+ ml_loge ("Unable to find a proper NPU device\n");
+ throw runtime_error ("Unable to find a proper NPU device");
+ }
+
+ generic_buffer model_file;
+ model_file.filepath = model_path_;
+ model_file.size = model_meta_->size;
+ model_file.type = BUFFER_FILE;
+
+ if (registerNPUmodel (dev_, &model_file, &model_id_) != 0) {
+ ml_loge ("Unable to register the model\n");
+ throw runtime_error ("Unable to register the model");
+ }
+
+ /* check user-provided input tensor info */
+ if (prop->input_meta.num_tensors == 0) {
+ input_info_.num_tensors = model_meta_->input_seg_num;
+ for (uint32_t i = 0; i < input_info_.num_tensors; i++) {
+ input_info_.info[i].type = _NNS_UINT8;
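+      /* NNStreamer stores dimensions innermost-first, while the model
+       * metadata appears to keep them in the opposite order; flip here */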
+ for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
+ input_info_.info[i].dimension[j] =
+ model_meta_->input_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
+ }
+ } else {
+ gst_tensors_info_copy (&input_info_, &prop->input_meta);
+ }
+
+ /* check user-provided output tensor info */
+  if (prop->output_meta.num_tensors == 0) {
+ output_info_.num_tensors = model_meta_->output_seg_num;
+ for (uint32_t i = 0; i < output_info_.num_tensors; i++) {
+ output_info_.info[i].type = _NNS_UINT8;
+ for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; j++)
+ output_info_.info[i].dimension[j] =
+ model_meta_->output_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
+ }
+ } else {
+ gst_tensors_info_copy (&output_info_, &prop->output_meta);
+ }
+
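+  /* these layout arrays are owned by tensor_filter; assume they outlive
+   * this sub-plugin instance */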
+ input_layout_ = &(prop->input_layout[0]);
+ output_layout_ = &(prop->output_layout[0]);
+}
+
+static data_layout
+convert_data_layout (const tensor_layout &layout) {
+ switch (layout) {
+ case _NNS_LAYOUT_NHWC:
+ return DATA_LAYOUT_NHWC;
+ case _NNS_LAYOUT_NCHW:
+ return DATA_LAYOUT_NCHW;
+ default:
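+      /* fall back to the layout specified in the model metadata */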
+ return DATA_LAYOUT_MODEL;
+ }
+}
+
+static data_type
+convert_data_type (const tensor_type &type) {
+ switch (type) {
+ case _NNS_INT32:
+ return DATA_TYPE_INT32;
+ case _NNS_UINT32:
+ return DATA_TYPE_UINT32;
+ case _NNS_INT16:
+ return DATA_TYPE_INT16;
+ case _NNS_UINT16:
+ return DATA_TYPE_UINT16;
+ case _NNS_INT8:
+ return DATA_TYPE_INT8;
+ case _NNS_UINT8:
+ return DATA_TYPE_UINT8;
+ case _NNS_FLOAT64:
+ return DATA_TYPE_FLOAT64;
+ case _NNS_FLOAT32:
+ return DATA_TYPE_FLOAT32;
+ case _NNS_INT64:
+ return DATA_TYPE_INT64;
+ case _NNS_UINT64:
+ return DATA_TYPE_UINT64;
+ default:
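+      /* fall back to the quantization type specified in the model metadata */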
+ return DATA_TYPE_MODEL;
+ }
+}
+
+void
+TensorFilterSRNPU::set_data_info (tensors_data_info *in_info,
+ tensors_data_info *out_info) {
+ in_info->num_info = model_meta_->input_seg_num;
+
+ for (uint32_t idx = 0; idx < in_info->num_info; ++idx) {
+ in_info->info[idx].layout = convert_data_layout (input_layout_[idx]);
+ in_info->info[idx].type = convert_data_type (input_info_.info[idx].type);
+ }
+
+ out_info->num_info = model_meta_->output_seg_num;
+
+ for (uint32_t idx = 0; idx < out_info->num_info; ++idx) {
+ out_info->info[idx].layout = convert_data_layout (output_layout_[idx]);
+ out_info->info[idx].type = convert_data_type (output_info_.info[idx].type);
+ }
+}
+
+void
+TensorFilterSRNPU::feed_input_data (const GstTensorMemory *input,
+ input_buffers *input_buf) {
+ input_buf->num_buffers = model_meta_->input_seg_num;
+
+ for (uint32_t idx = 0; idx < input_buf->num_buffers; ++idx) {
+ input_buf->bufs[idx].addr = input[idx].data;
+ input_buf->bufs[idx].size = input[idx].size;
+ input_buf->bufs[idx].type = BUFFER_MAPPED;
+ }
+}
+
+void
+TensorFilterSRNPU::extract_output_data (const output_buffers *output_buf,
+ GstTensorMemory *output) {
+ /* internal logic error */
+ assert (output_buf->num_buffers == model_meta_->output_seg_num);
+
+ for (uint32_t idx = 0; idx < output_buf->num_buffers; ++idx) {
+ output[idx].data = output_buf->bufs[idx].addr;
+ output[idx].size = output_buf->bufs[idx].size;
+ }
+}
+
+void
+TensorFilterSRNPU::invoke (const GstTensorMemory *input,
+ GstTensorMemory *output) {
+ int req_id;
+ int status;
+
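+  /* NPU request lifecycle: create -> set data -> submit -> remove */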
+ status = createNPU_request (dev_, model_id_, &req_id);
+ if (status != 0) {
+ ml_loge ("Unable to create NPU request with model id (%u): %d", model_id_,
+ status);
+ return;
+ }
+
+ tensors_data_info in_info;
+ tensors_data_info out_info;
+ /* set data info using metadata */
+ set_data_info (&in_info, &out_info);
+
+ input_buffers input_buf = {0};
+ output_buffers output_buf = {0};
+ /* feed input data to npu-engine */
+ feed_input_data (input, &input_buf);
+
+ status = setNPU_requestData (dev_, req_id, &input_buf, &in_info, &output_buf,
+ &out_info);
+  if (status != 0) {
+    ml_loge ("Unable to set data for NPU request with id (%d): %d", req_id,
+        status);
+    removeNPU_request (dev_, req_id);
+    return;
+  }
+
+ status = submitNPU_request (dev_, req_id);
+  if (status != 0) {
+    ml_loge ("Unable to submit NPU request with id (%d): %d", req_id, status);
+    removeNPU_request (dev_, req_id);
+    return;
+  }
+ /* extract output data from npu-engine */
+ extract_output_data (&output_buf, output);
+
+ status = removeNPU_request (dev_, req_id);
+ if (status != 0) {
+ ml_loge ("Unable to remove NPU request with id (%u): %d", req_id, status);
+ return;
+ }
+}
+
+void
+TensorFilterSRNPU::getFrameworkInfo (GstTensorFilterFrameworkInfo &info) {
+ info.name = name;
+ info.allow_in_place = FALSE;
+ info.allocate_in_invoke = TRUE;
+ info.run_without_model = FALSE;
+ info.verify_model_path = TRUE;
+ info.hw_list = hw_list;
+ info.num_hw = num_hw;
+}
+
+int
+TensorFilterSRNPU::getModelInfo (model_info_ops ops, GstTensorsInfo &in_info,
+ GstTensorsInfo &out_info) {
+ if (ops != GET_IN_OUT_INFO) {
+ return -ENOENT;
+ }
+
+ gst_tensors_info_copy (addressof (in_info), addressof (input_info_));
+ gst_tensors_info_copy (addressof (out_info), addressof (output_info_));
+ return 0;
+}
+
+int
+TensorFilterSRNPU::eventHandler (event_ops ops,
+ GstTensorFilterFrameworkEventData &data) {
+ return -ENOENT;
+}
+
+void
+TensorFilterSRNPU::init_filter_srnpu () {
+ registered =
+ tensor_filter_subplugin::register_subplugin<TensorFilterSRNPU> ();
+}
+
+void
+TensorFilterSRNPU::fini_filter_srnpu () {
+ /* internal logic error */
+ assert (registered != nullptr);
+ tensor_filter_subplugin::unregister_subplugin (registered);
+}
+
+void
+init_filter_srnpu () {
+ TensorFilterSRNPU::init_filter_srnpu ();
+}
+
+void
+fini_filter_srnpu () {
+ TensorFilterSRNPU::fini_filter_srnpu ();
+}
+
+} /* namespace nnstreamer */
--- /dev/null
+/**
+ * Proprietary
+ * Copyright (C) 2020 Samsung Electronics
+ * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
+ */
+/**
+ * @file tensor_filter_srnpu.h
+ * @date 20 Jan 2020
+ * @brief NNStreamer tensor-filter subplugin srnpu header
+ * @see https://github.com/nnsuite/nnstreamer
+ * @see https://github.sec.samsung.net/AIP/NPU_SystemService
+ * @author Dongju Chae <dongju.chae@samsung.com>
+ * @bug No known bugs
+ */
+
+#ifndef __TENSOR_FILTER_SUBPLUGIN_SRNPU_H__
+#define __TENSOR_FILTER_SUBPLUGIN_SRNPU_H__
+
+/* npu-engine headers */
+#include <npubinfmt.h>
+#include <libnpuhost.h>
+
+/* nnstreamer plugin api headers */
+#include <nnstreamer_plugin_api.h>
+#include <nnstreamer_cppplugin_api_filter.hh>
+
+#if defined(__TIZEN__)
+#include <dlog.h>
+#define TAG_NAME "nnstreamer_srnpu"
+#define ml_logi(...) dlog_print (DLOG_INFO, TAG_NAME, __VA_ARGS__)
+#define ml_logw(...) dlog_print (DLOG_WARN, TAG_NAME, __VA_ARGS__)
+#define ml_loge(...) dlog_print (DLOG_ERROR, TAG_NAME, __VA_ARGS__)
+#define ml_logd(...) dlog_print (DLOG_DEBUG, TAG_NAME, __VA_ARGS__)
+#define ml_logf(...) dlog_print (DLOG_FATAL, TAG_NAME, __VA_ARGS__)
+#else
+#define ml_logi g_info
+#define ml_logw g_warning
+#define ml_loge g_critical
+#define ml_logd g_debug
+#define ml_logf g_error
+#endif
+
+namespace nnstreamer {
+
+class TensorFilterSRNPU : public tensor_filter_subplugin {
+ public:
+ TensorFilterSRNPU ();
+ ~TensorFilterSRNPU ();
+
+ /* mandatory methods */
+ tensor_filter_subplugin &getEmptyInstance ();
+ void configure_instance (const GstTensorFilterProperties *prop);
+ void invoke (const GstTensorMemory *input, GstTensorMemory *output);
+ void getFrameworkInfo (GstTensorFilterFrameworkInfo &info);
+ int getModelInfo (model_info_ops ops, GstTensorsInfo &in_info,
+ GstTensorsInfo &out_info);
+ int eventHandler (event_ops ops, GstTensorFilterFrameworkEventData &data);
+
+ /* static methods */
+ static void init_filter_srnpu ();
+ static void fini_filter_srnpu ();
+
+ private:
+ void set_data_info (tensors_data_info *in_info, tensors_data_info *out_info);
+ void feed_input_data (const GstTensorMemory *input, input_buffers *input_buf);
+ void extract_output_data (const output_buffers *output_buf,
+ GstTensorMemory *output);
+
+ static TensorFilterSRNPU *registered;
+ static const char *name;
+ static const accl_hw hw_list[];
+ static const int num_hw;
+
+ /* npu-engine */
+ dev_type dev_type_;
+ npudev_h dev_;
+ gchar *model_path_;
+ npubin_meta *model_meta_;
+ uint32_t model_id_;
+
+ /* nnstreamer */
+ const tensor_layout *input_layout_; /**< The data layout of input tensors */
+ const tensor_layout *output_layout_; /**< The data layout of output tensors */
+ GstTensorsInfo input_info_; /**< The data info of input tensors */
+ GstTensorsInfo output_info_; /**< The data info of output tensors */
+};
+
+} /* namespace nnstreamer */
+
+#endif /* __TENSOR_FILTER_SUBPLUGIN_SRNPU_H__ */
+++ /dev/null
-/**
- * Proprietary
- * Copyright (C) 2020 Samsung Electronics
- * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
- */
-/**
- * @file tensor_filter_subplugin.cc
- * @date 20 Jan 2020
- * @brief NNStreamer tensor-filter subplugin for srnpu devices
- * @see http://github.com/nnstreamer/nnstreamer
- * @see https://github.sec.samsung.net/AIP/NPU_SystemService
- * @author Dongju Chae <dongju.chae@samsung.com>
- * @bug No known bugs except for NYI items
- */
-
-#include "tensor_filter_subplugin.h"
-
-TensorFilterSRNPU::TensorFilterSRNPU (dev_type type)
- : dev_ (nullptr), type_ (type), meta_ (nullptr), model_id_ (0) {}
-
-TensorFilterSRNPU::~TensorFilterSRNPU () {
- if (dev_ != nullptr) {
- unregisterNPUmodel_all (dev_);
- putNPUdevice (dev_);
- }
-
- if (meta_ != nullptr)
- free (meta_);
-}
-
-int
-TensorFilterSRNPU::open () {
- int num_devices = getnumNPUdeviceByType (type_);
- if (num_devices <= 0)
- return -ENODEV;
-
- /* TODO: this always uses the first device */
- return getNPUdeviceByType (&dev_, type_, 0);
-}
-
-int
-TensorFilterSRNPU::invoke (const GstTensorFilterProperties* prop,
- const GstTensorMemory* input,
- GstTensorMemory* output) {
- input_buffers input_buf;
- output_buffers output_buf;
-
- input_buf.num_buffers = (prop->input_meta).num_tensors;
- for (uint32_t idx = 0; idx < input_buf.num_buffers; ++idx) {
- input_buf.bufs[idx].addr = input[idx].data;
- input_buf.bufs[idx].size = input[idx].size;
- input_buf.bufs[idx].type = BUFFER_MAPPED;
- }
-
- int status = runNPU_sync (dev_, model_id_, &input_buf, &output_buf);
- if (status < 0) {
- ml_loge ("runNPU_sync() failed: %d\n", status);
- return status;
- }
-
- /* extract output buffers; their data will be freed in destroyNotify() */
- if ((prop->output_meta).num_tensors == output_buf.num_buffers) {
- for (uint32_t idx = 0; idx < output_buf.num_buffers; ++idx) {
- output[idx].data = output_buf.bufs[idx].addr;
- }
- } else {
- ml_loge ("Wrong number of output tensors");
- return -EINVAL;
- }
-
- return 0;
-}
+++ /dev/null
-/**
- * Proprietary
- * Copyright (C) 2020 Samsung Electronics
- * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
- */
-/**
- * @file tensor_filter_subplugin.h
- * @date 20 Jan 2020
- * @brief NNStreamer tensor-filter subplugin srnpu header
- * @see https://github.com/nnsuite/nnstreamer
- * @see https://github.sec.samsung.net/AIP/NPU_SystemService
- * @author Dongju Chae <dongju.chae@samsung.com>
- * @bug No known bugs
- */
-
-#ifndef __TENSOR_FILTER_SUBPLUGIN_H__
-
-/* npu-engine headers */
-#include <npubinfmt.h>
-#include <libnpuhost.h>
-
-/* nnstreamer plugin api headers */
-#include <nnstreamer_plugin_api.h>
-#include <nnstreamer_plugin_api_filter.h>
-
-/* stdlib */
-#include <string>
-
-#if defined(__TIZEN__)
-#include <dlog.h>
-#define TAG_NAME "nnstreamer_srnpu"
-#define ml_logi(...) dlog_print (DLOG_INFO, TAG_NAME, __VA_ARGS__)
-#define ml_logw(...) dlog_print (DLOG_WARN, TAG_NAME, __VA_ARGS__)
-#define ml_loge(...) dlog_print (DLOG_ERROR, TAG_NAME, __VA_ARGS__)
-#define ml_logd(...) dlog_print (DLOG_DEBUG, TAG_NAME, __VA_ARGS__)
-#define ml_logf(...) dlog_print (DLOG_FATAL, TAG_NAME, __VA_ARGS__)
-#else
-#define ml_logi g_info
-#define ml_logw g_warning
-#define ml_loge g_critical
-#define ml_logd g_debug
-#define ml_logf g_error
-#endif
-
-class TensorFilterSRNPU {
- public:
- TensorFilterSRNPU (dev_type type);
- virtual ~TensorFilterSRNPU ();
-
- int open ();
- int invoke (const GstTensorFilterProperties *prop,
- const GstTensorMemory *input, GstTensorMemory *output);
-
- virtual int loadModel (std::string model_path) { return -EPERM; }
- virtual int getInputTensorDim (GstTensorsInfo *info) { return -EPERM; }
- virtual int getOutputTensorDim (GstTensorsInfo *info) { return -EPERM; }
-
- protected:
- npudev_h dev_;
- dev_type type_;
-
- npubin_meta *meta_;
- uint32_t model_id_;
-
- GstTensorsInfo input_tensor_meta_;
- GstTensorsInfo output_tensor_meta_;
-
- std::string model_path_;
-};
-
-#endif /* __TENSOR_FILTER_SUBPLUGIN_H__ */
+++ /dev/null
-/**
- * Proprietary
- * Copyright (C) 2020 Samsung Electronics
- * Copyright (C) 2020 Dongju Chae <dongju.chae@samsung.com>
- */
-/**
- * @file tensor_filter_subplugin_triv2.cc
- * @date 20 Jan 2020
- * @brief NNStreamer tensor-filter subplugin for TRIV2
- * @see http://github.com/nnstreamer/nnstreamer
- * @see https://github.sec.samsung.net/AIP/NPU_SystemService
- * @author Dongju Chae <dongju.chae@samsung.com>
- * @bug No known bugs except for NYI items
- */
-
-#include <tensor_filter_subplugin.h>
-#include <inttypes.h>
-
-#define DEV_TYPE (NPUCOND_TRIV2_CONN_SOCIP)
-
-void init_filter_srnpu_triv2 (void) __attribute__ ((constructor));
-void fini_filter_srnpu_triv2 (void) __attribute__ ((destructor));
-
-static const gchar *triv2_accl_support[] = {ACCL_NPU_SR_STR, NULL};
-
-class TensorFilterTRIV2 : public TensorFilterSRNPU {
- public:
- TensorFilterTRIV2 () : TensorFilterSRNPU (DEV_TYPE) {}
-
- int loadModel (std::string model_path) {
- meta_ = getNPUmodel_metadata (model_path.c_str (), false);
- if (meta_ == nullptr) {
- return -EINVAL;
- }
-
- uint64_t version = NPUBIN_VERSION (meta_->magiccode);
- if (version != 3) {
- ml_loge ("Unsupported npubinfmt version: %" PRId64, version);
- return -EINVAL;
- }
-
- generic_buffer model;
- model.type = BUFFER_FILE;
- model.size = meta_->size;
- model.filepath = model_path.c_str ();
-
- int status = registerNPUmodel (dev_, &model, &model_id_);
- if (status != 0) {
- ml_loge ("Unable to register NPU model: %d", status);
- return status;
- }
-
- status = set_constraint ();
- if (status != 0) {
- ml_loge ("Unable to set constraints: %d", status);
- return status;
- }
-
- return 0;
- }
-
- int getInputTensorDim (GstTensorsInfo *info) {
- if (info == nullptr || meta_ == nullptr)
- return -EINVAL;
-
- gst_tensors_info_init (info);
- gst_tensors_info_copy (info, &input_tensor_meta_);
-
- return 0;
- }
-
- int getOutputTensorDim (GstTensorsInfo *info) {
- if (info == nullptr || meta_ == nullptr)
- return -EINVAL;
-
- gst_tensors_info_init (info);
- gst_tensors_info_copy (info, &output_tensor_meta_);
-
- return 0;
- }
-
- int setDataInfo (const GstTensorFilterProperties *prop) {
- tensors_data_info info_in, info_out;
-
- info_in.num_info = meta_->input_seg_num;
- for (uint32_t idx = 0; idx < info_in.num_info; idx++) {
- info_in.info[idx].layout = DATA_LAYOUT_NHWC;
- info_in.info[idx].type = get_data_type (prop->input_meta.info[idx].type);
- }
-
- info_out.num_info = meta_->output_seg_num;
- for (uint32_t idx = 0; idx < info_out.num_info; idx++) {
- info_out.info[idx].layout = DATA_LAYOUT_NHWC;
- info_out.info[idx].type =
- get_data_type (prop->output_meta.info[idx].type);
- }
-
- copy_tensors_info (&info_in, &input_tensor_meta_, true);
- copy_tensors_info (&info_out, &output_tensor_meta_, false);
-
- return setNPU_dataInfo (dev_, model_id_, &info_in, &info_out);
- }
-
- private:
- void copy_tensors_info (const tensors_data_info *ne_info,
- GstTensorsInfo *nns_info, bool is_input) {
- gst_tensors_info_init (nns_info);
-
- nns_info->num_tensors = ne_info->num_info;
- for (uint32_t i = 0; i < ne_info->num_info; i++) {
- nns_info->info[i].type = get_tensor_type (ne_info->info[i].type);
- for (uint32_t j = 0; j < NNS_TENSOR_RANK_LIMIT; ++j) {
- if (is_input)
- nns_info->info[i].dimension[j] =
- meta_->input_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
- else
- nns_info->info[i].dimension[j] =
- meta_->output_seg_dims[i][NNS_TENSOR_RANK_LIMIT - j - 1];
- }
- }
- }
-
- data_layout get_data_layout (tensor_layout layout) {
- switch (layout) {
- case _NNS_LAYOUT_NHWC:
- /* currently, support only NHWC */
- return DATA_LAYOUT_NHWC;
- default:
- return DATA_LAYOUT_MODEL;
- }
- }
-
- data_type get_data_type (tensor_type type) {
- switch (type) {
- case _NNS_INT8:
- return DATA_TYPE_INT8;
- case _NNS_UINT8:
- return DATA_TYPE_UINT8;
- case _NNS_INT16:
- return DATA_TYPE_INT16;
- case _NNS_UINT16:
- return DATA_TYPE_UINT16;
- case _NNS_INT32:
- return DATA_TYPE_INT32;
- case _NNS_UINT32:
- return DATA_TYPE_UINT32;
- case _NNS_INT64:
- return DATA_TYPE_INT32;
- case _NNS_UINT64:
- return DATA_TYPE_UINT32;
- case _NNS_FLOAT32:
- return DATA_TYPE_FLOAT32;
- case _NNS_FLOAT64:
- return DATA_TYPE_FLOAT64;
- default:
- /* use the quantization type specified in the model metadata */
- return DATA_TYPE_MODEL;
- }
- }
-
- tensor_type get_tensor_type (data_type type) {
- switch (type) {
- case DATA_TYPE_INT8:
- return _NNS_INT8;
- case DATA_TYPE_UINT8:
- return _NNS_UINT8;
- case DATA_TYPE_INT16:
- return _NNS_INT16;
- case DATA_TYPE_UINT16:
- return _NNS_UINT16;
- case DATA_TYPE_INT32:
- return _NNS_INT32;
- case DATA_TYPE_UINT32:
- return _NNS_UINT32;
- case DATA_TYPE_FLOAT32:
- return _NNS_FLOAT32;
- case DATA_TYPE_FLOAT64:
- return _NNS_FLOAT64;
- default:
- /* default is uint8 */
- return _NNS_UINT8;
- }
- }
-
- int set_constraint () {
- npuConstraint constraint;
-
- constraint.timeout_ms = 5000;
- constraint.priority = NPU_PRIORITY_MID;
- constraint.notimode = NPU_INTERRUPT;
-
- return setNPU_constraint (dev_, model_id_, constraint);
- }
-};
-
-static void
-triv2_close (const GstTensorFilterProperties *prop, void **private_data) {
- TensorFilterTRIV2 *srnpu = static_cast<TensorFilterTRIV2 *> (*private_data);
-
- if (srnpu != nullptr)
- delete srnpu;
-
- *private_data = nullptr;
-}
-
-static int
-triv2_open (const GstTensorFilterProperties *prop, void **private_data) {
- if (prop->num_models > 1) {
- ml_logw ("Multiple models are not supported");
- }
-
- if (prop->model_files[0] == nullptr || prop->model_files[0][0] == '\x00') {
- ml_loge ("Invalid model path provided");
- return -EINVAL;
- }
-
- TensorFilterTRIV2 *srnpu = static_cast<TensorFilterTRIV2 *> (*private_data);
- if (srnpu != nullptr) {
- triv2_close (prop, private_data);
- srnpu = nullptr;
- }
-
- srnpu = new TensorFilterTRIV2 ();
- *private_data = srnpu;
-
- int status = srnpu->open ();
- if (status != 0) {
- ml_loge ("Failed to open TRIV2 device: %d", status);
- goto err_free;
- }
-
- status = srnpu->loadModel (prop->model_files[0]);
- if (status != 0) {
- ml_loge ("Failed to load model: %d", status);
- goto err_free;
- }
-
- status = srnpu->setDataInfo (prop);
- if (status != 0) {
- ml_loge ("Failed to set data info: %d", status);
- goto err_free;
- }
-
- return 0;
-
-err_free:
- delete srnpu;
- *private_data = nullptr;
-
- return status;
-}
-
-static int
-triv2_invoke (const GstTensorFilterProperties *prop, void **private_data,
- const GstTensorMemory *input, GstTensorMemory *output) {
- TensorFilterTRIV2 *srnpu = static_cast<TensorFilterTRIV2 *> (*private_data);
-
- g_return_val_if_fail (srnpu != nullptr, -EINVAL);
-
- return srnpu->invoke (prop, input, output);
-}
-
-static int
-triv2_getInputDim (const GstTensorFilterProperties *prop, void **private_data,
- GstTensorsInfo *info) {
- TensorFilterTRIV2 *srnpu = static_cast<TensorFilterTRIV2 *> (*private_data);
-
- g_return_val_if_fail (srnpu != nullptr, -EINVAL);
-
- return srnpu->getInputTensorDim (info);
-}
-
-static int
-triv2_getOutputDim (const GstTensorFilterProperties *prop, void **private_data,
- GstTensorsInfo *info) {
- TensorFilterTRIV2 *srnpu = static_cast<TensorFilterTRIV2 *> (*private_data);
-
- g_return_val_if_fail (srnpu != nullptr, -EINVAL);
-
- return srnpu->getOutputTensorDim (info);
-}
-
-static int
-triv2_checkAvailability (accl_hw hw) {
- if (g_strv_contains (triv2_accl_support, get_accl_hw_str (hw)))
- return 0;
-
- return -ENOENT;
-}
-
-static gchar filter_subplugin_srnpu_triv2[] = "srnpu";
-
-static GstTensorFilterFramework NNS_support_srnpu_triv2 = {
- .version = GST_TENSOR_FILTER_FRAMEWORK_V0,
- .open = triv2_open,
- .close = triv2_close,
-};
-
-void
-init_filter_srnpu_triv2 (void) {
- if (getnumNPUdeviceByType (DEV_TYPE) <= 0) {
- ml_loge ("No available TRIV2 device");
- return;
- }
-
- NNS_support_srnpu_triv2.name = filter_subplugin_srnpu_triv2;
- NNS_support_srnpu_triv2.allow_in_place = FALSE;
- NNS_support_srnpu_triv2.allocate_in_invoke = TRUE;
- NNS_support_srnpu_triv2.run_without_model = FALSE;
- NNS_support_srnpu_triv2.verify_model_path = FALSE;
- NNS_support_srnpu_triv2.invoke_NN = triv2_invoke;
- NNS_support_srnpu_triv2.getInputDimension = triv2_getInputDim;
- NNS_support_srnpu_triv2.getOutputDimension = triv2_getOutputDim;
- NNS_support_srnpu_triv2.checkAvailability = triv2_checkAvailability;
- NNS_support_srnpu_triv2.destroyNotify = nullptr; /* reply on g_free() */
-
- nnstreamer_filter_probe (&NNS_support_srnpu_triv2);
-}
-
-void
-fini_filter_srnpu_triv2 (void) {
- if (getnumNPUdeviceByType (DEV_TYPE) <= 0)
- return;
-
- nnstreamer_filter_exit (NNS_support_srnpu_triv2.name);
-}