[ML][Common] Add MLTensorsInfoManager 57/251157/8
authorRafal Walczyna <r.walczyna@samsung.com>
Fri, 8 Jan 2021 15:39:37 +0000 (16:39 +0100)
committerRafal Walczyna <r.walczyna@samsung.com>
Tue, 19 Jan 2021 10:04:13 +0000 (11:04 +0100)
ACR: TWDAPI-273
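
TensorsInfoManager creates and owns the native ml_tensors_info_h
handles and maps each one to an integer id, so the JS layer can refer
to a tensors info object by id. A minimal usage sketch ("instance"
stands for any MlInstance; the wiring around it is illustrative only):

    TensorsInfoManager& manager = instance.GetTensorsInfoManager();
    TensorsInfo* info = manager.CreateTensorsInfo();
    if (info) {
      info->NativeSetCount(1);
      // Later, the same object can be looked up by id or native handle:
      TensorsInfo* same = manager.GetTensorsInfo(info->Id());
    }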

[Verification] Built successfully

Change-Id: I85a0fdcbcbc42ddcce406d5c6ae5a428536d4ce7
Signed-off-by: Rafal Walczyna <r.walczyna@samsung.com>
src/ml/ml.gyp
src/ml/ml_instance.cc
src/ml/ml_instance.h
src/ml/ml_pipeline_nodeinfo.cc
src/ml/ml_tensors_info_manager.cc [new file with mode: 0644]
src/ml/ml_tensors_info_manager.h [new file with mode: 0644]

index 1c60e04..c959f43 100644 (file)
@@ -23,6 +23,8 @@
         'ml_pipeline_nodeinfo.h',
         'ml_pipeline_switch.cc',
         'ml_pipeline_switch.h',
+        'ml_tensors_info_manager.cc',
+        'ml_tensors_info_manager.h',
         'ml_utils.cc',
         'ml_utils.h',
       ],
index 853dab4..97d1c37 100644 (file)
@@ -17,6 +17,7 @@
 #include "ml_instance.h"
 #include "ml_utils.h"
 
+#include "common/converter.h"
 #include "common/logger.h"
 #include "common/picojson.h"
 #include "common/platform_result.h"
@@ -69,6 +70,11 @@ MlInstance::~MlInstance() {
   ScopeLogger();
 }
 
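+// Returns the manager that owns this instance's TensorsInfo objects.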
+TensorsInfoManager& MlInstance::GetTensorsInfoManager() {
+  return tensors_info_manager_;
+}
+
 // Common ML API begin
 void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
   ScopeLogger("args: %s", args.serialize().c_str());
index acb013c..ff2f74b 100644 (file)
@@ -23,6 +23,8 @@
 #include "nnstreamer/nnstreamer-single.h"
 #include "nnstreamer/nnstreamer.h"
 
+#include "ml_tensors_info_manager.h"
+
 namespace extension {
 namespace ml {
 
@@ -30,11 +32,13 @@ class MlInstance : public common::ParsedInstance {
  public:
   MlInstance();
   virtual ~MlInstance();
+  TensorsInfoManager& GetTensorsInfoManager();
 
  private:
   // Common ML API begin
   void MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out);
 
+  TensorsInfoManager tensors_info_manager_;
   // Common ML API end
 
   // Single API begin
index c1e7a34..91fe836 100644 (file)
@@ -51,7 +51,7 @@ NodeInfo::NodeInfo(const std::string& name) : name_{name} {
 NodeInfo::~NodeInfo() {
   ScopeLogger("name: [%s], handle: [%p]", name_.c_str(), node_info_);
 
-  auto ret =  ml_pipeline_element_release_handle(node_info_);
+  auto ret = ml_pipeline_element_release_handle(node_info_);
   if (ML_ERROR_NONE != ret) {
     LoggerE("ml_pipeline_element_release_handle() failed: [%d] (%s)", ret, get_error_message(ret));
   } else {
diff --git a/src/ml/ml_tensors_info_manager.cc b/src/ml/ml_tensors_info_manager.cc
new file mode 100644 (file)
index 0000000..ea80daf
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License");
+ *    you may not use this file except in compliance with the License.
+ *    You may obtain a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS,
+ *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *    See the License for the specific language governing permissions and
+ *    limitations under the License.
+ */
+
+#include <glib.h>
+
+#include "ml_tensors_info_manager.h"
+
+using common::ErrorCode;
+using common::PlatformResult;
+
+namespace extension {
+namespace ml {
+
+TensorsInfo::TensorsInfo(ml_tensors_info_h handle, int id) : handle_(handle), id_(id) {
+  ScopeLogger();
+}
+
+TensorsInfo::~TensorsInfo() {
+  ScopeLogger();
+  this->NativeDestroy();
+}
+
+ml_tensors_info_h TensorsInfo::Handle() {
+  return this->handle_;
+}
+
+int TensorsInfo::Id() {
+  return this->id_;
+}
+
+PlatformResult TensorsInfo::NativeDestroy() {
+  ScopeLogger("id_: %d", id_);
+  int ret = ml_tensors_info_destroy(handle_);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_destroy failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to destroy handle");
+  }
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetCount(unsigned int* count) {
+  ScopeLogger("id_: %d", id_);
+
+  int ret = ml_tensors_info_get_count(handle_, count);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_count failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get count");
+  }
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeSetCount(unsigned int count) {
+  ScopeLogger("id_: %d, count: %u", id_, count);
+
+  int ret = ml_tensors_info_set_count(handle_, count);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_set_count failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to set count");
+  }
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetTensorDimensions(int index,
+                                                      unsigned int dim[ML_TENSOR_RANK_LIMIT]) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  int ret = ml_tensors_info_get_tensor_dimension(handle_, index, dim);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_tensor_dimension failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get tensor dimension");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeSetTensorDimensions(int index,
+                                                      unsigned int dim[ML_TENSOR_RANK_LIMIT]) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  int ret = ml_tensors_info_set_tensor_dimension(handle_, index, dim);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_set_tensor_dimension failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to set tensor dimension");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetTensorName(int index, std::string* name) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  if (nullptr == name) {
+    LoggerE("name is nullptr");
+    return PlatformResult(ErrorCode::ABORT_ERR, "Failed to get tensor name");
+  }
+
+  gchar* out_name = nullptr;
+  int ret = ml_tensors_info_get_tensor_name(handle_, index, &out_name);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_tensor_name failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get tensor name");
+  }
+  if (out_name) {
+    LoggerD("out_name: %s", out_name);
+    *name = std::string{out_name};
+  } else {
+    *name = std::string{};
+  }
+
+  g_free(out_name);
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeSetTensorName(int index, const std::string& name) {
+  ScopeLogger("id_: %d, index: %d, name: %s", id_, index, name.c_str());
+
+  int ret = ml_tensors_info_set_tensor_name(handle_, index, name.c_str());
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_set_tensor_name failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to set tensor name");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetTensorType(int index, ml_tensor_type_e* type) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  int ret = ml_tensors_info_get_tensor_type(handle_, index, type);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_tensor_type failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get tensor type");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeSetTensorType(int index, const ml_tensor_type_e type) {
+  ScopeLogger("id_: %d, index: %d, type: %d", id_, index, type);
+
+  int ret = ml_tensors_info_set_tensor_type(handle_, index, type);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_set_tensor_type failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to set tensor type");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetTensorSize(int index, size_t* size) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  int ret = ml_tensors_info_get_tensor_size(handle_, index, size);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_tensor_size failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get tensor size");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
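+// TensorsInfoManager tracks every TensorsInfo in two maps, keyed by the
+// integer id handed out to callers and by the native handle, so objects
+// can be found from either side. shared_ptr ownership keeps each entry
+// alive for the manager's lifetime.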
+TensorsInfoManager::TensorsInfoManager() : next_id_(0) {
+  ScopeLogger();
+}
+
+TensorsInfoManager::~TensorsInfoManager() {
+  ScopeLogger();
+
+  map_by_id_.clear();
+  map_by_handle_.clear();
+}
+
+TensorsInfo* TensorsInfoManager::CreateTensorsInfo() {
+  ScopeLogger();
+
+  ml_tensors_info_h handle;
+  int ret = ml_tensors_info_create(&handle);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_create failed: %d (%s)", ret, get_error_message(ret));
+    return nullptr;
+  }
+
+  int id = next_id_++;
+  auto t = std::make_shared<TensorsInfo>(handle, id);
+  map_by_id_[id] = t;
+  map_by_handle_[handle] = t;
+
+  return t.get();
+}
+
+TensorsInfo* TensorsInfoManager::GetTensorsInfo(int id) {
+  ScopeLogger("id: %d", id);
+
+  auto it = map_by_id_.find(id);
+  if (map_by_id_.end() != it) {
+    return it->second.get();
+  }
+
+  return nullptr;
+}
+
+TensorsInfo* TensorsInfoManager::GetTensorsInfo(ml_tensors_info_h handle) {
+  ScopeLogger();
+
+  auto it = map_by_handle_.find(handle);
+  if (map_by_handle_.end() != it) {
+    return it->second.get();
+  }
+  return nullptr;
+}
+
+}  // namespace ml
+}  // namespace extension
diff --git a/src/ml/ml_tensors_info_manager.h b/src/ml/ml_tensors_info_manager.h
new file mode 100644 (file)
index 0000000..734a56a
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ *    Licensed under the Apache License, Version 2.0 (the "License");
+ *    you may not use this file except in compliance with the License.
+ *    You may obtain a copy of the License at
+ *
+ *        http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *    Unless required by applicable law or agreed to in writing, software
+ *    distributed under the License is distributed on an "AS IS" BASIS,
+ *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *    See the License for the specific language governing permissions and
+ *    limitations under the License.
+ */
+
+#ifndef __ML_TENSORS_INFO_MANAGER_H__
+#define __ML_TENSORS_INFO_MANAGER_H__
+
+#include <map>
+#include <memory>
+#include <string>
+
+#include "common/logger.h"
+#include "common/platform_result.h"
+#include "ml_utils.h"
+
+#include <nnstreamer/nnstreamer.h>
+
+using common::ErrorCode;
+using common::PlatformResult;
+
+namespace extension {
+namespace ml {
+
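+// Thin wrapper around nnstreamer's ml_tensors_info_h: forwards the native
+// getters/setters and destroys the handle in the destructor.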
+class TensorsInfo {
+ public:
+  TensorsInfo(ml_tensors_info_h handle, int id);
+  ~TensorsInfo();
+
+  ml_tensors_info_h Handle();
+  int Id();
+
+  PlatformResult NativeDestroy();
+  PlatformResult NativeGetCount(unsigned int* count);
+  PlatformResult NativeSetCount(unsigned int count);
+  PlatformResult NativeGetTensorDimensions(int index, unsigned int dim[ML_TENSOR_RANK_LIMIT]);
+  PlatformResult NativeSetTensorDimensions(int index, unsigned int dim[ML_TENSOR_RANK_LIMIT]);
+  PlatformResult NativeGetTensorName(int index, std::string* name);
+  PlatformResult NativeSetTensorName(int index, const std::string& name);
+  PlatformResult NativeGetTensorType(int index, ml_tensor_type_e* type);
+  PlatformResult NativeSetTensorType(int index, const ml_tensor_type_e type);
+  PlatformResult NativeGetTensorSize(int index, size_t* size);
+
+ private:
+  ml_tensors_info_h handle_;
+  int id_;
+};
+
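+// Creates and owns TensorsInfo objects. Raw pointers returned by
+// CreateTensorsInfo() and GetTensorsInfo() stay valid while the manager
+// holds the entry; the class is non-copyable.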
+class TensorsInfoManager {
+ public:
+  TensorsInfoManager();
+  ~TensorsInfoManager();
+  TensorsInfo* CreateTensorsInfo();
+
+  TensorsInfo* GetTensorsInfo(int id);
+  TensorsInfo* GetTensorsInfo(ml_tensors_info_h handle);
+
+ private:
+  TensorsInfoManager(TensorsInfoManager const&) = delete;
+  TensorsInfoManager& operator=(TensorsInfoManager const&) = delete;
+  std::map<int, std::shared_ptr<TensorsInfo>> map_by_id_;
+  std::map<ml_tensors_info_h, std::shared_ptr<TensorsInfo>> map_by_handle_;
+  int next_id_;
+};
+
+}  // namespace ml
+}  // namespace extension
+#endif  // __ML_TENSORS_INFO_MANAGER_H__