From: Rafal Walczyna <r.walczyna@samsung.com>
Date: Fri, 8 Jan 2021 15:39:37 +0000 (+0100)
Subject: [ML][Common] Add MLTensorsInfoManager
X-Git-Tag: submit/tizen/20210128.113801~20
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=refs%2Fchanges%2F57%2F251157%2F8;p=platform%2Fcore%2Fapi%2Fwebapi-plugins.git

[ML][Common] Add MLTensorsInfoManager

ACR: TWDAPI-273

[Verification] Built successful

Change-Id: I85a0fdcbcbc42ddcce406d5c6ae5a428536d4ce7
Signed-off-by: Rafal Walczyna <r.walczyna@samsung.com>
---

diff --git a/src/ml/ml.gyp b/src/ml/ml.gyp
index 1c60e04d..c959f430 100644
--- a/src/ml/ml.gyp
+++ b/src/ml/ml.gyp
@@ -23,6 +23,8 @@
         'ml_pipeline_nodeinfo.h',
         'ml_pipeline_switch.cc',
         'ml_pipeline_switch.h',
+        'ml_tensors_info_manager.cc',
+        'ml_tensors_info_manager.h',
         'ml_utils.cc',
         'ml_utils.h',
       ],
diff --git a/src/ml/ml_instance.cc b/src/ml/ml_instance.cc
index 853dab4c..97d1c37c 100644
--- a/src/ml/ml_instance.cc
+++ b/src/ml/ml_instance.cc
@@ -17,6 +17,7 @@
 #include "ml_instance.h"
 #include "ml_utils.h"
 
+#include "common/converter.h"
 #include "common/logger.h"
 #include "common/picojson.h"
 #include "common/platform_result.h"
@@ -69,6 +70,10 @@ MlInstance::~MlInstance() {
   ScopeLogger();
 }
 
+TensorsInfoManager& MlInstance::GetTensorsInfoManager() {
+  return tensors_info_manager_;
+}
+
 // Common ML API begin
 void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
   ScopeLogger("args: %s", args.serialize().c_str());
diff --git a/src/ml/ml_instance.h b/src/ml/ml_instance.h
index acb013c1..ff2f74b1 100644
--- a/src/ml/ml_instance.h
+++ b/src/ml/ml_instance.h
@@ -23,6 +23,8 @@
 #include "nnstreamer/nnstreamer-single.h"
 #include "nnstreamer/nnstreamer.h"
 
+#include "ml_tensors_info_manager.h"
+
 namespace extension {
 namespace ml {
 
@@ -30,11 +32,13 @@ class MlInstance : public common::ParsedInstance {
  public:
   MlInstance();
   virtual ~MlInstance();
+  TensorsInfoManager& GetTensorsInfoManager();
 
  private:
   // Common ML API begin
   void MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out);
+  TensorsInfoManager tensors_info_manager_;
   // Common ML API end
 
   // Single API begin
diff --git a/src/ml/ml_pipeline_nodeinfo.cc b/src/ml/ml_pipeline_nodeinfo.cc
index c1e7a34c..91fe836a 100644
--- a/src/ml/ml_pipeline_nodeinfo.cc
+++ b/src/ml/ml_pipeline_nodeinfo.cc
@@ -51,7 +51,7 @@ NodeInfo::NodeInfo(const std::string& name) : name_{name} {
 NodeInfo::~NodeInfo() {
   ScopeLogger("name: [%s], handle: [%p]", name_.c_str(), node_info_);
 
-  auto ret = ml_pipeline_element_release_handle(node_info_);
+  auto ret = ml_pipeline_element_release_handle(node_info_);
   if (ML_ERROR_NONE != ret) {
     LoggerE("ml_pipeline_element_release_handle() failed: [%d] (%s)", ret, get_error_message(ret));
   } else {
diff --git a/src/ml/ml_tensors_info_manager.cc b/src/ml/ml_tensors_info_manager.cc
new file mode 100644
index 00000000..ea80daf1
--- /dev/null
+++ b/src/ml/ml_tensors_info_manager.cc
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <glib.h>
+
+#include "ml_tensors_info_manager.h"
+
+using common::ErrorCode;
+using common::PlatformResult;
+
+namespace extension {
+namespace ml {
+
+TensorsInfo::TensorsInfo(ml_tensors_info_h handle, int id) : handle_(handle), id_(id) {
+  ScopeLogger();
+}
+
+TensorsInfo::~TensorsInfo() {
+  ScopeLogger();
+  this->NativeDestroy();
+}
+
+ml_tensors_info_h TensorsInfo::Handle() {
+  return this->handle_;
+}
+
+int TensorsInfo::Id() {
+  return this->id_;
+}
+
+PlatformResult TensorsInfo::NativeDestroy() {
+  ScopeLogger("id_: %d", id_);
+  int ret = ml_tensors_info_destroy(handle_);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_destroy failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to destroy handle");
+  }
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetCount(unsigned int* count) {
+  ScopeLogger("id_: %d", id_);
+
+  int ret = ml_tensors_info_get_count(handle_, count);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_count failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get count");
+  }
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeSetCount(unsigned int count) {
+  ScopeLogger("id_: %d, count: %u", id_, count);
+
+  int ret = ml_tensors_info_set_count(handle_, count);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_set_count failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to set count");
+  }
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetTensorDimensions(int index,
+                                                      unsigned int dim[ML_TENSOR_RANK_LIMIT]) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  int ret = ml_tensors_info_get_tensor_dimension(handle_, index, dim);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_tensor_dimension failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get tensor dimension");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeSetTensorDimensions(int index,
+                                                      unsigned int dim[ML_TENSOR_RANK_LIMIT]) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  int ret = ml_tensors_info_set_tensor_dimension(handle_, index, dim);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_set_tensor_dimension failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to set tensor dimension");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetTensorName(int index, std::string* name) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  if (nullptr == name) {
+    LoggerE("name is nullptr");
+    return PlatformResult(ErrorCode::ABORT_ERR, "Failed to get tensor name");
+  }
+
+  gchar* out_name = nullptr;
+  int ret = ml_tensors_info_get_tensor_name(handle_, index, &out_name);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_tensor_name failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get tensor name");
+  }
+  if (out_name) {
+    LoggerD("out_name: %s", out_name);
+    *name = std::string{out_name};
+  } else {
+    *name = std::string{};
+  }
+
+  g_free(out_name);
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeSetTensorName(int index, const std::string& name) {
+  ScopeLogger("id_: %d, index: %d, name: %s", id_, index, name.c_str());
+
+  int ret = ml_tensors_info_set_tensor_name(handle_, index, name.c_str());
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_set_tensor_name failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to set tensor name");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetTensorType(int index, ml_tensor_type_e* type) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  int ret = ml_tensors_info_get_tensor_type(handle_, index, type);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_tensor_type failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get tensor type");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeSetTensorType(int index, const ml_tensor_type_e type) {
+  ScopeLogger("id_: %d, index: %d, type: %d", id_, index, type);
+
+  int ret = ml_tensors_info_set_tensor_type(handle_, index, type);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_set_tensor_type failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to set tensor type");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+PlatformResult TensorsInfo::NativeGetTensorSize(int index, size_t* size) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  int ret = ml_tensors_info_get_tensor_size(handle_, index, size);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_get_tensor_size failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to get tensor size");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+TensorsInfoManager::TensorsInfoManager() : nextId_(0) {
+  ScopeLogger();
+}
+
+TensorsInfoManager::~TensorsInfoManager() {
+  ScopeLogger();
+
+  map_by_id_.clear();
+  map_by_handle_.clear();
+};
+
+TensorsInfo* TensorsInfoManager::CreateTensorsInfo() {
+  ScopeLogger();
+
+  ml_tensors_info_h handle;
+  int ret = ml_tensors_info_create(&handle);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_create failed: %d (%s)", ret, get_error_message(ret));
+    return nullptr;
+  }
+
+  int id = nextId_++;
+  auto t = std::make_shared<TensorsInfo>(handle, id);
+  map_by_id_[id] = t;
+  map_by_handle_[handle] = t;
+
+  return t.get();
+};
+
+TensorsInfo* TensorsInfoManager::GetTensorsInfo(int id) {
+  ScopeLogger("id: %d", id);
+
+  if (map_by_id_.end() != map_by_id_.find(id)) {
+    return map_by_id_[id].get();
+  }
+
+  return nullptr;
+}
+
+TensorsInfo* TensorsInfoManager::GetTensorsInfo(ml_tensors_info_h handle) {
+  ScopeLogger();
+
+  if (map_by_handle_.end() != map_by_handle_.find(handle)) {
+    return map_by_handle_[handle].get();
+  }
+  return nullptr;
+}
+
+}  // ml
+}  // extension
diff --git a/src/ml/ml_tensors_info_manager.h b/src/ml/ml_tensors_info_manager.h
new file mode 100644
index 00000000..734a56a2
--- /dev/null
+++ b/src/ml/ml_tensors_info_manager.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ML_TENSORS_INFO_MANAGER_H__
+#define __ML_TENSORS_INFO_MANAGER_H__
+
+#include <map>
+#include <memory>
+
+#include "common/logger.h"
+#include "common/platform_result.h"
+#include "ml_utils.h"
+
+#include <nnstreamer.h>
+
+using common::PlatformResult;
+using common::ErrorCode;
+
+namespace extension {
+namespace ml {
+
+class TensorsInfo {
+ public:
+  TensorsInfo(ml_tensors_info_h handle, int id);
+  ~TensorsInfo();
+
+  ml_tensors_info_h Handle();
+  int Id();
+
+  PlatformResult NativeDestroy();
+  PlatformResult NativeGetCount(unsigned int* count);
+  PlatformResult NativeSetCount(unsigned int count);
+  PlatformResult NativeGetTensorDimensions(int index, unsigned int dim[ML_TENSOR_RANK_LIMIT]);
+  PlatformResult NativeSetTensorDimensions(int index, unsigned int dim[ML_TENSOR_RANK_LIMIT]);
+  PlatformResult NativeGetTensorName(int index, std::string* name);
+  PlatformResult NativeSetTensorName(int index, const std::string& name);
+  PlatformResult NativeGetTensorType(int index, ml_tensor_type_e* type);
+  PlatformResult NativeSetTensorType(int index, const ml_tensor_type_e type);
+  PlatformResult NativeGetTensorSize(int index, size_t* size);
+
+ private:
+  ml_tensors_info_h handle_;
+  int id_;
+};
+
+class TensorsInfoManager {
+ public:
+  TensorsInfoManager();
+  ~TensorsInfoManager();
+  TensorsInfo* CreateTensorsInfo();
+
+  TensorsInfo* GetTensorsInfo(int id);
+  TensorsInfo* GetTensorsInfo(ml_tensors_info_h handle);
+
+ private:
+  TensorsInfoManager(TensorsInfoManager const&) = delete;
+  TensorsInfoManager& operator=(TensorsInfoManager const&) = delete;
+  std::map<int, std::shared_ptr<TensorsInfo>> map_by_id_;
+  std::map<ml_tensors_info_h, std::shared_ptr<TensorsInfo>> map_by_handle_;
+  int nextId_;
+};
+
+}  // ml
+}  // extension
+#endif  // __ML_TENSORS_INFO_MANAGER_H__