--- /dev/null
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <glib.h>
+
+#include "ml_tensors_info_manager.h"
+
+using common::ErrorCode;
+using common::PlatformResult;
+
+namespace extension {
+namespace ml {
+
+TensorsInfo::TensorsInfo(ml_tensors_info_h handle, int id) : handle_(handle), id_(id) {
+ ScopeLogger();
+}
+
+// Releases the underlying native handle via NativeDestroy().
+TensorsInfo::~TensorsInfo() {
+  ScopeLogger();
+  NativeDestroy();
+}
+
+// Returns the raw native handle (non-owning view for callers).
+ml_tensors_info_h TensorsInfo::Handle() {
+  return handle_;
+}
+
+// Returns the identifier assigned by TensorsInfoManager.
+int TensorsInfo::Id() {
+  return id_;
+}
+
+// Destroys the underlying native handle.
+//
+// Idempotent: after a successful destroy the handle is cleared to nullptr,
+// so a later call (in particular the destructor, which always invokes this
+// method) does not free the same handle twice.
+//
+// @return NO_ERROR on success or when there is nothing left to destroy;
+//         otherwise the mapped platform error.
+PlatformResult TensorsInfo::NativeDestroy() {
+  ScopeLogger("id_: %d", id_);
+  if (nullptr == handle_) {
+    // Already destroyed (or never created) - nothing to release.
+    return PlatformResult(ErrorCode::NO_ERROR);
+  }
+  int ret = ml_tensors_info_destroy(handle_);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_destroy failed: %d (%s)", ret, get_error_message(ret));
+    return util::ToPlatformResult(ret, "Failed to destroy handle");
+  }
+  // Clear the handle so repeated destruction is a safe no-op.
+  handle_ = nullptr;
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Queries the number of tensors described by this info object.
+// @param[out] count receives the tensor count on success.
+PlatformResult TensorsInfo::NativeGetCount(unsigned int* count) {
+  ScopeLogger("id_: %d", id_);
+
+  const int error = ml_tensors_info_get_count(handle_, count);
+  if (error != ML_ERROR_NONE) {
+    LoggerE("ml_tensors_info_get_count failed: %d (%s)", error, get_error_message(error));
+    return util::ToPlatformResult(error, "Failed to get count");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Sets the number of tensors described by this info object.
+// @param count new tensor count.
+PlatformResult TensorsInfo::NativeSetCount(unsigned int count) {
+  ScopeLogger("id_: %d, count: %u", id_, count);
+
+  const int error = ml_tensors_info_set_count(handle_, count);
+  if (error != ML_ERROR_NONE) {
+    LoggerE("ml_tensors_info_set_count failed: %d (%s)", error, get_error_message(error));
+    return util::ToPlatformResult(error, "Failed to set count");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Reads the dimension array of the tensor at |index|.
+// @param[out] dim receives ML_TENSOR_RANK_LIMIT dimension entries.
+PlatformResult TensorsInfo::NativeGetTensorDimensions(int index,
+                                                      unsigned int dim[ML_TENSOR_RANK_LIMIT]) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  const int error = ml_tensors_info_get_tensor_dimension(handle_, index, dim);
+  if (error != ML_ERROR_NONE) {
+    LoggerE("ml_tensors_info_get_tensor_dimension failed: %d (%s)", error,
+            get_error_message(error));
+    return util::ToPlatformResult(error, "Failed to get tensor dimension");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Writes the dimension array of the tensor at |index|.
+// @param dim ML_TENSOR_RANK_LIMIT dimension entries to apply.
+PlatformResult TensorsInfo::NativeSetTensorDimensions(int index,
+                                                      unsigned int dim[ML_TENSOR_RANK_LIMIT]) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  const int error = ml_tensors_info_set_tensor_dimension(handle_, index, dim);
+  if (error != ML_ERROR_NONE) {
+    LoggerE("ml_tensors_info_set_tensor_dimension failed: %d (%s)", error,
+            get_error_message(error));
+    return util::ToPlatformResult(error, "Failed to set tensor dimension");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Fetches the name of the tensor at |index| into |name|.
+// The C API allocates the string; it is copied out and freed here.
+// @param[out] name receives the tensor name (empty when the API yields none).
+PlatformResult TensorsInfo::NativeGetTensorName(int index, std::string* name) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  if (!name) {
+    LoggerE("name is nullptr");
+    return PlatformResult(ErrorCode::ABORT_ERR, "Failed to get tensor name");
+  }
+
+  gchar* raw_name = nullptr;
+  const int error = ml_tensors_info_get_tensor_name(handle_, index, &raw_name);
+  if (error != ML_ERROR_NONE) {
+    LoggerE("ml_tensors_info_get_tensor_name failed: %d (%s)", error, get_error_message(error));
+    return util::ToPlatformResult(error, "Failed to get tensor name");
+  }
+
+  if (raw_name) {
+    LoggerD("out_name: %s", raw_name);
+  }
+  *name = raw_name ? std::string{raw_name} : std::string{};
+
+  // g_free() tolerates nullptr, so no extra check is needed.
+  g_free(raw_name);
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Assigns |name| to the tensor at |index|.
+PlatformResult TensorsInfo::NativeSetTensorName(int index, const std::string& name) {
+  ScopeLogger("id_: %d, index: %d, name: %s", id_, index, name.c_str());
+
+  const int error = ml_tensors_info_set_tensor_name(handle_, index, name.c_str());
+  if (error != ML_ERROR_NONE) {
+    LoggerE("ml_tensors_info_set_tensor_name failed: %d (%s)", error, get_error_message(error));
+    return util::ToPlatformResult(error, "Failed to set tensor name");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Reads the element type of the tensor at |index|.
+// @param[out] type receives the tensor type on success.
+PlatformResult TensorsInfo::NativeGetTensorType(int index, ml_tensor_type_e* type) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  const int error = ml_tensors_info_get_tensor_type(handle_, index, type);
+  if (error != ML_ERROR_NONE) {
+    LoggerE("ml_tensors_info_get_tensor_type failed: %d (%s)", error, get_error_message(error));
+    return util::ToPlatformResult(error, "Failed to get tensor type");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Assigns the element type of the tensor at |index|.
+PlatformResult TensorsInfo::NativeSetTensorType(int index, const ml_tensor_type_e type) {
+  ScopeLogger("id_: %d, index: %d, type: %d", id_, index, type);
+
+  const int error = ml_tensors_info_set_tensor_type(handle_, index, type);
+  if (error != ML_ERROR_NONE) {
+    LoggerE("ml_tensors_info_set_tensor_type failed: %d (%s)", error, get_error_message(error));
+    return util::ToPlatformResult(error, "Failed to set tensor type");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Reads the byte size of the tensor at |index|.
+// @param[out] size receives the tensor size on success.
+PlatformResult TensorsInfo::NativeGetTensorSize(int index, size_t* size) {
+  ScopeLogger("id_: %d, index: %d", id_, index);
+
+  const int error = ml_tensors_info_get_tensor_size(handle_, index, size);
+  if (error != ML_ERROR_NONE) {
+    LoggerE("ml_tensors_info_get_tensor_size failed: %d (%s)", error, get_error_message(error));
+    return util::ToPlatformResult(error, "Failed to get tensor size");
+  }
+
+  return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+TensorsInfoManager::TensorsInfoManager() : nextId_(0) {
+ ScopeLogger();
+}
+
+// Drops both registries; releasing the shared_ptrs destroys the remaining
+// TensorsInfo objects, whose destructors free the native handles.
+TensorsInfoManager::~TensorsInfoManager() {
+  ScopeLogger();
+
+  map_by_id_.clear();
+  map_by_handle_.clear();
+}
+
+// Creates a fresh native tensors-info handle and registers it under a newly
+// assigned id in both lookup maps.
+//
+// @return borrowed pointer to the new TensorsInfo (owned by this manager),
+//         or nullptr when the native create call fails.
+TensorsInfo* TensorsInfoManager::CreateTensorsInfo() {
+  ScopeLogger();
+
+  // Initialize the out-param so a misbehaving API cannot leave it dangling.
+  ml_tensors_info_h handle = nullptr;
+  int ret = ml_tensors_info_create(&handle);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_info_create failed: %d (%s)", ret, get_error_message(ret));
+    return nullptr;
+  }
+
+  int id = nextId_++;
+  auto t = std::make_shared<TensorsInfo>(handle, id);
+  map_by_id_[id] = t;
+  map_by_handle_[handle] = t;
+
+  return t.get();
+}
+
+// Looks up a registered TensorsInfo by its id.
+//
+// @return borrowed pointer, or nullptr when the id is unknown.
+TensorsInfo* TensorsInfoManager::GetTensorsInfo(int id) {
+  ScopeLogger("id: %d", id);
+
+  // Single lookup via the iterator; operator[] would both search again and
+  // default-insert an empty entry for an unknown id.
+  auto it = map_by_id_.find(id);
+  if (map_by_id_.end() != it) {
+    return it->second.get();
+  }
+
+  return nullptr;
+}
+
+// Looks up a registered TensorsInfo by its native handle.
+//
+// @return borrowed pointer, or nullptr when the handle is unknown.
+TensorsInfo* TensorsInfoManager::GetTensorsInfo(ml_tensors_info_h handle) {
+  ScopeLogger();
+
+  // Single lookup via the iterator; operator[] would both search again and
+  // default-insert an empty entry for an unknown handle.
+  auto it = map_by_handle_.find(handle);
+  if (map_by_handle_.end() != it) {
+    return it->second.get();
+  }
+  return nullptr;
+}
+
+}  // namespace ml
+}  // namespace extension
--- /dev/null
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ML_TENSORS_INFO_MANAGER_H__
+#define __ML_TENSORS_INFO_MANAGER_H__
+
+#include <map>
+#include <vector>
+
+#include "common/logger.h"
+#include "common/platform_result.h"
+#include "ml_utils.h"
+
+#include <nnstreamer/nnstreamer.h>
+
+using common::PlatformResult;
+using common::ErrorCode;
+
+namespace extension {
+namespace ml {
+
+// RAII-style wrapper over a native ml_tensors_info_h handle, paired with an
+// id assigned by TensorsInfoManager. The destructor calls NativeDestroy(),
+// which releases the native handle.
+// Each Native* method forwards to the corresponding ml_tensors_info_* C API
+// call and maps its error code to a PlatformResult.
+class TensorsInfo {
+ public:
+ TensorsInfo(ml_tensors_info_h handle, int id);
+ ~TensorsInfo();
+
+ // Raw native handle (non-owning view for callers).
+ ml_tensors_info_h Handle();
+ // Manager-assigned identifier.
+ int Id();
+
+ // Destroys the native handle; also invoked by the destructor.
+ PlatformResult NativeDestroy();
+ // Tensor-count accessors.
+ PlatformResult NativeGetCount(unsigned int* count);
+ PlatformResult NativeSetCount(unsigned int count);
+ // Per-tensor dimension accessors; dim holds ML_TENSOR_RANK_LIMIT entries.
+ PlatformResult NativeGetTensorDimensions(int index, unsigned int dim[ML_TENSOR_RANK_LIMIT]);
+ PlatformResult NativeSetTensorDimensions(int index, unsigned int dim[ML_TENSOR_RANK_LIMIT]);
+ // Per-tensor name accessors.
+ PlatformResult NativeGetTensorName(int index, std::string* name);
+ PlatformResult NativeSetTensorName(int index, const std::string& name);
+ // Per-tensor element-type accessors.
+ PlatformResult NativeGetTensorType(int index, ml_tensor_type_e* type);
+ PlatformResult NativeSetTensorType(int index, const ml_tensor_type_e type);
+ // Byte size of the tensor at index.
+ PlatformResult NativeGetTensorSize(int index, size_t* size);
+
+ private:
+ ml_tensors_info_h handle_;
+ int id_;
+};
+
+// Owns every TensorsInfo created through it (via shared_ptr) and offers
+// lookup both by the assigned integer id and by the raw native handle.
+// Non-copyable; ids are allocated monotonically from nextId_.
+class TensorsInfoManager {
+ public:
+ TensorsInfoManager();
+ ~TensorsInfoManager();
+ // Creates and registers a new TensorsInfo; returns a borrowed pointer
+ // owned by this manager, or nullptr on native-create failure.
+ TensorsInfo* CreateTensorsInfo();
+
+ // Both lookups return a borrowed pointer, or nullptr when not found.
+ TensorsInfo* GetTensorsInfo(int id);
+ TensorsInfo* GetTensorsInfo(ml_tensors_info_h handle);
+
+ private:
+ TensorsInfoManager(TensorsInfoManager const&) = delete;
+ TensorsInfoManager& operator=(TensorsInfoManager const&) = delete;
+ // Parallel registries keyed by id and by native handle; both share
+ // ownership of the same TensorsInfo instances.
+ std::map<int, std::shared_ptr<TensorsInfo>> map_by_id_;
+ std::map<ml_tensors_info_h, std::shared_ptr<TensorsInfo>> map_by_handle_;
+ int nextId_;
+};
+
+}  // namespace ml
+}  // namespace extension
+#endif // __ML_TENSORS_INFO_MANAGER_H__