// TensorsData
-var TensorsData = function() {
+var TensorsData = function(id, tensorsInfoId) {
+ // id: identifier of the native TensorsData object.
+ // tensorsInfoId: identifier of the native TensorsInfo this data was created from.
Object.defineProperties(this, {
+ // Read-only tensor count, delegated to the wrapped TensorsInfo.
count: {
enumerable: true,
get: function() {
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+ return this._tensorsInfo.count;
}
},
+ // Read-only TensorsInfo; a clone is returned so callers cannot
+ // mutate the internally held instance.
tensorsInfo: {
enumerable: true,
get: function() {
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+ return this._tensorsInfo.clone();
}
+ },
+ // Internal, non-enumerable bookkeeping properties.
+ _id: { value: id, writable: false, enumerable: false },
+ _tensorsInfo: {
+ value: new TensorsInfo(tensorsInfoId),
+ writable: false,
+ enumerable: false
}
});
};
return native_.getResultObject(result);
};
+var TensorsInfoGetTensorsDataValidExceptions = ['AbortError', 'NotSupportedError'];
+
+// Synchronously asks the native layer to create a TensorsData object for
+// this TensorsInfo and wraps the returned ids in a new TensorsData.
TensorsInfo.prototype.getTensorsData = function() {
_CheckIfTensorsInfoNotDisposed(this._id);
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+ var callArgs = {
+ tensorsInfoId: this._id
+ };
+
+ var result = native_.callSync('MLTensorsInfoGetTensorsData', callArgs);
+
+ if (native_.isFailure(result)) {
+ // NOTE(review): presumably only exceptions listed in
+ // TensorsInfoGetTensorsDataValidExceptions are re-thrown as-is and the
+ // rest become AbortError — confirm against getErrorObjectAndValidate.
+ throw native_.getErrorObjectAndValidate(
+ result,
+ TensorsInfoGetTensorsDataValidExceptions,
+ AbortError
+ );
+ }
+ return new TensorsData(result.tensorsDataId, result.tensorsInfoId);
};
TensorsInfo.prototype.dispose = function() {
'ml_pipeline_switch.cc',
'ml_pipeline_switch.h',
#TODO pipeline Source
+ #TODO pipeline Valve
'ml_pipeline_valve.h',
'ml_pipeline_valve.cc',
+ 'ml_tensors_data_manager.cc',
+ 'ml_tensors_data_manager.h',
'ml_tensors_info_manager.cc',
'ml_tensors_info_manager.h',
'ml_single_manager.cc',
const std::string kNnfw = "nnfw";
const std::string kHw = "hw";
const std::string kTensorsInfoId = "tensorsInfoId";
+const std::string kTensorsDataId = "tensorsDataId";
const std::string kIndex = "index";
const std::string kType = "type";
const std::string kName = "name";
CHECK_EXIST(args, name, out) \
CHECK_TYPE(args, name, type, out)
-MlInstance::MlInstance() : single_manager_{&tensors_info_manager_}, pipeline_manager_{this} {
+MlInstance::MlInstance()
+ : tensors_info_manager_{&tensors_data_manager_},
+ single_manager_{&tensors_info_manager_},
+ pipeline_manager_{this} {
ScopeLogger();
using namespace std::placeholders;
REGISTER_METHOD(MLTensorsInfoGetTensorSize);
REGISTER_METHOD(MLTensorsInfoGetTensorType);
REGISTER_METHOD(MLTensorsInfoSetTensorType);
+ REGISTER_METHOD(MLTensorsInfoGetTensorsData);
REGISTER_METHOD(MLTensorsInfoClone);
REGISTER_METHOD(MLTensorsInfoEquals);
REGISTER_METHOD(MLTensorsInfoDispose);
return tensors_info_manager_;
}
+TensorsDataManager& MlInstance::GetTensorsDataManager() {
+ return tensors_data_manager_;
+}
+
// Common ML API begin
void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
ReportSuccess(out);
}
+// Handles the MLTensorsInfoGetTensorsData message: creates a TensorsData
+// object for the TensorsInfo identified by kTensorsInfoId and reports the
+// ids of both the data and its (cloned) info back to the JS layer.
+void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+ CHECK_ARGS(args, kTensorsInfoId, double, out);
+
+ int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
+ TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ if (nullptr == tensorsInfo) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ return;
+ }
+
+ TensorsData* tensorsData = GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
+ if (!tensorsData) {
+ // NOTE(review): message says "TensorsInfo" but this failure is in
+ // TensorsData creation — consider "Internal TensorsData error".
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
+ ("Could not create TensorsData"));
+ return;
+ }
+
+ out[kTensorsDataId] = picojson::value(static_cast<double>(tensorsData->Id()));
+ out[kTensorsInfoId] = picojson::value(static_cast<double>(tensorsData->TensorsInfoId()));
+ ReportSuccess(out);
+}
+
void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int res_id = -1;
result = single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
- is_dynamic_mode, &res_id);
+ is_dynamic_mode, &res_id);
if (!result) {
ReportError(result, &out);
return;
#include "nnstreamer/nnstreamer-single.h"
#include "nnstreamer/nnstreamer.h"
+#include "ml_tensors_data_manager.h"
#include "ml_tensors_info_manager.h"
namespace extension {
MlInstance();
virtual ~MlInstance();
TensorsInfoManager& GetTensorsInfoManager();
+ TensorsDataManager& GetTensorsDataManager();
private:
// Common ML API begin
void MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out);
void MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out);
void MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out);
+ void MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out);
void MLTensorsInfoClone(const picojson::value& args, picojson::object& out);
void MLTensorsInfoEquals(const picojson::value& args, picojson::object& out);
void MLTensorsInfoDispose(const picojson::value& args, picojson::object& out);
TensorsInfoManager tensors_info_manager_;
+ TensorsDataManager tensors_data_manager_;
// Common ML API end
// Single API begin
--- /dev/null
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ml_tensors_data_manager.h"
+#include "ml_tensors_info_manager.h"
+
+using common::ErrorCode;
+using common::PlatformResult;
+
+namespace extension {
+namespace ml {
+
+// Wraps an existing native ml_tensors_data_h handle. |tensors_info| is
+// stored as a raw (non-owning) pointer; TensorsDataManager releases it
+// (see the destructor's note).
+TensorsData::TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info)
+ : handle_(handle), id_(id), tensors_info_(tensors_info) {
+ ScopeLogger();
+}
+
+// Releases the native handle on destruction; failures can only be logged,
+// never propagated, from a destructor.
+TensorsData::~TensorsData() {
+ ScopeLogger();
+ // NOTE(review): assumes PlatformResult's bool conversion yields true on
+ // error — confirm against common/platform_result.h. NativeDestroy() also
+ // logs the failure itself, so this message is a second, higher-level log.
+ if (this->NativeDestroy()) {
+ LoggerE("TensorsData NativeDestroy failed");
+ }
+ // TensorsDataManager releases tensors_info_
+}
+
+// Returns the native handle owned by this object (destroyed in NativeDestroy()).
+ml_tensors_data_h TensorsData::Handle() {
+ return this->handle_;
+}
+
+// Returns the manager-assigned identifier of this TensorsData.
+int TensorsData::Id() {
+ return this->id_;
+}
+
+// Returns the id of the TensorsInfo this data is associated with.
+int TensorsData::TensorsInfoId() {
+ return this->tensors_info_->Id();
+}
+
+// Returns the number of tensors, delegated to the associated TensorsInfo.
+int TensorsData::Count() {
+ return tensors_info_->Count();
+}
+
+// Destroys the native handle via ml_tensors_data_destroy().
+// Returns NO_ERROR on success, otherwise the platform error mapped from
+// the native return code.
+PlatformResult TensorsData::NativeDestroy() {
+ ScopeLogger("id_: %d", id_);
+ int ret = ml_tensors_data_destroy(handle_);
+ if (ML_ERROR_NONE != ret) {
+ LoggerE("ml_tensors_data_destroy failed: %d (%s)", ret, get_error_message(ret));
+ return util::ToPlatformResult(ret, "Failed to destroy handle");
+ }
+ return PlatformResult(ErrorCode::NO_ERROR);
+}
+
+// Ids are assigned sequentially starting from 0 (see CreateTensorsData).
+TensorsDataManager::TensorsDataManager() : nextId_(0) {
+ ScopeLogger();
+}
+
+// Drops all managed TensorsData objects; each TensorsData destructor
+// releases its native handle.
+TensorsDataManager::~TensorsDataManager() {
+  ScopeLogger();
+  map_.clear();
+}
+
+// Allocates a native ml_tensors_data_h for |tensors_info|, wraps it in a
+// TensorsData registered under a fresh sequential id, and returns a
+// non-owning pointer to it. |tensors_info| is borrowed; the caller keeps
+// it alive for the lifetime of the returned object. Returns nullptr when
+// |tensors_info| is null or native creation fails.
+TensorsData* TensorsDataManager::CreateTensorsData(TensorsInfo* tensors_info) {
+  ScopeLogger();
+  if (nullptr == tensors_info) {
+    LoggerE("Could not find tensor");
+    return nullptr;
+  }
+
+  ml_tensors_data_h tensors_data_handle;
+  int ret = ml_tensors_data_create(tensors_info->Handle(), &tensors_data_handle);
+  if (ML_ERROR_NONE != ret) {
+    LoggerE("ml_tensors_data_create failed: %d (%s)", ret, get_error_message(ret));
+    return nullptr;
+  }
+
+  int id = nextId_++;
+  auto t = std::make_unique<TensorsData>(tensors_data_handle, id, tensors_info);
+  // Grab the raw pointer before moving ownership into the map; avoids a
+  // second hash lookup via map_[id] after insertion.
+  TensorsData* tensors_data = t.get();
+  map_[id] = std::move(t);
+
+  return tensors_data;
+}
+
+// Returns the TensorsData registered under |id|, or nullptr when unknown.
+TensorsData* TensorsDataManager::GetTensorsData(int id) {
+  ScopeLogger("id: %d", id);
+
+  // Single hash lookup instead of find() followed by operator[].
+  auto it = map_.find(id);
+  return map_.end() != it ? it->second.get() : nullptr;
+}
+
+} // ml
+} // extension
--- /dev/null
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ML_TENSORS_DATA_MANAGER_H__
+#define __ML_TENSORS_DATA_MANAGER_H__
+
+#include <unordered_map>
+
+#include "common/logger.h"
+#include "common/platform_result.h"
+#include "ml_utils.h"
+
+using common::PlatformResult;
+using common::ErrorCode;
+
+namespace extension {
+namespace ml {
+
+class TensorsInfo;
+
+// Wrapper around a native ml_tensors_data_h handle. Instances are created
+// and owned by TensorsDataManager; copying is disabled.
+class TensorsData {
+ public:
+ TensorsData(ml_tensors_data_h handle, int id, TensorsInfo* tensors_info);
+ ~TensorsData();
+
+ // Native handle owned by this object (released in NativeDestroy()).
+ ml_tensors_data_h Handle();
+ // Manager-assigned identifier.
+ int Id();
+ // Id of the associated TensorsInfo.
+ int TensorsInfoId();
+ // Number of tensors, delegated to the associated TensorsInfo.
+ int Count();
+
+ // Destroys the native handle; invoked from the destructor.
+ PlatformResult NativeDestroy();
+
+ private:
+ TensorsData(TensorsData const&) = delete;
+ TensorsData& operator=(TensorsData const&) = delete;
+
+ ml_tensors_data_h handle_;
+ int id_;
+ // Non-owning; released by TensorsDataManager (see .cc destructor note).
+ TensorsInfo* tensors_info_;
+};
+
+// Creates, owns and looks up TensorsData objects, keyed by a sequential
+// integer id. Non-copyable.
+class TensorsDataManager {
+ public:
+ TensorsDataManager();
+ ~TensorsDataManager();
+
+ // Creates a TensorsData for |tensors_info|; returns nullptr on failure.
+ TensorsData* CreateTensorsData(TensorsInfo* tensors_info);
+ // Returns the TensorsData with the given id, or nullptr when not found.
+ TensorsData* GetTensorsData(int id);
+
+ private:
+ TensorsDataManager(TensorsDataManager const&) = delete;
+ TensorsDataManager& operator=(TensorsDataManager const&) = delete;
+
+ // Owns all TensorsData objects for their whole lifetime.
+ std::unordered_map<int, std::unique_ptr<TensorsData>> map_;
+ // Next id handed out by CreateTensorsData.
+ int nextId_;
+};
+
+} // ml
+} // extension
+#endif // __ML_TENSORS_DATA_MANAGER_H__
return PlatformResult(ErrorCode::NO_ERROR);
}
-TensorsInfoManager::TensorsInfoManager() : nextId_(0) {
+TensorsInfoManager::TensorsInfoManager(TensorsDataManager* tensors_data_manager)
+ : nextId_(0), tensors_data_manager_(tensors_data_manager) {
ScopeLogger();
}
return PlatformResult(ErrorCode::NO_ERROR);
}
+// Creates a TensorsData object for |tensors_info|. A clone of the info is
+// created first and handed to the TensorsDataManager; the clone has to be
+// disposed on TensorsData disposal. Returns nullptr on failure.
+TensorsData* TensorsInfoManager::CreateTensorsData(TensorsInfo* tensors_info) {
+  ScopeLogger();
+  if (nullptr == tensors_info) {
+    LoggerE("Could not find tensor");
+    return nullptr;
+  }
+
+  // Create clone of tensors_info; this clone has to be disposed
+  // on TensorsData disposal.
+  TensorsInfo* t_info = CloneTensorsInfo(tensors_info);
+  if (nullptr == t_info) {
+    LoggerE("Could not clone TensorsInfo");
+    return nullptr;
+  }
+
+  TensorsData* t_data = tensors_data_manager_->CreateTensorsData(t_info);
+  if (nullptr == t_data) {
+    // Release the clone, otherwise it would leak when data creation fails.
+    DisposeTensorsInfo(t_info);
+  }
+  return t_data;
+}
+
} // ml
} // extension
#include "common/logger.h"
#include "common/platform_result.h"
+#include "ml_tensors_data_manager.h"
#include "ml_utils.h"
#include <nnstreamer/nnstreamer.h>
class TensorsInfoManager {
public:
- TensorsInfoManager();
+ TensorsInfoManager(TensorsDataManager* tensors_data_manager);
~TensorsInfoManager();
TensorsInfo* CreateTensorsInfo();
// handle will be destroyed on TensorsInfo object destruction
PlatformResult DisposeTensorsInfo(ml_tensors_info_h handle);
PlatformResult DisposeTensorsInfo(TensorsInfo* t);
+ TensorsData* CreateTensorsData(TensorsInfo* tensors_info);
+
private:
TensorsInfoManager(TensorsInfoManager const&) = delete;
TensorsInfoManager& operator=(TensorsInfoManager const&) = delete;
std::map<int, std::shared_ptr<TensorsInfo>> map_by_id_;
std::map<ml_tensors_info_h, std::shared_ptr<TensorsInfo>> map_by_handle_;
int nextId_;
+ TensorsDataManager* tensors_data_manager_;
};
} // ml