//Pipeline::getNodeInfo() end
//Pipeline::getSource() begin
+var ValidInputTensorsInfoExceptions = ['NotFoundError', 'AbortError'];
+// Source - JS wrapper for a native pipeline source element.
+// `name` is the source element's name; `pipeline_id` identifies the owning
+// Pipeline and is kept in a non-enumerable property for native calls.
+function Source(name, pipeline_id) {
+ Object.defineProperties(this, {
+ name: {
+ enumerable: true,
+ value: name
+ },
+ inputTensorsInfo: {
+ // Every access queries the native layer; the returned id is wrapped
+ // in a fresh TensorsInfo object.
+ get: function() {
+ var result = native_.callSync('MLPipelineGetInputTensorsInfo', {
+ id: this._pipeline_id,
+ name: this.name
+ });
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ ValidInputTensorsInfoExceptions,
+ AbortError
+ );
+ }
+ return new TensorsInfo(result.id);
+ }
+ },
+ _pipeline_id: {
+ value: pipeline_id
+ }
+ });
+}
+
+var ValidPipelineGetSourceExceptions = [
+ 'InvalidStateError',
+ 'InvalidValuesError',
+ 'NotFoundError',
+ 'NotSupportedError',
+ 'AbortError'
+];
+
+// Pipeline.getSource(name)
+// Looks up (and natively caches) the source element called `name` and
+// returns a Source wrapper for it. Throws InvalidValuesError when the
+// mandatory name argument is missing, or a validated native error.
+Pipeline.prototype.getSource = function() {
+ var args = validator_.validateArgs(arguments, [
+ {
+ name: 'name',
+ type: validator_.Types.STRING
+ }
+ ]);
+
+ // name is mandatory - reject calls made without it before going native.
+ if (!args.has.name) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'Invalid parameter: name is mandatory'
+ );
+ }
+
+ var nativeArgs = {
+ id: this._id,
+ name: args.name
+ };
+
+ var result = native_.callSync('MLPipelineGetSource', nativeArgs);
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ ValidPipelineGetSourceExceptions,
+ AbortError
+ );
+ }
+
+ return new Source(args.name, this._id);
+};
//Pipeline::getSource() end
//Pipeline::getSwitch() begin
'ml_pipeline_nodeinfo.h',
'ml_pipeline_switch.cc',
'ml_pipeline_switch.h',
- #TODO pipeline Source
- #TODO pipeline Valve
+ 'ml_pipeline_source.h',
+ 'ml_pipeline_source.cc',
'ml_pipeline_valve.h',
'ml_pipeline_valve.cc',
'ml_tensors_data_manager.cc',
MlInstance::MlInstance()
: tensors_info_manager_{&tensors_data_manager_},
single_manager_{&tensors_info_manager_},
- pipeline_manager_{this} {
+ pipeline_manager_{this, &tensors_info_manager_} {
ScopeLogger();
using namespace std::placeholders;
REGISTER_METHOD(MLPipelineGetValve);
REGISTER_METHOD(MLPipelineNodeInfoGetProperty);
REGISTER_METHOD(MLPipelineNodeInfoSetProperty);
+ REGISTER_METHOD(MLPipelineGetSource);
+ REGISTER_METHOD(MLPipelineGetInputTensorsInfo);
// Pipeline API end
#undef REGISTER_METHOD
// Pipeline::getNodeInfo() end
// Pipeline::getSource() begin
+// Native backend of Pipeline.getSource(): resolves the source element by
+// pipeline id + name and caches it in the Pipeline object. Reports only
+// success/failure - the JS layer constructs the Source wrapper itself.
+void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+
+ CHECK_ARGS(args, kId, double, out);
+ CHECK_ARGS(args, kName, std::string, out);
+
+ auto name = args.get(kName).get<std::string>();
+ auto id = static_cast<int>(args.get(kId).get<double>());
+
+ PlatformResult result = pipeline_manager_.GetSource(id, name);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+
+ ReportSuccess(out);
+}
// Pipeline::getSource() end
// Pipeline::getSwitch() begin
// NodeInfo::setProperty() end
// Source::inputTensorsInfo begin
+// Native backend of Source.inputTensorsInfo: fetches the input tensors info
+// of the named source element and registers it with the TensorsInfo manager.
+// The new TensorsInfo id is returned to the JS layer in out[kId].
+void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: [%s]", args.serialize().c_str());
+
+ CHECK_ARGS(args, kId, double, out);
+ CHECK_ARGS(args, kName, std::string, out);
+ auto id = static_cast<int>(args.get(kId).get<double>());
+ const auto& name = args.get(kName).get<std::string>();
+
+ int res_id = -1;
+ PlatformResult result = pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+
+ // The JS layer reads result.id to build its TensorsInfo wrapper
+ // (new TensorsInfo(result.id)); without this the id would be undefined.
+ out[kId] = picojson::value(static_cast<double>(res_id));
+ ReportSuccess(out);
+}
// Source::inputTensorsInfo end
// Source::inputData() begin
// Pipeline::getNodeInfo() end
// Pipeline::getSource() begin
-
+ void MLPipelineGetSource(const picojson::value& args, picojson::object& out);
// Pipeline::getSource() end
// Pipeline::getSwitch() begin
// NodeInfo::setProperty() end
// Source::inputTensorsInfo begin
-
+ void MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out);
// Source::inputTensorsInfo end
// Source::inputData() begin
valves_.clear();
+ sources_.clear();
+
auto ret = ml_pipeline_destroy(pipeline_);
if (ML_ERROR_NONE != ret) {
LoggerE("ml_pipeline_destroy() failed: [%d] (%s)", ret, get_error_message(ret));
// Pipeline::getNodeInfo() end
// Pipeline::getSource() begin
+// Ensures a Source wrapper for the element `name` exists in sources_.
+// Idempotent: a second call for the same name reuses the cached entry and
+// succeeds without touching the native pipeline again.
+PlatformResult Pipeline::GetSource(const std::string& name) {
+ ScopeLogger("id: [%d], name: [%s]", id_, name.c_str());
+
+ auto source_it = sources_.find(name);
+ if (sources_.end() != source_it) {
+ LoggerD("Source [%s] found", name.c_str());
+ return PlatformResult{};
+ }
+ std::unique_ptr<Source> source_ptr;
+ auto ret = Source::CreateSource(name, pipeline_, &source_ptr);
+ if (ret) {
+ sources_.insert({name, std::move(source_ptr)});
+ }
+ return ret;
+}
// Pipeline::getSource() end
// Pipeline::getSwitch() begin
return PlatformResult{ErrorCode::NOT_FOUND_ERR, "NodeInfo not found"};
}
- auto ret = nodeinfo_it->second->getProperty(name, type, property);
-
- return ret;
+ return nodeinfo_it->second->getProperty(name, type, property);
}
// NodeInfo::getProperty() end
return PlatformResult{ErrorCode::NOT_FOUND_ERR, "NodeInfo not found"};
}
- auto ret = nodeinfo_it->second->setProperty(name, type, property);
- return ret;
+ return nodeinfo_it->second->setProperty(name, type, property);
}
// NodeInfo::setProperty() end
// Source::inputTensorsInfo begin
+// Delegates to the cached Source of the given name; the Source must have
+// been created earlier via GetSource(), otherwise NOT_FOUND_ERR is returned.
+PlatformResult Pipeline::getInputTensorsInfo(const std::string& name, ml_tensors_info_h* result) {
+ ScopeLogger();
+ auto source_it = sources_.find(name);
+ if (sources_.end() == source_it) {
+ LoggerD("Source [%s] not found", name.c_str());
+ return PlatformResult{ErrorCode::NOT_FOUND_ERR, "Source not found"};
+ }
+
+ return source_it->second->getInputTensorsInfo(result);
+}
// Source::inputTensorsInfo end
// Source::inputData() begin
#include "common/picojson.h"
#include "common/platform_result.h"
#include "ml_pipeline_nodeinfo.h"
+#include "ml_pipeline_source.h"
#include "ml_pipeline_switch.h"
#include "ml_pipeline_valve.h"
// Pipeline::getNodeInfo() end
// Pipeline::getSource() begin
-
+ PlatformResult GetSource(const std::string& name);
// Pipeline::getSource() end
// Pipeline::getSwitch() begin
// NodeInfo::setProperty() end
// Source::inputTensorsInfo begin
-
+ PlatformResult getInputTensorsInfo(const std::string& name, ml_tensors_info_h* result);
// Source::inputTensorsInfo end
// Source::inputData() begin
std::unordered_map<std::string, std::unique_ptr<Switch>> switches_;
std::map<std::string, std::unique_ptr<NodeInfo>> node_info_;
std::unordered_map<std::string, std::unique_ptr<Valve>> valves_;
+ std::map<std::string, std::unique_ptr<Source>> sources_;
static void PipelineStateChangeListener(ml_pipeline_state_e state, void* user_data);
};
namespace extension {
namespace ml {
-PipelineManager::PipelineManager(common::Instance* instance_ptr) : instance_ptr_{instance_ptr} {
+// The manager now additionally keeps a non-owning TensorsInfoManager pointer
+// so it can register TensorsInfo objects created for Source.inputTensorsInfo.
+PipelineManager::PipelineManager(common::Instance* instance_ptr,
+ TensorsInfoManager* tensors_info_manager)
+ : instance_ptr_{instance_ptr}, tensors_info_manager_{tensors_info_manager} {
ScopeLogger();
}
// Pipeline::getNodeInfo() end
// Pipeline::getSource() begin
+// Resolves the pipeline by id and asks it to create/cache the named source.
+// Returns NOT_FOUND_ERR when no pipeline with that id exists.
+PlatformResult PipelineManager::GetSource(int pipeline_id, const std::string& name) {
+ ScopeLogger("name: [%s], pipeline_id: [%d]", name.c_str(), pipeline_id);
+
+ auto pipeline_it = pipelines_.find(pipeline_id);
+ if (pipelines_.end() == pipeline_it) {
+ LoggerD("Pipeline not found: [%d]", pipeline_id);
+ return PlatformResult{ErrorCode::NOT_FOUND_ERR, "Pipeline not found"};
+ }
+ return pipeline_it->second->GetSource(name);
+}
// Pipeline::getSource() end
// Pipeline::getSwitch() begin
// NodeInfo::setProperty() end
// Source::inputTensorsInfo begin
+// Fetches the input tensors info handle of pipeline `id`'s source `name`,
+// wraps it in a managed TensorsInfo object and returns that object's id
+// via *res_id for the JS layer.
+PlatformResult PipelineManager::getInputTensorsInfo(int id, const std::string& name, int* res_id) {
+ ScopeLogger();
+
+ auto pipeline_it = pipelines_.find(id);
+ if (pipelines_.end() == pipeline_it) {
+ LoggerD("Pipeline not found: [%d]", id);
+ return PlatformResult{ErrorCode::NOT_FOUND_ERR, "Pipeline not found"};
+ }
+ ml_tensors_info_h in_info = nullptr;
+ PlatformResult ret = pipeline_it->second->getInputTensorsInfo(name, &in_info);
+ if (!ret) {
+ return ret;
+ }
+ auto tensor_info = tensors_info_manager_->CreateTensorsInfo(in_info);
+ // Guard against a failed creation/registration - dereferencing a null
+ // pointer here would crash the extension process.
+ if (!tensor_info) {
+ return PlatformResult{ErrorCode::ABORT_ERR, "Could not get input tensors info"};
+ }
+ *res_id = tensor_info->Id();
+ return PlatformResult{};
+}
// Source::inputTensorsInfo end
// Source::inputData() begin
#include "common/platform_result.h"
#include "ml_pipeline.h"
+#include "ml_tensors_info_manager.h"
using common::PlatformResult;
class PipelineManager {
public:
- PipelineManager(common::Instance* instance_ptr);
+ PipelineManager(common::Instance* instance_ptr, TensorsInfoManager* tim);
~PipelineManager();
// Pipeline::getNodeInfo() end
// Pipeline::getSource() begin
-
+ PlatformResult GetSource(int pipeline_id, const std::string& name);
// Pipeline::getSource() end
// Pipeline::getSwitch() begin
// NodeInfo::setProperty() end
// Source::inputTensorsInfo begin
-
+ PlatformResult getInputTensorsInfo(int id, const std::string& name, int* res_id);
// Source::inputTensorsInfo end
// Source::inputData() begin
// Valve::setOpen() end
private:
common::Instance* instance_ptr_;
+ TensorsInfoManager* tensors_info_manager_;
std::map<int, std::unique_ptr<Pipeline>> pipelines_;
};
--- /dev/null
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ml_pipeline_source.h"
+#include "ml_utils.h"
+
+using common::PlatformResult;
+using common::ErrorCode;
+
+namespace extension {
+namespace ml {
+namespace pipeline {
+
+// Factory for Source: acquires the native src handle for `name` from
+// `pipeline` and, on success, stores a new Source in *out. On allocation
+// failure the already-acquired native handle is released before reporting
+// ABORT_ERR, so no handle leaks.
+PlatformResult Source::CreateSource(const std::string& name, ml_pipeline_h pipeline,
+ std::unique_ptr<Source>* out) {
+ ScopeLogger("name: [%s], pipeline: [%p]", name.c_str(), pipeline);
+ ml_pipeline_src_h source_handle = nullptr;
+ auto ret = ml_pipeline_src_get_handle(pipeline, name.c_str(), &source_handle);
+ if (ML_ERROR_NONE != ret) {
+ LoggerE("ml_pipeline_src_get_handle() failed: [%d] (%s)", ret, get_error_message(ret));
+ return util::ToPlatformResult(ret, "Could not get source");
+ }
+
+ out->reset(new (std::nothrow) Source{name, source_handle});
+ // Check the allocation result (*out), not the out-parameter itself:
+ // `out` is never null here, while nothrow-new yields nullptr on failure.
+ if (!*out) {
+ ret = ml_pipeline_src_release_handle(source_handle);
+ if (ML_ERROR_NONE != ret) {
+ LoggerE("ml_pipeline_src_release_handle() failed: [%d] (%s)", ret, get_error_message(ret));
+ } else {
+ LoggerD("ml_pipeline_src_release_handle() succeeded");
+ }
+ return LogAndCreateResult(ErrorCode::ABORT_ERR, "Could not get the source",
+ ("Could not allocate memory"));
+ }
+
+ return PlatformResult{};
+}
+
+// Private ctor - only reachable through CreateSource(), which guarantees a
+// valid native handle. The Source takes ownership of source_handle.
+Source::Source(const std::string& name, ml_pipeline_src_h source_handle)
+ : name_{name}, source_{source_handle} {
+ ScopeLogger("name: [%s], handle: [%p]", name.c_str(), source_handle);
+}
+
+// Releases the owned native src handle; failures are only logged because
+// destructors must not throw or propagate errors.
+Source::~Source() {
+ ScopeLogger("name: [%s], handle: [%p]", name_.c_str(), source_);
+
+ auto ret = ml_pipeline_src_release_handle(source_);
+ if (ML_ERROR_NONE != ret) {
+ LoggerE("ml_pipeline_src_release_handle() failed: [%d] (%s)", ret, get_error_message(ret));
+ } else {
+ LoggerD("ml_pipeline_src_release_handle() succeeded");
+ }
+}
+
+// Queries the native layer for this source's input tensors information.
+// On success *result holds the obtained handle; NOTE(review): ownership of
+// the handle appears to pass to the caller - confirm against nnstreamer docs.
+PlatformResult Source::getInputTensorsInfo(ml_tensors_info_h* result) {
+ ScopeLogger();
+
+ ml_tensors_info_h info = nullptr;
+ auto ret = ml_pipeline_src_get_tensors_info(source_, &info);
+
+ if (ML_ERROR_NONE != ret) {
+ LoggerE(" ml_pipeline_src_get_tensors_info failed: %d (%s)", ret, get_error_message(ret));
+ return util::ToPlatformResult(ret, "Failed to get tensor info");
+ }
+
+ *result = info;
+
+ return PlatformResult{};
+}
+
+} // namespace pipeline
+} // namespace ml
+} // namespace extension
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ML_ML_PIPELINE_SOURCE_H_
+#define ML_ML_PIPELINE_SOURCE_H_
+
+#include <memory>
+#include <string>
+
+#include <nnstreamer/nnstreamer.h>
+
+#include "common/platform_result.h"
+
+using common::PlatformResult;
+
+namespace extension {
+namespace ml {
+namespace pipeline {
+
+// RAII wrapper owning an nnstreamer pipeline source element handle.
+// Non-copyable; instances are created only via the CreateSource() factory.
+class Source {
+ public:
+ // Acquires the native handle for element `name` in `pipeline`; on success
+ // *out holds the new Source, otherwise an error result is returned.
+ static PlatformResult CreateSource(const std::string& name, ml_pipeline_h pipeline,
+ std::unique_ptr<Source>* out);
+
+ ~Source();
+
+ // Fills *result with the tensors-info handle describing this source's input.
+ PlatformResult getInputTensorsInfo(ml_tensors_info_h* result);
+
+ Source(const Source&) = delete;
+ Source& operator=(const Source&) = delete;
+
+ private:
+ // Private: construction goes through CreateSource() only.
+ Source(const std::string& name, ml_pipeline_src_h source_handle);
+ const std::string name_;
+ const ml_pipeline_src_h source_;
+};
+
+} // namespace pipeline
+} // namespace ml
+} // namespace extension
+
+#endif // ML_ML_PIPELINE_SOURCE_H_
\ No newline at end of file