From: Piotr Kosko
Date: Mon, 1 Feb 2021 07:33:51 +0000 (+0000)
Subject: Merge "[systeminfo] Prevent possible crash when failure initialization" into tizen
X-Git-Tag: submit/tizen/20210202.064821^0
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=fabc6da2918c6e757a1c703ce40984025a73e3ee;hp=dde9626c01553c1516a4ea63d0ccaaf1ee7a9b43;p=platform%2Fcore%2Fapi%2Fwebapi-plugins.git
Merge "[systeminfo] Prevent possible crash when failure initialization" into tizen
---
diff --git a/src/ml/js/ml_common.js b/src/ml/js/ml_common.js
index 1c11756..699cddf 100755
--- a/src/ml/js/ml_common.js
+++ b/src/ml/js/ml_common.js
@@ -26,25 +26,22 @@ var MAX_TENSORS_INFO_COUNT = 16;
// TensorRawData
-var TensorRawData = function() {
+var TensorRawData = function(data, size, shape) {
Object.defineProperties(this, {
data: {
enumerable: true,
- get: function() {
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
- }
+ writable: false,
+ value: data
},
size: {
enumerable: true,
- get: function() {
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
- }
+ writable: false,
+ value: size
},
shape: {
enumerable: true,
- get: function() {
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
- }
+ writable: false,
+ value: shape
}
});
};
@@ -63,6 +60,47 @@ var TensorType = {
UNKNOWN: 'UNKNOWN'
};
+function _GetBufferTypeFromTensorType(tensorType) {
+ switch (tensorType) {
+ case 'INT8':
+ return Int8Array;
+ case 'UINT8':
+ return Uint8Array;
+ case 'INT16':
+ return Int16Array;
+ case 'UINT16':
+ return Uint16Array;
+ case 'FLOAT32':
+ return Float32Array;
+ case 'INT32':
+ return Int32Array;
+ case 'UINT32':
+ return Uint32Array;
+ case 'FLOAT64':
+ return Float64Array;
+ case 'INT64':
+ return BigInt64Array;
+ case 'UINT64':
+ return BigUint64Array;
+ }
+ return Uint8Array;
+}
+
+function _CheckIfArrayHasOnlyNumbersAndThrow(array, arrayName) {
+ if (xwalk.utils.type.isNullOrUndefined(array)) {
+ return;
+ }
+
+ array.forEach(function(d) {
+ if (Number.isInteger(d) == false) {
+ throw new WebAPIException(
+ WebAPIException.TYPE_MISMATCH_ERR,
+ arrayName + ' array has to contain only integers'
+ );
+ }
+ });
+}
+
// TensorsData
var _ValidTensorsDataIds = new Set();
@@ -99,13 +137,150 @@ var TensorsData = function(id, tensorsInfoId) {
};
TensorsData.prototype.getTensorRawData = function() {
- _CheckIfTensorsDataNotDisposed();
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+ _CheckIfTensorsDataNotDisposed(this._id);
+ var args = validator_.validateArgs(arguments, [
+ {
+ name: 'index',
+ type: types_.LONG
+ },
+ {
+ name: 'location',
+ type: types_.ARRAY,
+ optional: true
+ },
+ {
+ name: 'size',
+ type: types_.ARRAY,
+ optional: true
+ }
+ ]);
+
+ if (!args.has.index) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'Invalid parameter: index is undefined'
+ );
+ }
+
+ _CheckIfArrayHasOnlyNumbersAndThrow(args.location, 'location');
+ _CheckIfArrayHasOnlyNumbersAndThrow(args.size, 'size');
+
+ var callArgs = {
+ tensorsDataId: this._id,
+ index: args.index,
+ location: args.location ? args.location : [],
+ size: args.size ? args.size : []
+ };
+
+ var result = native_.callSync('MLTensorsDataGetTensorRawData', callArgs);
+
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ TensorsInfoGettersSettersValidExceptions,
+ AbortError
+ );
+ }
+
+ // TODO: modify StringToArray to accept also float types, not only int
+ var data = privUtils_.StringToArray(result.buffer, Uint8Array);
+ var ArrayType = _GetBufferTypeFromTensorType(result.type);
+ var shape = result.shape;
+ return new TensorRawData(new ArrayType(data.buffer), data.byteLength, shape);
};
-TensorsData.prototype.setTensorData = function() {
- _CheckIfTensorsDataNotDisposed();
- throw new WebAPIException(WebAPIException.ABORT_ERR, 'Not implemented');
+var TensorsDataSetTensorRawDataExceptions = [
+ 'InvalidValuesError',
+ 'TypeMismatchError',
+ 'NotSupportedError',
+ 'AbortError'
+];
+
+function ValidateBufferForTensorsData(tensorsData, index, buffer) {
+ var result = native_.callSync('MLTensorsDataGetTensorType', {
+ tensorsDataId: tensorsData._id,
+ index: index
+ });
+
+ if (native_.isFailure(result)) {
+ throw AbortError;
+ }
+ var tensorType = native_.getResultObject(result);
+ var ret = buffer;
+
+ var ArrayType = _GetBufferTypeFromTensorType(tensorType);
+ if (Array.isArray(buffer)) {
+ // in case of standard Array - create TypedArray from it
+ ret = new ArrayType(buffer);
+ } else if (false == buffer instanceof ArrayType) {
+ throw new WebAPIException(
+ WebAPIException.TYPE_MISMATCH_ERR,
+ 'buffer array has incompatible type, expected: ' +
+ ArrayType.name +
+ ', got: ' +
+ buffer.constructor.name
+ );
+ }
+ return ret;
+}
+
+TensorsData.prototype.setTensorRawData = function() {
+ _CheckIfTensorsDataNotDisposed(this._id);
+ var argsIndex = validator_.validateArgs(arguments, [
+ {
+ name: 'index',
+ type: types_.LONG
+ }
+ ]);
+ var argsLocSize = validator_.validateArgs(Array.prototype.slice.call(arguments, 2), [
+ {
+ name: 'location',
+ type: types_.ARRAY,
+ optional: true
+ },
+ {
+ name: 'size',
+ type: types_.ARRAY,
+ optional: true
+ }
+ ]);
+
+ if (!argsIndex.has.index) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'Invalid parameter: index is undefined'
+ );
+ }
+
+ if (arguments.length < 2) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'Invalid parameter: buffer is undefined'
+ );
+ }
+ var buffer = ValidateBufferForTensorsData(this, argsIndex.index, arguments[1]);
+
+ _CheckIfArrayHasOnlyNumbersAndThrow(argsLocSize.location, 'location');
+ _CheckIfArrayHasOnlyNumbersAndThrow(argsLocSize.size, 'size');
+
+ // TODO: modify ArrayToString to accept also float types, not only int
+ var encodedData = privUtils_.ArrayToString(new Uint8Array(buffer.buffer));
+ var callArgs = {
+ index: argsIndex.index,
+ tensorsDataId: this._id,
+ buffer: encodedData,
+ location: argsLocSize.location ? argsLocSize.location : [],
+ size: argsLocSize.size ? argsLocSize.size : []
+ };
+ var result = native_.callSync('MLTensorsDataSetTensorRawData', callArgs);
+
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ TensorsDataSetTensorRawDataExceptions,
+ AbortError
+ );
+ }
};
TensorsData.prototype.dispose = function() {
@@ -209,14 +384,7 @@ TensorsInfo.prototype.addTensorInfo = function() {
}
]);
- args.dimensions.forEach(function(d) {
- if (Number.isInteger(d) == false) {
- throw new WebAPIException(
- WebAPIException.TYPE_MISMATCH_ERR,
- 'dimensions array has to contain only integers'
- );
- }
- });
+ _CheckIfArrayHasOnlyNumbersAndThrow(args.dimensions, 'dimensions');
var callArgs = {
name: args.name,
@@ -337,14 +505,7 @@ TensorsInfo.prototype.setDimensions = function() {
}
]);
- args.dimensions.forEach(function(d) {
- if (Number.isInteger(d) == false) {
- throw new WebAPIException(
- WebAPIException.TYPE_MISMATCH_ERR,
- 'dimensions array has to contain only integers'
- );
- }
- });
+ _CheckIfArrayHasOnlyNumbersAndThrow(args.dimensions, 'dimensions');
var callArgs = {
index: args.index,
diff --git a/src/ml/js/ml_pipeline.js b/src/ml/js/ml_pipeline.js
index 425e34c..cf92f3f 100755
--- a/src/ml/js/ml_pipeline.js
+++ b/src/ml/js/ml_pipeline.js
@@ -208,7 +208,75 @@ Pipeline.prototype.getNodeInfo = function() {
//Pipeline::getNodeInfo() end
//Pipeline::getSource() begin
+var ValidInputTensorsInfoExceptions = ['NotFoundError', 'AbortError'];
+function Source(name, pipeline_id) {
+ Object.defineProperties(this, {
+ name: {
+ enumerable: true,
+ value: name
+ },
+ inputTensorsInfo: {
+ get: function() {
+ var result = native_.callSync('MLPipelineGetInputTensorsInfo', {
+ id: this._pipeline_id,
+ name: this.name
+ });
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ ValidInputTensorsInfoExceptions,
+ AbortError
+ );
+ }
+
+ return new TensorsInfo(result.id);
+ }
+ },
+ _pipeline_id: {
+ value: pipeline_id
+ }
+ });
+}
+
+var ValidPipelineGetSourceExceptions = [
+ 'InvalidStateError',
+ 'InvalidValuesError',
+ 'NotFoundError',
+ 'NotSupportedError',
+ 'AbortError'
+];
+
+Pipeline.prototype.getSource = function() {
+ var args = validator_.validateArgs(arguments, [
+ {
+ name: 'name',
+ type: validator_.Types.STRING
+ }
+ ]);
+
+ if (!args.has.name) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'Invalid parameter: name is mandatory'
+ );
+ }
+
+ var nativeArgs = {
+ id: this._id,
+ name: args.name
+ };
+ var result = native_.callSync('MLPipelineGetSource', nativeArgs);
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ ValidPipelineGetSourceExceptions,
+ AbortError
+ );
+ }
+
+ return new Source(args.name, this._id);
+};
//Pipeline::getSource() end
//Pipeline::getSwitch() begin
@@ -261,6 +329,11 @@ Pipeline.prototype.getSwitch = function() {
//Pipeline::getSwitch() end
//Pipeline::getValve() begin
+var ValidValveIsOpenAndSetOpenExceptions = [
+ 'NotFoundError',
+ 'NotSupportedError',
+ 'AbortError'
+];
function Valve(name, pipeline_id) {
Object.defineProperties(this, {
name: {
@@ -269,6 +342,26 @@ function Valve(name, pipeline_id) {
},
_pipeline_id: {
value: pipeline_id
+ },
+ isOpen: {
+ get: function() {
+ var result = native_.callSync('MLPipelineValveIsOpen', {
+ id: this._pipeline_id,
+ name: this.name
+ });
+
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ ValidValveIsOpenAndSetOpenExceptions,
+ AbortError
+ );
+ }
+
+ return result.result;
+ },
+ set: function() {},
+ enumerable: true
}
});
}
@@ -491,7 +584,36 @@ Switch.prototype.select = function() {
//Switch::select() end
//Valve::setOpen() begin
+Valve.prototype.setOpen = function() {
+ var args = validator_.validateArgs(arguments, [
+ {
+ name: 'open',
+ type: validator_.Types.BOOLEAN
+ }
+ ]);
+
+ if (!args.has.open) {
+ throw new WebAPIException(
+ WebAPIException.INVALID_VALUES_ERR,
+ 'Invalid parameter: open is mandatory'
+ );
+ }
+
+ var nativeArgs = {
+ id: this._pipeline_id,
+ name: this.name,
+ open: args.open
+ };
+ var result = native_.callSync('MLPipelineValveSetOpen', nativeArgs);
+ if (native_.isFailure(result)) {
+ throw native_.getErrorObjectAndValidate(
+ result,
+ ValidValveIsOpenAndSetOpenExceptions,
+ AbortError
+ );
+ }
+};
//Valve::setOpen() end
var MachineLearningPipeline = function() {};
diff --git a/src/ml/ml.gyp b/src/ml/ml.gyp
index 40aebd3..0b08553 100644
--- a/src/ml/ml.gyp
+++ b/src/ml/ml.gyp
@@ -23,8 +23,8 @@
'ml_pipeline_nodeinfo.h',
'ml_pipeline_switch.cc',
'ml_pipeline_switch.h',
- #TODO pipeline Source
- #TODO pipeline Valve
+ 'ml_pipeline_source.h',
+ 'ml_pipeline_source.cc',
'ml_pipeline_valve.h',
'ml_pipeline_valve.cc',
'ml_tensors_data_manager.cc',
diff --git a/src/ml/ml_instance.cc b/src/ml/ml_instance.cc
index e4c7833..078b7b5 100644
--- a/src/ml/ml_instance.cc
+++ b/src/ml/ml_instance.cc
@@ -40,10 +40,15 @@ const std::string kDefinition = "definition";
const std::string kPipelineStateChangeListenerName = "listenerName";
const std::string kOtherId = "otherId";
const std::string kPadName = "padName";
+const std::string kOpen = "open";
const std::string kNodeName = "nodeName";
const std::string kProperty = "property";
const std::string kBOOLEAN = "BOOLEAN";
const std::string kSTRING = "STRING";
+const std::string kBuffer = "buffer";
+const std::string kSize = "size";
+const std::string kLocation = "location";
+const std::string kShape = "shape";
} // namespace
using namespace common;
@@ -76,7 +81,7 @@ using namespace common;
MlInstance::MlInstance()
: tensors_info_manager_{&tensors_data_manager_},
single_manager_{&tensors_info_manager_},
- pipeline_manager_{this} {
+ pipeline_manager_{this, &tensors_info_manager_} {
ScopeLogger();
using namespace std::placeholders;
@@ -98,8 +103,13 @@ MlInstance::MlInstance()
REGISTER_METHOD(MLTensorsInfoClone);
REGISTER_METHOD(MLTensorsInfoEquals);
REGISTER_METHOD(MLTensorsInfoDispose);
+ REGISTER_METHOD(MLPipelineValveSetOpen);
+ REGISTER_METHOD(MLPipelineValveIsOpen);
REGISTER_METHOD(MLTensorsDataDispose);
+ REGISTER_METHOD(MLTensorsDataGetTensorRawData);
+ REGISTER_METHOD(MLTensorsDataGetTensorType);
+ REGISTER_METHOD(MLTensorsDataSetTensorRawData);
// Single API begin
REGISTER_METHOD(MLSingleManagerOpenModel);
@@ -128,6 +138,8 @@ MlInstance::MlInstance()
REGISTER_METHOD(MLPipelineGetValve);
REGISTER_METHOD(MLPipelineNodeInfoGetProperty);
REGISTER_METHOD(MLPipelineNodeInfoSetProperty);
+ REGISTER_METHOD(MLPipelineGetSource);
+ REGISTER_METHOD(MLPipelineGetInputTensorsInfo);
// Pipeline API end
#undef REGISTER_METHOD
@@ -602,6 +614,110 @@ void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::obj
}
ReportSuccess(out);
}
+void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+ CHECK_ARGS(args, kTensorsDataId, double, out);
+ CHECK_ARGS(args, kIndex, double, out);
+ CHECK_ARGS(args, kLocation, picojson::array, out);
+ CHECK_ARGS(args, kSize, picojson::array, out);
+
+ int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int index = static_cast<int>(args.get(kIndex).get<double>());
+
+ TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ if (nullptr == tensors_data) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
+ ("Could not find TensorsData handle with given id: %d", tensor_data_id));
+ return;
+ }
+ // TODO: validate location and size - will be done in future commit
+ int location[ML_TENSOR_RANK_LIMIT];
+ int size[ML_TENSOR_RANK_LIMIT];
+ TensorRawData raw_data;
+ PlatformResult result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+
+ std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size};
+ out[kBuffer] = picojson::value(picojson::string_type, true);
+ common::encode_binary_in_string(out_data, out[kBuffer].get<std::string>());
+
+ out[kType] = picojson::value(raw_data.type_str);
+ picojson::array shape = picojson::array{};
+ for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
+ shape.push_back(picojson::value{static_cast<double>(raw_data.shape[i])});
+ }
+ out[kShape] = picojson::value{shape};
+
+ ReportSuccess(out);
+}
+
+void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+ CHECK_ARGS(args, kTensorsDataId, double, out);
+ CHECK_ARGS(args, kIndex, double, out);
+
+ int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int index = static_cast<int>(args.get(kIndex).get<double>());
+
+ TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ if (nullptr == tensors_data) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
+ ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ return;
+ }
+
+ std::string tensor_type_string;
+ PlatformResult result =
+ types::TensorTypeEnum.getName(tensors_data->GetTensorType(index), &tensor_type_string);
+ if (!result) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
+ &out,
+ ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
+ return;
+ }
+
+ picojson::value val = picojson::value{tensor_type_string};
+ ReportSuccess(val, out);
+}
+
+void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+ CHECK_ARGS(args, kTensorsDataId, double, out);
+ CHECK_ARGS(args, kIndex, double, out);
+ CHECK_ARGS(args, kBuffer, std::string, out);
+ CHECK_ARGS(args, kLocation, picojson::array, out);
+ CHECK_ARGS(args, kSize, picojson::array, out);
+
+ int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int index = static_cast<int>(args.get(kIndex).get<double>());
+
+ TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ if (nullptr == tensors_data) {
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
+ ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ return;
+ }
+
+ int location[ML_TENSOR_RANK_LIMIT] = {0, 0, 0, 0};
+ int size[ML_TENSOR_RANK_LIMIT] = {-1, -1, -1, -1};
+ // TODO: validate location and size - will be done in future commit
+
+ const std::string& str_buffer = args.get(kBuffer).get<std::string>();
+ std::vector<std::uint8_t> buffer;
+ common::decode_binary_from_string(str_buffer, buffer);
+
+ TensorRawData rawData{.data = buffer.data(), .size = buffer.size()};
+ PlatformResult result = tensors_data->SetTensorRawData(index, location, size, rawData);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+
+ ReportSuccess(out);
+}
// Common ML API end
// Single API begin
@@ -901,7 +1017,24 @@ void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::ob
// Pipeline::getNodeInfo() end
// Pipeline::getSource() begin
+void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+
+ CHECK_ARGS(args, kId, double, out);
+ CHECK_ARGS(args, kName, std::string, out);
+
+ auto name = args.get(kName).get<std::string>();
+ auto id = static_cast<int>(args.get(kId).get<double>());
+
+ PlatformResult result = pipeline_manager_.GetSource(id, name);
+
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+ ReportSuccess(out);
+}
// Pipeline::getSource() end
// Pipeline::getSwitch() begin
@@ -1030,7 +1163,24 @@ void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, pico
// NodeInfo::setProperty() end
// Source::inputTensorsInfo begin
+void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: [%s]", args.serialize().c_str());
+
+ CHECK_ARGS(args, kId, double, out);
+ CHECK_ARGS(args, kName, std::string, out);
+
+ auto id = static_cast<int>(args.get(kId).get<double>());
+ const auto& name = args.get(kName).get<std::string>();
+
+ int res_id = -1;
+ PlatformResult result = pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
+ if (!result) {
+ LogAndReportError(result, &out);
+ return;
+ }
+ ReportSuccess(out);
+}
// Source::inputTensorsInfo end
// Source::inputData() begin
@@ -1081,9 +1231,48 @@ void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::o
// Switch::select() end
// Valve::setOpen() begin
+void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+
+ CHECK_ARGS(args, kId, double, out);
+ CHECK_ARGS(args, kName, std::string, out);
+ CHECK_ARGS(args, kOpen, bool, out);
+
+ auto name = args.get(kName).get<std::string>();
+ auto pipeline_id = args.get(kId).get<double>();
+ auto open = args.get(kOpen).get<bool>();
+
+ auto ret = pipeline_manager_.ValveSetOpen(pipeline_id, name, open);
+ if (!ret) {
+ LogAndReportError(ret, &out);
+ return;
+ }
+ ReportSuccess(out);
+}
// Valve::setOpen() end
+// Valve::isOpen() begin
+void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out) {
+ ScopeLogger("args: %s", args.serialize().c_str());
+
+ CHECK_ARGS(args, kId, double, out);
+ CHECK_ARGS(args, kName, std::string, out);
+
+ auto name = args.get(kName).get<std::string>();
+ auto pipeline_id = args.get(kId).get<double>();
+ auto open = true;
+
+ auto ret = pipeline_manager_.ValveIsOpen(pipeline_id, name, &open);
+ if (!ret) {
+ LogAndReportError(ret, &out);
+ return;
+ }
+
+ ReportSuccess(picojson::value{open}, out);
+}
+// Valve::isOpen() end
+
// Pipeline API end
#undef CHECK_EXIST
diff --git a/src/ml/ml_instance.h b/src/ml/ml_instance.h
index 95125fb..6bbc16e 100644
--- a/src/ml/ml_instance.h
+++ b/src/ml/ml_instance.h
@@ -56,6 +56,9 @@ class MlInstance : public common::ParsedInstance {
void MLTensorsInfoDispose(const picojson::value& args, picojson::object& out);
void MLTensorsDataDispose(const picojson::value& args, picojson::object& out);
+ void MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out);
+ void MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out);
+ void MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out);
TensorsInfoManager tensors_info_manager_;
TensorsDataManager tensors_data_manager_;
@@ -103,7 +106,7 @@ class MlInstance : public common::ParsedInstance {
// Pipeline::getNodeInfo() end
// Pipeline::getSource() begin
-
+ void MLPipelineGetSource(const picojson::value& args, picojson::object& out);
// Pipeline::getSource() end
// Pipeline::getSwitch() begin
@@ -139,7 +142,7 @@ class MlInstance : public common::ParsedInstance {
// NodeInfo::setProperty() end
// Source::inputTensorsInfo begin
-
+ void MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out);
// Source::inputTensorsInfo end
// Source::inputData() begin
@@ -155,8 +158,12 @@ class MlInstance : public common::ParsedInstance {
// Switch::select() end
// Valve::setOpen() begin
-
+ void MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out);
// Valve::setOpen() end
+
+ // Valve::isOpen() begin
+ void MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out);
+ // Valve::isOpen() end
// Pipeline API end
};
diff --git a/src/ml/ml_pipeline.cc b/src/ml/ml_pipeline.cc
index fe35948..1d1cce5 100644
--- a/src/ml/ml_pipeline.cc
+++ b/src/ml/ml_pipeline.cc
@@ -196,6 +196,8 @@ PlatformResult Pipeline::Dispose() {
valves_.clear();
+ sources_.clear();
+
auto ret = ml_pipeline_destroy(pipeline_);
if (ML_ERROR_NONE != ret) {
LoggerE("ml_pipeline_destroy() failed: [%d] (%s)", ret, get_error_message(ret));
@@ -210,7 +212,7 @@ PlatformResult Pipeline::Dispose() {
// Pipeline::dispose() end
// Pipeline::getNodeInfo() begin
-PlatformResult Pipeline::GetNodeInfo(std::string& name) {
+PlatformResult Pipeline::GetNodeInfo(const std::string& name) {
ScopeLogger("id_: [%d], name: [%s]", id_, name.c_str());
auto nodeinfo_it = node_info_.find(name);
@@ -233,7 +235,22 @@ PlatformResult Pipeline::GetNodeInfo(std::string& name) {
// Pipeline::getNodeInfo() end
// Pipeline::getSource() begin
+PlatformResult Pipeline::GetSource(const std::string& name) {
+ ScopeLogger("id: [%d], name: [%s]", id_, name.c_str());
+ auto source_it = sources_.find(name);
+ if (sources_.end() != source_it) {
+ LoggerD("Source [%s] found", name.c_str());
+ return PlatformResult{};
+ }
+
+ std::unique_ptr