From: Piotr Kosko/Tizen API (PLT) /SRPOL/Engineer/Samsung Electronics
Date: Thu, 10 Mar 2022 14:13:33 +0000 (+0100)
Subject: [ML] Disable ML Trainer for TV build profile
X-Git-Tag: submit/tizen/20220325.070959~4
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=bd7d748831aa290bb88d6375e8267d78d728f40c;p=platform%2Fcore%2Fapi%2Fwebapi-plugins.git
[ML] Disable ML Trainer for TV build profile
[verification] Compilation succeeds for mobile and tv profile.
Mobile profile includes all C++ implementation dependent on nntrainer.
TV profile build does not include any C++ implementation of
tizen.ml.trainer methods.
Tested in the Chrome console that for Mobile tizen.ml.trainer is available
and working properly. For TV the feature is disabled (does not work), but
there are no crashes.
Change-Id: I55dd846d7ba8303a0422deabc9aa2b6dbfe27612
---
diff --git a/packaging/webapi-plugins.spec b/packaging/webapi-plugins.spec
index 98c29791..bf182419 100644
--- a/packaging/webapi-plugins.spec
+++ b/packaging/webapi-plugins.spec
@@ -60,6 +60,7 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_common_feature_messaging_support 0
%define tizen_common_feature_metadata_support 1
%define tizen_common_feature_ml_support 1
+%define tizen_common_feature_ml_nntrainer_support 1
%define tizen_common_feature_nbs_support 0
%define tizen_common_feature_nfc_support 0
%define tizen_common_feature_nfc_emulation_support 0
@@ -124,6 +125,7 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_mobile_feature_messaging_support 1
%define tizen_mobile_feature_metadata_support 1
%define tizen_mobile_feature_ml_support 1
+%define tizen_mobile_feature_ml_nntrainer_support 1
%define tizen_mobile_feature_nfc_support 0
%define tizen_mobile_feature_nfc_emulation_support 0
%define tizen_mobile_feature_notification_support 1
@@ -232,6 +234,7 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_wearable_feature_messaging_support 0
%define tizen_wearable_feature_metadata_support 1
%define tizen_wearable_feature_ml_support 1
+%define tizen_wearable_feature_ml_nntrainer_support 1
%define tizen_wearable_feature_nfc_support 1
%define tizen_wearable_feature_nfc_emulation_support 0
%define tizen_wearable_feature_notification_support 1
@@ -313,6 +316,8 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_tv_feature_messaging_support 0
%define tizen_tv_feature_metadata_support 1
%define tizen_tv_feature_ml_support 1
+#TODO -- enable when the native API is supported on TV
+%define tizen_tv_feature_ml_nntrainer_support 0
%define tizen_tv_feature_nbs_support 0
%define tizen_tv_feature_nfc_support 0
%define tizen_tv_feature_nfc_emulation_support 0
@@ -373,6 +378,7 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_feature_message_port_support %{expand:%tizen_%{?profile}_feature_message_port_support}
%define tizen_feature_messaging_support %{expand:%tizen_%{?profile}_feature_messaging_support}
%define tizen_feature_ml_support %{expand:%tizen_%{?profile}_feature_ml_support}
+%define tizen_feature_ml_nntrainer_support %{expand:%tizen_%{?profile}_feature_ml_nntrainer_support}
%define tizen_feature_nfc_emulation_support %{expand:%tizen_%{?profile}_feature_nfc_emulation_support}
%define tizen_feature_nfc_support %{expand:%tizen_%{?profile}_feature_nfc_support}
%define tizen_feature_notification_support %{expand:%tizen_%{?profile}_feature_notification_support}
@@ -533,10 +539,14 @@ BuildRequires: pkgconfig(db-util)
%if "%{?tizen_feature_ml_support}" == "1" || "%{?unified_build}" == "1"
BuildRequires: pkgconfig(nnstreamer)
BuildRequires: pkgconfig(capi-ml-inference)
+%endif
+
+%if "%{?tizen_feature_ml_nntrainer_support}" == "1" || "%{?unified_build}" == "1"
BuildRequires: pkgconfig(nntrainer)
BuildRequires: pkgconfig(capi-ml-training)
%endif
+
%if "%{?tizen_feature_badge_support}" == "1" || "%{?unified_build}" == "1"
BuildRequires: pkgconfig(badge)
%endif
@@ -739,6 +749,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_mobile_f
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_mobile_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_mobile_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_mobile_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_mobile_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_mobile_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_mobile_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_mobile_feature_nfc_support}"
@@ -831,6 +842,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_mobile_f
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_mobile_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_mobile_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_mobile_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_mobile_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_mobile_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_mobile_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_mobile_feature_nfc_support}"
@@ -911,6 +923,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_wearable
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_wearable_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_wearable_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_wearable_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_wearable_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_wearable_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_wearable_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_wearable_feature_nfc_support}"
@@ -998,6 +1011,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_wearable
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_wearable_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_wearable_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_wearable_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_wearable_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_wearable_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_wearable_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_wearable_feature_nfc_support}"
@@ -1078,6 +1092,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_tv_featu
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_tv_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_tv_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_tv_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_tv_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_tv_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_tv_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_tv_feature_nfc_support}"
@@ -1156,6 +1171,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_common_f
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_common_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_common_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_common_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_common_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_common_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_common_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_common_feature_nfc_support}"
diff --git a/src/ml/ml.gyp b/src/ml/ml.gyp
index 9f3534c1..ff6e5323 100644
--- a/src/ml/ml.gyp
+++ b/src/ml/ml.gyp
@@ -37,10 +37,6 @@
'ml_tensors_info_manager.h',
'ml_single_manager.cc',
'ml_single_manager.h',
- 'ml_trainer_manager.cc',
- 'ml_trainer_manager.h',
- 'ml_trainer_objects.cc',
- 'ml_trainer_objects.h',
'ml_singleshot.cc',
'ml_singleshot.h',
'ml_utils.cc',
@@ -52,10 +48,25 @@
'packages': [
'nnstreamer',
'capi-ml-inference',
+ ]
+ },
+ }],
+ ['tizen_feature_ml_nntrainer_support == 1', {
+ 'variables': {
+ 'packages': [
'nntrainer',
'capi-ml-training',
- ]
+ ],
},
+ 'sources': [
+ 'ml_trainer_manager.cc',
+ 'ml_trainer_manager.h',
+ 'ml_trainer_objects.cc',
+ 'ml_trainer_objects.h',
+ ],
+ 'defines': [
+ 'NNTRAINER_SUPPORTED',
+ ],
}],
],
},
diff --git a/src/ml/ml_instance.cc b/src/ml/ml_instance.cc
index 9d924408..eec6ca72 100644
--- a/src/ml/ml_instance.cc
+++ b/src/ml/ml_instance.cc
@@ -15,16 +15,17 @@
*/
#include "ml_instance.h"
-#include "ml_utils.h"
#include "common/converter.h"
#include "common/logger.h"
#include "common/picojson.h"
#include "common/platform_result.h"
#include "common/tools.h"
+#include "ml_utils.h"
static_assert(ML_TENSOR_RANK_LIMIT == 4,
- "This implementation requires different ML_TENSOR_RANK_LIMIT. Please fix the code.");
+ "This implementation requires different ML_TENSOR_RANK_LIMIT. "
+ "Please fix the code.");
namespace extension {
namespace ml {
@@ -91,17 +92,19 @@ const std::string kValue = "value";
using namespace common;
-#define CHECK_EXIST(args, name, out) \
- if (!args.contains(name)) { \
- std::string msg = std::string(name) + " is required argument"; \
- LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), &out); \
- return; \
+#define CHECK_EXIST(args, name, out) \
+ if (!args.contains(name)) { \
+ std::string msg = std::string(name) + " is required argument"; \
+ LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), \
+ &out); \
+ return; \
}
-// CHECK_TYPE will throw AbortError by default, but it can be changed by providing
-// additional parameter to the macro, i.e.:
-// CHECK_TYPE(args, "name", std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
-#define CHECK_TYPE(...) CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
+// CHECK_TYPE will throw AbortError by default, but it can be changed by
+// providing additional parameter to the macro, i.e.: CHECK_TYPE(args, "name",
+// std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
+#define CHECK_TYPE(...) \
+ CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
#define CHECK_TYPE_X(_1, _2, _3, _4, _5, EXC_TYPE, ...) EXC_TYPE
#define CHECK_TYPE_5(args, name, type, out, error_type) \
if (!args.get(name).is()) { \
@@ -123,10 +126,13 @@ MlInstance::MlInstance()
ScopeLogger();
using namespace std::placeholders;
-#define REGISTER_METHOD(M) RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
-#define REGISTER_BINARY_METHOD(M) RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
+#define REGISTER_METHOD(M) \
+ RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
+#define REGISTER_BINARY_METHOD(M) \
+ RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
#define REGISTER_METHOD_WITH_BINARY_ANWSER(M) \
- RegisterSyncHandlerWithBinaryAnswer(#M, std::bind(&MlInstance::M, this, _1, _2))
+ RegisterSyncHandlerWithBinaryAnswer(#M, \
+ std::bind(&MlInstance::M, this, _1, _2))
REGISTER_METHOD(MLCheckNNFWAvailability);
REGISTER_METHOD(MLTensorsInfoCountGetter);
@@ -183,6 +189,7 @@ MlInstance::MlInstance()
REGISTER_METHOD(MLPipelineManagerCustomFilterOutput);
REGISTER_METHOD(MLPipelineManagerUnregisterCustomFilter);
+#ifdef NNTRAINER_SUPPORTED
REGISTER_METHOD(MLTrainerLayerSetProperty);
REGISTER_METHOD(MLTrainerLayerCreate);
REGISTER_METHOD(MLTrainerLayerGetName);
@@ -204,6 +211,7 @@ MlInstance::MlInstance()
REGISTER_METHOD(MLTrainerDatasetCreateFromFile);
REGISTER_METHOD(MLTrainerDatasetSetProperty);
REGISTER_METHOD(MLTrainerDatasetDispose);
+#endif
#undef REGISTER_METHOD
}
@@ -221,7 +229,8 @@ TensorsDataManager& MlInstance::GetTensorsDataManager() {
return tensors_data_manager_;
}
-void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLCheckNNFWAvailability(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_EXIST(args, kNnfw, out)
CHECK_EXIST(args, kHw, out)
@@ -233,34 +242,41 @@ void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::
if (args.get(kCustomRequirement).is()) {
customRequirement = args.get(kCustomRequirement).get();
}
- bool availability_val = util::CheckNNFWAvailability(nnfw, hw, customRequirement);
+ bool availability_val =
+ util::CheckNNFWAvailability(nnfw, hw, customRequirement);
picojson::value available = picojson::value{availability_val};
ReportSuccess(available, out);
}
-void MlInstance::MLTensorsInfoCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
TensorsInfo* tensorsInfo = GetTensorsInfoManager().CreateTensorsInfo();
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not create new TensorsInfo handle"));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not create new TensorsInfo handle"));
return;
}
out[kId] = picojson::value(static_cast(tensorsInfo->Id()));
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
unsigned int count = 0;
@@ -273,7 +289,8 @@ void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson:
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kType, std::string, out);
@@ -282,20 +299,26 @@ void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojso
CHECK_ARGS(args, kDimensions, picojson::array, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
const std::string& tensorType = args.get(kType).get();
ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
+ PlatformResult result =
+ types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting value of TensorType"),
&out,
- ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getValue() failed, error: %s",
+ result.message().c_str()));
return;
}
@@ -325,16 +348,20 @@ void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast(args.get(kIndex).get());
@@ -355,17 +382,21 @@ void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kDimensions, picojson::array, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
@@ -386,16 +417,20 @@ void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojso
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast(args.get(kIndex).get());
@@ -409,17 +444,21 @@ void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kName, std::string, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
@@ -433,16 +472,20 @@ void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args, picojso
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast(args.get(kIndex).get());
@@ -457,22 +500,27 @@ void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast(args.get(kIndex).get());
ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
+ PlatformResult result =
+ tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -480,9 +528,11 @@ void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojso
std::string tensorTypeString;
result = types::TensorTypeEnum.getName(tensorTypeEnum, &tensorTypeString);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting name of TensorType"),
&out,
- ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getName() failed, error: %s",
+ result.message().c_str()));
return;
}
@@ -490,27 +540,34 @@ void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kType, std::string, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
const std::string& tensorType = args.get(kType).get();
ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
+ PlatformResult result =
+ types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting value of TensorType"),
&out,
- ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getValue() failed, error: %s",
+ result.message().c_str()));
return;
}
@@ -523,46 +580,58 @@ void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args, picojso
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
- TensorsData* tensorsData = GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
+ TensorsData* tensorsData =
+ GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
if (!tensorsData) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not create TensorsData"));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not create TensorsData"));
return;
}
out[kTensorsDataId] = picojson::value(static_cast(tensorsData->Id()));
- out[kTensorsInfoId] = picojson::value(static_cast(tensorsData->TensorsInfoId()));
+ out[kTensorsInfoId] =
+ picojson::value(static_cast(tensorsData->TensorsInfoId()));
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoClone(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
TensorsInfo* cloned = GetTensorsInfoManager().CloneTensorsInfo(tensorsInfo);
if (nullptr == cloned) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
return;
}
@@ -570,7 +639,8 @@ void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::objec
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoEquals(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kOtherId, double, out);
@@ -580,15 +650,18 @@ void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::obje
TensorsInfo* first = GetTensorsInfoManager().GetTensorsInfo(firstId);
if (nullptr == first) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", firstId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not find TensorsInfo handle with given id: %d", firstId));
return;
}
TensorsInfo* second = GetTensorsInfoManager().GetTensorsInfo(secondId);
if (nullptr == second) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", secondId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", secondId));
return;
}
@@ -597,27 +670,35 @@ void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::obje
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoDispose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoDispose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
+ PlatformResult result =
+ GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
if (!result) {
LogAndReportError(result, &out);
}
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataDispose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
- int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int tensors_data_id =
+ static_cast<int>(args.get(kTensorsDataId).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
@@ -627,7 +708,8 @@ void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::obj
}
// Dispose underlying tensorsInfo
- PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
+ PlatformResult result =
+ GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
if (!result) {
LogAndReportError(result, &out);
return;
@@ -641,7 +723,8 @@ void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::obj
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
@@ -651,31 +734,36 @@ void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, pico
int tensor_data_id = static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensor_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensor_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensor_data_id));
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get(), location);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get(),
+ location, dimensions, size);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -688,7 +776,8 @@ void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, pico
return;
}
- std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
+ std::vector<std::uint8_t> out_data{raw_data.data,
+ raw_data.data + raw_data.size_in_bytes};
out[kBuffer] = picojson::value(picojson::string_type, true);
common::encode_binary_in_string(out_data, out[kBuffer].get());
@@ -702,8 +791,8 @@ void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args,
- std::vector<std::uint8_t>* out) {
+void MlInstance::MLTensorsDataGetTensorRawDataBinary(
+ const picojson::value& args, std::vector<std::uint8_t>* out) {
ScopeLogger("args: %s", args.serialize().c_str());
// TODO handle errors to out
// CHECK_ARGS(args, kTensorsDataId, double, out);
@@ -714,17 +803,20 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
int tensor_data_id = static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensor_data_id);
if (nullptr == tensors_data) {
- LoggerE("Could not find TensorsData handle with given id: %d", tensor_data_id);
- tools::ReportErrorToBinary(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
- out);
+ LoggerE("Could not find TensorsData handle with given id: %d",
+ tensor_data_id);
+ tools::ReportErrorToBinary(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ out);
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get<picojson::array>(), location);
if (!result) {
LoggerE("Reporting error.");
tools::ReportErrorToBinary(result, out);
@@ -732,7 +824,8 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LoggerE("Reporting error.");
tools::ReportErrorToBinary(result, out);
@@ -740,8 +833,8 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(),
+ location, dimensions, size);
if (!result) {
LoggerE("Reporting error.");
tools::ReportErrorToBinary(result, out);
@@ -766,7 +859,8 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
}
out_json[kShape] = picojson::value{shape};
- std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
+ std::vector<std::uint8_t> out_data{raw_data.data,
+ raw_data.data + raw_data.size_in_bytes};
// FORMAT:
// 4 byte === JSON length (N)
@@ -777,28 +871,36 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
tools::ReportDataToBinary(out_data, out);
}
-void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
- int tensors_data_id = static_cast(args.get(kTensorsDataId).get());
+ int tensors_data_id =
+ static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
std::string tensor_type_string;
- PlatformResult result =
- types::TensorTypeEnum.getName(tensors_data->GetTensorType(index), &tensor_type_string);
+ PlatformResult result = types::TensorTypeEnum.getName(
+ tensors_data->GetTensorType(index), &tensor_type_string);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting name of TensorType"),
&out,
- ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getName() failed, error: %s",
+ result.message().c_str()));
return;
}
@@ -806,46 +908,54 @@ void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger();
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kBuffer, std::string, out);
CHECK_ARGS(args, kLocation, picojson::array, out);
CHECK_ARGS(args, kSize, picojson::array, out);
- LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
+ LoggerD("%s, %s", kTensorsDataId.c_str(),
+ args.get(kTensorsDataId).serialize().c_str());
LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
- int tensors_data_id = static_cast(args.get(kTensorsDataId).get());
+ int tensors_data_id =
+ static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get(), location);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get(),
+ location, dimensions, size);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -864,7 +974,8 @@ void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t data_size,
+void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data,
+ size_t data_size,
picojson::object& out) {
ScopeLogger();
/*
@@ -879,7 +990,8 @@ void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t da
*/
unsigned int call_args_len_begin = 0;
unsigned int call_args_len = static_cast<unsigned int>(
- (data[call_args_len_begin] << 24) + (data[call_args_len_begin + 1] << 16) +
+ (data[call_args_len_begin] << 24) +
+ (data[call_args_len_begin + 1] << 16) +
(data[call_args_len_begin + 2] << 8) + (data[call_args_len_begin + 3]));
unsigned int buffer_len_begin = call_args_len_begin + 4;
@@ -899,46 +1011,54 @@ void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t da
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kLocation, picojson::array, out);
CHECK_ARGS(args, kSize, picojson::array, out);
- LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
+ LoggerD("%s, %s", kTensorsDataId.c_str(),
+ args.get(kTensorsDataId).serialize().c_str());
LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
- int tensors_data_id = static_cast(args.get(kTensorsDataId).get());
+ int tensors_data_id =
+ static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get(), location);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get(),
+ location, dimensions, size);
if (!result) {
LogAndReportError(result, &out);
return;
}
- TensorRawData raw_data{reinterpret_cast<uint8_t*>(const_cast<char*>(data + buffer_begin)),
- buffer_len};
+ TensorRawData raw_data{
+ reinterpret_cast<uint8_t*>(const_cast<char*>(data + buffer_begin)),
+ buffer_len};
result = tensors_data->SetTensorRawData(index, location, size, raw_data);
if (!result) {
LogAndReportError(result, &out);
@@ -948,7 +1068,8 @@ void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t da
ReportSuccess(out);
}
-void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleManagerOpenModel(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kModelPath, std::string, out);
CHECK_ARGS(args, kInTensorsInfo, double, out);
@@ -957,7 +1078,8 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
CHECK_ARGS(args, kHwType, std::string, out);
CHECK_ARGS(args, kIsDynamicMode, bool, out);
- const auto& model_path = common::tools::ConvertUriToPath(args.get(kModelPath).get<std::string>());
+ const auto& model_path =
+ common::tools::ConvertUriToPath(args.get(kModelPath).get<std::string>());
CHECK_STORAGE_ACCESS(model_path, &out);
TensorsInfo* in_tensors_info = nullptr;
@@ -965,8 +1087,10 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
if (kNoId != inTensorId) {
in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
if (nullptr == in_tensors_info) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", inTensorId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", inTensorId));
return;
}
}
@@ -976,22 +1100,25 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
if (kNoId != outTensorId) {
out_tensors_info = GetTensorsInfoManager().GetTensorsInfo(outTensorId);
if (nullptr == out_tensors_info) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", outTensorId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", outTensorId));
return;
}
}
ml_nnfw_type_e nnfw_e = ML_NNFW_TYPE_ANY;
- PlatformResult result =
- types::NNFWTypeEnum.getValue(args.get(kFwType).get(), &nnfw_e);
+ PlatformResult result = types::NNFWTypeEnum.getValue(
+ args.get(kFwType).get(), &nnfw_e);
if (!result) {
LogAndReportError(result, &out);
return;
}
ml_nnfw_hw_e hw_e = ML_NNFW_HW_ANY;
- result = types::HWTypeEnum.getValue(args.get(kHwType).get(), &hw_e);
+ result =
+ types::HWTypeEnum.getValue(args.get(kHwType).get(), &hw_e);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -999,19 +1126,22 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
auto is_dynamic_mode = args.get(kIsDynamicMode).get();
- auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
- is_dynamic_mode](decltype(out) out) {
+ auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e,
+ hw_e, is_dynamic_mode](decltype(out) out) {
PlatformResult result = common::tools::CheckFileAvailability(model_path);
if (!result) {
LogAndReportError(
- PlatformResult(ErrorCode::NOT_FOUND_ERR, "File does not exist or is not accessible"),
- &out, ("File does not exist or is not accessible: %s", model_path.c_str()));
+ PlatformResult(ErrorCode::NOT_FOUND_ERR,
+ "File does not exist or is not accessible"),
+ &out,
+ ("File does not exist or is not accessible: %s", model_path.c_str()));
return;
}
int res_id = -1;
- result = single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
- is_dynamic_mode, &res_id);
+ result =
+ single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info,
+ nnfw_e, hw_e, is_dynamic_mode, &res_id);
if (!result) {
ReportError(result, &out);
return;
@@ -1021,8 +1151,9 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
ReportSuccess(out);
};
- bool async =
- (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
+ bool async = (args.contains(kAsync) && args.get(kAsync).is<bool>())
+ ? args.get(kAsync).get<bool>()
+ : false;
if (!async) {
logic(out);
@@ -1043,7 +1174,8 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
}
}
-void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kGetInputMode, bool, out);
@@ -1063,7 +1195,8 @@ void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args, picojso
ReportSuccess(out);
}
-void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kInTensorsInfo, double, out);
@@ -1071,14 +1204,18 @@ void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson:
auto id = static_cast(args.get(kId).get());
auto inTensorId = static_cast(args.get(kInTensorsInfo).get());
- TensorsInfo* in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
+ TensorsInfo* in_tensors_info =
+ GetTensorsInfoManager().GetTensorsInfo(inTensorId);
if (nullptr == in_tensors_info) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", inTensorId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", inTensorId));
return;
}
- TensorsInfo* clone = GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
+ TensorsInfo* clone =
+ GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
auto ret = single_manager_.SetNativeInputInfo(id, in_tensors_info);
if (!ret) {
@@ -1090,17 +1227,21 @@ void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson:
ReportSuccess(out);
}
-void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotInvoke(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kTensorsDataId, double, out);
int id = static_cast(args.get(kId).get());
- int tensors_data_id = static_cast(args.get(kTensorsDataId).get());
- bool async =
- (args.contains(kAsync) && args.get(kAsync).is()) ? args.get(kAsync).get() : false;
-
- TensorsData* in_tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ int tensors_data_id =
+ static_cast(args.get(kTensorsDataId).get());
+ bool async = (args.contains(kAsync) && args.get(kAsync).is())
+ ? args.get(kAsync).get()
+ : false;
+
+ TensorsData* in_tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (async && in_tensors_data) {
// in case of async flow need to prevent destroying entry data during invoke
// from JS, creation of a copy
@@ -1108,18 +1249,23 @@ void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::objec
in_tensors_data->GetTensorsInfo()->Handle(), in_tensors_data->Handle());
}
if (nullptr == in_tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
- auto logic = [this, id, tensors_data_id, in_tensors_data, async](decltype(out) out) {
+ auto logic = [this, id, tensors_data_id, in_tensors_data,
+ async](decltype(out) out) {
TensorsData* out_tensors_data = nullptr;
auto ret = single_manager_.Invoke(id, in_tensors_data, &out_tensors_data);
if (async) {
// in case of async flow, the in_tensor_data with underlying TensorsInfo
// was copied, thus need to be released here
- GetTensorsInfoManager().DisposeTensorsInfo(in_tensors_data->GetTensorsInfo());
+ GetTensorsInfoManager().DisposeTensorsInfo(
+ in_tensors_data->GetTensorsInfo());
GetTensorsDataManager().DisposeTensorsData(in_tensors_data);
}
if (!ret) {
@@ -1127,8 +1273,10 @@ void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::objec
return;
}
- out[kTensorsDataId] = picojson::value(static_cast<double>(out_tensors_data->Id()));
- out[kTensorsInfoId] = picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
+ out[kTensorsDataId] =
+ picojson::value(static_cast<double>(out_tensors_data->Id()));
+ out[kTensorsInfoId] =
+ picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
ReportSuccess(out);
};
@@ -1150,7 +1298,8 @@ void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::objec
}
}
-void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotGetValue(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -1168,7 +1317,8 @@ void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::obj
ReportSuccess(val, out);
}
-void MlInstance::MLSingleShotSetValue(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotSetValue(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -1187,7 +1337,8 @@ void MlInstance::MLSingleShotSetValue(const picojson::value& args, picojson::obj
ReportSuccess(out);
}
-void MlInstance::MLSingleShotSetTimeout(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotSetTimeout(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kTimeout, double, out);
@@ -1204,7 +1355,8 @@ void MlInstance::MLSingleShotSetTimeout(const picojson::value& args, picojson::o
ReportSuccess(out);
}
-void MlInstance::MLSingleShotClose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotClose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1226,9 +1378,11 @@ bool CreatePipelineArgumentsAreInvalid(const picojson::value& args) {
auto arguments_valid = args.get(kId).is();
arguments_valid &= args.get(kDefinition).is();
- arguments_valid &= (args.get(kPipelineStateChangeListenerName).is() ||
- args.get(kPipelineStateChangeListenerName).is());
- LoggerD("CreatePipeline arguments are %s", arguments_valid ? "valid" : "invalid");
+ arguments_valid &=
+ (args.get(kPipelineStateChangeListenerName).is() ||
+ args.get(kPipelineStateChangeListenerName).is());
+ LoggerD("CreatePipeline arguments are %s",
+ arguments_valid ? "valid" : "invalid");
return !arguments_valid;
}
@@ -1240,7 +1394,9 @@ void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
ScopeLogger("args: %s", args.serialize().c_str());
if (CreatePipelineArgumentsAreInvalid(args)) {
- ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"}, &out);
+ ReportError(
+ PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"},
+ &out);
return;
}
@@ -1251,7 +1407,8 @@ void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
? args.get(kPipelineStateChangeListenerName).get()
: "";
- auto ret = pipeline_manager_.CreatePipeline(id, definition, state_change_listener_name);
+ auto ret = pipeline_manager_.CreatePipeline(id, definition,
+ state_change_listener_name);
if (!ret) {
ReportError(ret, &out);
@@ -1261,7 +1418,8 @@ void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetState(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetState(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
if (!args.get(kId).is()) {
@@ -1282,7 +1440,8 @@ void MlInstance::MLPipelineGetState(const picojson::value& args, picojson::objec
ReportSuccess(state_value, out);
}
-void MlInstance::MLPipelineStart(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineStart(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1299,7 +1458,8 @@ void MlInstance::MLPipelineStart(const picojson::value& args, picojson::object&
ReportSuccess(out);
}
-void MlInstance::MLPipelineStop(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineStop(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1316,7 +1476,8 @@ void MlInstance::MLPipelineStop(const picojson::value& args, picojson::object& o
ReportSuccess(out);
}
-void MlInstance::MLPipelineDispose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineDispose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
if (!args.get(kId).is()) {
@@ -1336,7 +1497,8 @@ void MlInstance::MLPipelineDispose(const picojson::value& args, picojson::object
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1355,7 +1517,8 @@ void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::ob
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetSource(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1374,7 +1537,8 @@ void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::obje
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetSwitch(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
if (!args.get(kId).is()) {
@@ -1403,7 +1567,8 @@ void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::obje
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetValve(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetValve(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1433,7 +1598,8 @@ void MlInstance::MLPipelineRegisterSinkListener(const picojson::value& args,
auto sink_name = args.get(kName).get();
auto listener_name = args.get(kListenerName).get();
- auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id, listener_name);
+ auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id,
+ listener_name);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1461,8 +1627,8 @@ void MlInstance::MLPipelineUnregisterSinkListener(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& args,
- picojson::object& out) {
+void MlInstance::MLPipelineManagerRegisterCustomFilter(
+ const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kName, std::string, out);
@@ -1472,15 +1638,19 @@ void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& ar
const auto& custom_filter_name = args.get(kName).get();
const auto& listener_name = args.get(kListenerName).get();
- auto input_tensors_info_id = static_cast(args.get(kInputTensorsInfoId).get());
- auto output_tensors_info_id = static_cast(args.get(kOutputTensorsInfoId).get());
+ auto input_tensors_info_id =
+ static_cast(args.get(kInputTensorsInfoId).get());
+ auto output_tensors_info_id =
+ static_cast(args.get(kOutputTensorsInfoId).get());
TensorsInfo* input_tensors_info_ptr =
GetTensorsInfoManager().GetTensorsInfo(input_tensors_info_id);
if (!input_tensors_info_ptr) {
LogAndReportError(
- PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", input_tensors_info_id));
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d",
+ input_tensors_info_id));
return;
}
@@ -1488,13 +1658,16 @@ void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& ar
GetTensorsInfoManager().GetTensorsInfo(output_tensors_info_id);
if (!output_tensors_info_ptr) {
LogAndReportError(
- PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", output_tensors_info_id));
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d",
+ output_tensors_info_id));
return;
}
auto ret = pipeline_manager_.RegisterCustomFilter(
- custom_filter_name, listener_name, input_tensors_info_ptr, output_tensors_info_ptr);
+ custom_filter_name, listener_name, input_tensors_info_ptr,
+ output_tensors_info_ptr);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1503,8 +1676,8 @@ void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& ar
ReportSuccess(out);
}
-void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args,
- picojson::object& out) {
+void MlInstance::MLPipelineManagerCustomFilterOutput(
+ const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kName, std::string, out);
@@ -1515,7 +1688,8 @@ void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args
auto status = static_cast<int>(args.get(kStatus).get<double>());
auto request_id = static_cast<size_t>(args.get(kRequestId).get<double>());
- auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status);
+ auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name,
+ request_id, status);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1524,8 +1698,8 @@ void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args
ReportSuccess(out);
}
-void MlInstance::MLPipelineManagerUnregisterCustomFilter(const picojson::value& args,
- picojson::object& out) {
+void MlInstance::MLPipelineManagerUnregisterCustomFilter(
+ const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kName, std::string, out);
@@ -1541,7 +1715,8 @@ void MlInstance::MLPipelineManagerUnregisterCustomFilter(const picojson::value&
ReportSuccess(out);
}
-void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1554,7 +1729,8 @@ void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, pico
const auto& node_name = args.get(kNodeName).get<std::string>();
const auto& type = args.get(kType).get<std::string>();
- PlatformResult result = pipeline_manager_.getProperty(id, node_name, name, type, &out);
+ PlatformResult result =
+ pipeline_manager_.getProperty(id, node_name, name, type, &out);
if (!result) {
LogAndReportError(result, &out);
@@ -1564,7 +1740,8 @@ void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1587,7 +1764,8 @@ void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, pico
}
const picojson::value& property = args.get(kProperty);
- PlatformResult result = pipeline_manager_.setProperty(id, node_name, name, type, property);
+ PlatformResult result =
+ pipeline_manager_.setProperty(id, node_name, name, type, property);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -1596,7 +1774,8 @@ void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1606,7 +1785,8 @@ void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, pico
const auto& name = args.get(kName).get();
int res_id = -1;
- PlatformResult result = pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
+ PlatformResult result =
+ pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -1615,7 +1795,8 @@ void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineSourceInputData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1624,17 +1805,21 @@ void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson
auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
auto& source_name = args.get(kName).get<std::string>();
- auto tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ auto tensor_data_id =
+ static_cast<int>(args.get(kTensorsDataId).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensor_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
- ("Could not get TensorData handle with given id: %d", tensor_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
+ ("Could not get TensorData handle with given id: %d", tensor_data_id));
return;
}
- auto ret = pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
+ auto ret =
+ pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1643,7 +1828,8 @@ void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson
ReportSuccess(out);
}
-void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1653,7 +1839,8 @@ void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojso
auto& switch_name = args.get(kName).get<std::string>();
picojson::array pad_list;
- auto ret = pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
+ auto ret =
+ pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1662,7 +1849,8 @@ void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojso
ReportSuccess(picojson::value{std::move(pad_list)}, out);
}
-void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineSwitchSelect(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1682,7 +1870,8 @@ void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::o
ReportSuccess(out);
}
-void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineValveSetOpen(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1702,7 +1891,8 @@ void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::o
ReportSuccess(out);
}
-void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineValveIsOpen(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1721,7 +1911,9 @@ void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::ob
ReportSuccess(picojson::value{open}, out);
}
-void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args, picojson::object& out) {
+#ifdef NNTRAINER_SUPPORTED
+void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -1739,7 +1931,8 @@ void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args, picojson
ReportSuccess(out);
}
-void MlInstance::MLTrainerLayerCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerLayerCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kType, std::string, out);
int id = -1;
@@ -1794,7 +1987,8 @@ void MlInstance::MLTrainerLayerDispose(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -1804,7 +1998,8 @@ void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, pico
auto name = args.get(kName).get<std::string>();
auto value = args.get(kValue).get<std::string>();
- PlatformResult result = trainer_manager_.OptimizerSetProperty(id, name, value);
+ PlatformResult result =
+ trainer_manager_.OptimizerSetProperty(id, name, value);
if (!result) {
ReportError(result, &out);
return;
@@ -1812,7 +2007,8 @@ void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLTrainerOptimizerCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerOptimizerCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kType, std::string, out);
int id = -1;
@@ -1849,7 +2045,8 @@ void MlInstance::MLTrainerOptimizerDispose(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
int id = -1;
PlatformResult result;
@@ -1872,7 +2069,8 @@ void MlInstance::MLTrainerModelCreate(const picojson::value& args, picojson::obj
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelCompile(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelCompile(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kOptions, picojson::object, out);
@@ -1889,7 +2087,8 @@ void MlInstance::MLTrainerModelCompile(const picojson::value& args, picojson::ob
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelAddLayer(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelAddLayer(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kLayerId, double, out);
@@ -1906,7 +2105,8 @@ void MlInstance::MLTrainerModelAddLayer(const picojson::value& args, picojson::o
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelRun(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelRun(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kOptions, picojson::object, out);
@@ -1940,7 +2140,8 @@ void MlInstance::MLTrainerModelRun(const picojson::value& args, picojson::object
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelSummarize(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelSummarize(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kLevel, std::string, out);
@@ -2047,7 +2248,8 @@ void MlInstance::MLTrainerModelLoad(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelSetDataset(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kDatasetId, double, out);
@@ -2064,7 +2266,8 @@ void MlInstance::MLTrainerModelSetDataset(const picojson::value& args, picojson:
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelSetOptimizer(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kOptimizerId, double, out);
@@ -2108,8 +2311,8 @@ void MlInstance::MLTrainerDatasetCreateFromFile(const picojson::value& args,
const std::string& valid_file_path = args.get(kValid).get<std::string>();
const std::string& test_file_path = args.get(kTest).get<std::string>();
- PlatformResult result =
- trainer_manager_.CreateFileDataset(id, train_file_path, valid_file_path, test_file_path);
+ PlatformResult result = trainer_manager_.CreateFileDataset(
+ id, train_file_path, valid_file_path, test_file_path);
if (!result) {
ReportError(result, &out);
return;
@@ -2118,7 +2321,8 @@ void MlInstance::MLTrainerDatasetCreateFromFile(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLTrainerDatasetSetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerDatasetSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -2159,6 +2363,7 @@ void MlInstance::MLTrainerDatasetDispose(const picojson::value& args,
}
ReportSuccess(out);
}
+#endif
#undef CHECK_EXIST
#undef CHECK_TYPE
diff --git a/src/ml/ml_instance.h b/src/ml/ml_instance.h
index 6269a6ef..b78ae46d 100644
--- a/src/ml/ml_instance.h
+++ b/src/ml/ml_instance.h
@@ -19,15 +19,15 @@
#include "common/extension.h"
#include "common/worker.h"
-
#include "ml/ml_pipeline_manager.h"
#include "ml/ml_single_manager.h"
+#ifdef NNTRAINER_SUPPORTED
#include "ml/ml_trainer_manager.h"
-#include "nnstreamer/nnstreamer-single.h"
-#include "nnstreamer/nnstreamer.h"
-
+#endif
#include "ml_tensors_data_manager.h"
#include "ml_tensors_info_manager.h"
+#include "nnstreamer/nnstreamer-single.h"
+#include "nnstreamer/nnstreamer.h"
namespace extension {
namespace ml {
@@ -40,28 +40,44 @@ class MlInstance : public common::ParsedInstance {
TensorsDataManager& GetTensorsDataManager();
private:
- void MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out);
+ void MLCheckNNFWAvailability(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoCountGetter(const picojson::value& args,
+ picojson::object& out);
void MLTensorsInfoCreate(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetDimensions(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoSetDimensions(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetTensorName(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out);
+ void MLTensorsInfoAddTensorInfo(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetDimensions(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoSetDimensions(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetTensorName(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoSetTensorName(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetTensorSize(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetTensorType(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoSetTensorType(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetTensorsData(const picojson::value& args,
+ picojson::object& out);
void MLTensorsInfoClone(const picojson::value& args, picojson::object& out);
void MLTensorsInfoEquals(const picojson::value& args, picojson::object& out);
void MLTensorsInfoDispose(const picojson::value& args, picojson::object& out);
void MLTensorsDataDispose(const picojson::value& args, picojson::object& out);
- void MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out);
- void MLTensorsDataGetTensorRawDataBinary(const picojson::value& args, std::vector<std::uint8_t>* vec);
- void MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out);
- void MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out);
- void MLTensorsDataSetTensorRawDataBinary(const char* data, size_t size, picojson::object& out);
+ void MLTensorsDataGetTensorRawData(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsDataGetTensorRawDataBinary(const picojson::value& args,
+ std::vector<std::uint8_t>* vec);
+ void MLTensorsDataGetTensorType(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsDataSetTensorRawData(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsDataSetTensorRawDataBinary(const char* data, size_t size,
+ picojson::object& out);
/*
* ########## IMPORTANT ##########
@@ -72,13 +88,17 @@ class MlInstance : public common::ParsedInstance {
common::Worker worker_;
SingleManager single_manager_;
- void MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out);
- void MLSingleShotGetTensorsInfo(const picojson::value& args, picojson::object& out);
- void MLSingleShotSetInputInfo(const picojson::value& args, picojson::object& out);
+ void MLSingleManagerOpenModel(const picojson::value& args,
+ picojson::object& out);
+ void MLSingleShotGetTensorsInfo(const picojson::value& args,
+ picojson::object& out);
+ void MLSingleShotSetInputInfo(const picojson::value& args,
+ picojson::object& out);
void MLSingleShotInvoke(const picojson::value& args, picojson::object& out);
void MLSingleShotGetValue(const picojson::value& args, picojson::object& out);
void MLSingleShotSetValue(const picojson::value& args, picojson::object& out);
- void MLSingleShotSetTimeout(const picojson::value& args, picojson::object& out);
+ void MLSingleShotSetTimeout(const picojson::value& args,
+ picojson::object& out);
void MLSingleShotClose(const picojson::value& args, picojson::object& out);
/*
@@ -105,7 +125,8 @@ class MlInstance : public common::ParsedInstance {
*/
PipelineManager pipeline_manager_;
- void MLPipelineManagerCreatePipeline(const picojson::value& args, picojson::object& out);
+ void MLPipelineManagerCreatePipeline(const picojson::value& args,
+ picojson::object& out);
void MLPipelineGetState(const picojson::value& args, picojson::object& out);
@@ -115,7 +136,8 @@ class MlInstance : public common::ParsedInstance {
void MLPipelineDispose(const picojson::value& args, picojson::object& out);
- void MLPipelineGetNodeInfo(const picojson::value& args, picojson::object& out);
+ void MLPipelineGetNodeInfo(const picojson::value& args,
+ picojson::object& out);
void MLPipelineGetSource(const picojson::value& args, picojson::object& out);
@@ -123,57 +145,79 @@ class MlInstance : public common::ParsedInstance {
void MLPipelineGetValve(const picojson::value& args, picojson::object& out);
- void MLPipelineRegisterSinkListener(const picojson::value& args, picojson::object& out);
+ void MLPipelineRegisterSinkListener(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineUnregisterSinkListener(const picojson::value& args, picojson::object& out);
+ void MLPipelineUnregisterSinkListener(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineManagerRegisterCustomFilter(const picojson::value& args, picojson::object& out);
+ void MLPipelineManagerRegisterCustomFilter(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineManagerCustomFilterOutput(const picojson::value& args, picojson::object& out);
+ void MLPipelineManagerCustomFilterOutput(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineManagerUnregisterCustomFilter(const picojson::value& args, picojson::object& out);
+ void MLPipelineManagerUnregisterCustomFilter(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineNodeInfoGetProperty(const picojson::value& args, picojson::object& out);
+ void MLPipelineNodeInfoGetProperty(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineNodeInfoSetProperty(const picojson::value& args, picojson::object& out);
+ void MLPipelineNodeInfoSetProperty(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out);
+ void MLPipelineGetInputTensorsInfo(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineSourceInputData(const picojson::value& args, picojson::object& out);
+ void MLPipelineSourceInputData(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineSwitchGetPadList(const picojson::value& args, picojson::object& out);
+ void MLPipelineSwitchGetPadList(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineSwitchSelect(const picojson::value& args, picojson::object& out);
+ void MLPipelineSwitchSelect(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out);
+ void MLPipelineValveSetOpen(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out);
+ void MLPipelineValveIsOpen(const picojson::value& args,
+ picojson::object& out);
+#ifdef NNTRAINER_SUPPORTED
TrainerManager trainer_manager_;
- void MLTrainerLayerSetProperty(const picojson::value& args, picojson::object& out);
+ void MLTrainerLayerSetProperty(const picojson::value& args,
+ picojson::object& out);
void MLTrainerLayerCreate(const picojson::value& args, picojson::object& out);
void MLTrainerLayerGetName(const picojson::value& args,
picojson::object& out);
void MLTrainerLayerDispose(const picojson::value& args,
picojson::object& out);
- void MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out);
- void MLTrainerOptimizerCreate(const picojson::value& args, picojson::object& out);
+ void MLTrainerOptimizerSetProperty(const picojson::value& args,
+ picojson::object& out);
+ void MLTrainerOptimizerCreate(const picojson::value& args,
+ picojson::object& out);
void MLTrainerOptimizerDispose(const picojson::value& args,
picojson::object& out);
void MLTrainerModelCreate(const picojson::value& args, picojson::object& out);
- void MLTrainerModelCompile(const picojson::value& args, picojson::object& out);
- void MLTrainerModelAddLayer(const picojson::value& args, picojson::object& out);
+ void MLTrainerModelCompile(const picojson::value& args,
+ picojson::object& out);
+ void MLTrainerModelAddLayer(const picojson::value& args,
+ picojson::object& out);
void MLTrainerModelRun(const picojson::value& args, picojson::object& out);
- void MLTrainerModelSummarize(const picojson::value& args, picojson::object& out);
+ void MLTrainerModelSummarize(const picojson::value& args,
+ picojson::object& out);
void MLTrainerModelCheckMetrics(const picojson::value& args,
picojson::object& out);
void MLTrainerModelSave(const picojson::value& args, picojson::object& out);
void MLTrainerModelLoad(const picojson::value& args, picojson::object& out);
- void MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out);
- void MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out);
+ void MLTrainerModelSetDataset(const picojson::value& args,
+ picojson::object& out);
+ void MLTrainerModelSetOptimizer(const picojson::value& args,
+ picojson::object& out);
void MLTrainerModelDispose(const picojson::value& args,
picojson::object& out);
@@ -181,6 +225,7 @@ class MlInstance : public common::ParsedInstance {
void MLTrainerDatasetSetProperty(const picojson::value& args, picojson::object& out);
void MLTrainerDatasetDispose(const picojson::value& args,
picojson::object& out);
+#endif
};
} // namespace ml
diff --git a/src/ml/ml_utils.cc b/src/ml/ml_utils.cc
index 92fea3f7..7d7a6116 100644
--- a/src/ml/ml_utils.cc
+++ b/src/ml/ml_utils.cc
@@ -64,6 +64,7 @@ const PlatformEnum TensorTypeEnum{
{"INT64", ML_TENSOR_TYPE_INT64}, {"UINT64", ML_TENSOR_TYPE_UINT64},
{"UNKNOWN", ML_TENSOR_TYPE_UNKNOWN}};
+#ifdef NNTRAINER_SUPPORTED
const PlatformEnum<ml_train_optimizer_type_e> OptimizerTypeEnum{
{"OPTIMIZER_ADAM", ML_TRAIN_OPTIMIZER_TYPE_ADAM},
{"OPTIMIZER_SGD", ML_TRAIN_OPTIMIZER_TYPE_SGD},
@@ -104,7 +105,7 @@ const PlatformEnum DatasetModeEnum{
{"MODE_TRAIN", ML_TRAIN_DATASET_MODE_TRAIN},
{"MODE_VALID", ML_TRAIN_DATASET_MODE_VALID},
{"MODE_TEST", ML_TRAIN_DATASET_MODE_TEST}};
-
+#endif
} // namespace types
namespace util {
diff --git a/src/ml/ml_utils.h b/src/ml/ml_utils.h
index 37cd89cf..3b06b097 100644
--- a/src/ml/ml_utils.h
+++ b/src/ml/ml_utils.h
@@ -18,7 +18,9 @@
#define ML_ML_UTILS_H_
#include <nnstreamer.h>
+#ifdef NNTRAINER_SUPPORTED
#include <nntrainer.h>
+#endif
#if __cplusplus > 201402L
#include
@@ -45,11 +47,13 @@ extern const PlatformEnum HWTypeEnum;
extern const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum;
extern const PlatformEnum<ml_tensor_type_e> TensorTypeEnum;
+#ifdef NNTRAINER_SUPPORTED
extern const PlatformEnum<ml_train_optimizer_type_e> OptimizerTypeEnum;
extern const PlatformEnum<ml_train_layer_type_e> LayerTypeEnum;
extern const PlatformEnum<ml_train_summary_type_e> SummaryTypeEnum;
extern const PlatformEnum<ml_train_model_format_e> ModelSaveFormatEnum;
extern const PlatformEnum<ml_train_dataset_mode_e> DatasetModeEnum;
+#endif
} // namespace types