From: Piotr Kosko/Tizen API (PLT) /SRPOL/Engineer/Samsung Electronics
Date: Thu, 10 Mar 2022 14:13:33 +0000 (+0100)
Subject: [ML] Disable ML Trainer for TV build profile
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=62eb35f2847957d6bb576bab56e247286a5f9bdb;p=platform%2Fcore%2Fapi%2Fwebapi-plugins.git
[ML] Disable ML Trainer for TV build profile
[verification] Compilation succeeded.
Verified in the Chrome console that tizen.ml.trainer is available for the
Mobile profile, but not for the TV profile.
Change-Id: I55dd846d7ba8303a0422deabc9aa2b6dbfe27612
---
diff --git a/packaging/webapi-plugins.spec b/packaging/webapi-plugins.spec
index 98c29791..bf182419 100644
--- a/packaging/webapi-plugins.spec
+++ b/packaging/webapi-plugins.spec
@@ -60,6 +60,7 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_common_feature_messaging_support 0
%define tizen_common_feature_metadata_support 1
%define tizen_common_feature_ml_support 1
+%define tizen_common_feature_ml_nntrainer_support 1
%define tizen_common_feature_nbs_support 0
%define tizen_common_feature_nfc_support 0
%define tizen_common_feature_nfc_emulation_support 0
@@ -124,6 +125,7 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_mobile_feature_messaging_support 1
%define tizen_mobile_feature_metadata_support 1
%define tizen_mobile_feature_ml_support 1
+%define tizen_mobile_feature_ml_nntrainer_support 1
%define tizen_mobile_feature_nfc_support 0
%define tizen_mobile_feature_nfc_emulation_support 0
%define tizen_mobile_feature_notification_support 1
@@ -232,6 +234,7 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_wearable_feature_messaging_support 0
%define tizen_wearable_feature_metadata_support 1
%define tizen_wearable_feature_ml_support 1
+%define tizen_wearable_feature_ml_nntrainer_support 1
%define tizen_wearable_feature_nfc_support 1
%define tizen_wearable_feature_nfc_emulation_support 0
%define tizen_wearable_feature_notification_support 1
@@ -313,6 +316,8 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_tv_feature_messaging_support 0
%define tizen_tv_feature_metadata_support 1
%define tizen_tv_feature_ml_support 1
+# TODO: enable once the native API is supported on TV
+%define tizen_tv_feature_ml_nntrainer_support 0
%define tizen_tv_feature_nbs_support 0
%define tizen_tv_feature_nfc_support 0
%define tizen_tv_feature_nfc_emulation_support 0
@@ -373,6 +378,7 @@ Source0: %{name}-%{version}.tar.gz
%define tizen_feature_message_port_support %{expand:%tizen_%{?profile}_feature_message_port_support}
%define tizen_feature_messaging_support %{expand:%tizen_%{?profile}_feature_messaging_support}
%define tizen_feature_ml_support %{expand:%tizen_%{?profile}_feature_ml_support}
+%define tizen_feature_ml_nntrainer_support %{expand:%tizen_%{?profile}_feature_ml_nntrainer_support}
%define tizen_feature_nfc_emulation_support %{expand:%tizen_%{?profile}_feature_nfc_emulation_support}
%define tizen_feature_nfc_support %{expand:%tizen_%{?profile}_feature_nfc_support}
%define tizen_feature_notification_support %{expand:%tizen_%{?profile}_feature_notification_support}
@@ -533,10 +539,14 @@ BuildRequires: pkgconfig(db-util)
%if "%{?tizen_feature_ml_support}" == "1" || "%{?unified_build}" == "1"
BuildRequires: pkgconfig(nnstreamer)
BuildRequires: pkgconfig(capi-ml-inference)
+%endif
+
+%if "%{?tizen_feature_ml_nntrainer_support}" == "1" || "%{?unified_build}" == "1"
BuildRequires: pkgconfig(nntrainer)
BuildRequires: pkgconfig(capi-ml-training)
%endif
+
%if "%{?tizen_feature_badge_support}" == "1" || "%{?unified_build}" == "1"
BuildRequires: pkgconfig(badge)
%endif
@@ -739,6 +749,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_mobile_f
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_mobile_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_mobile_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_mobile_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_mobile_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_mobile_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_mobile_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_mobile_feature_nfc_support}"
@@ -831,6 +842,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_mobile_f
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_mobile_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_mobile_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_mobile_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_mobile_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_mobile_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_mobile_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_mobile_feature_nfc_support}"
@@ -911,6 +923,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_wearable
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_wearable_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_wearable_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_wearable_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_wearable_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_wearable_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_wearable_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_wearable_feature_nfc_support}"
@@ -998,6 +1011,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_wearable
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_wearable_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_wearable_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_wearable_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_wearable_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_wearable_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_wearable_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_wearable_feature_nfc_support}"
@@ -1078,6 +1092,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_tv_featu
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_tv_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_tv_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_tv_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_tv_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_tv_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_tv_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_tv_feature_nfc_support}"
@@ -1156,6 +1171,7 @@ GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_message_port_support=%{?tizen_common_f
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_messaging_support=%{?tizen_common_feature_messaging_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_metadata_support=%{?tizen_common_feature_metadata_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_support=%{?tizen_common_feature_ml_support}"
+GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_ml_nntrainer_support=%{?tizen_common_feature_ml_nntrainer_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nbs_support=%{?tizen_common_feature_nbs_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_emulation_support=%{?tizen_common_feature_nfc_emulation_support}"
GYP_OPTIONS="$GYP_OPTIONS -Dtizen_feature_nfc_support=%{?tizen_common_feature_nfc_support}"
diff --git a/src/ml/ml.gyp b/src/ml/ml.gyp
index 9f3534c1..ff6e5323 100644
--- a/src/ml/ml.gyp
+++ b/src/ml/ml.gyp
@@ -37,10 +37,6 @@
'ml_tensors_info_manager.h',
'ml_single_manager.cc',
'ml_single_manager.h',
- 'ml_trainer_manager.cc',
- 'ml_trainer_manager.h',
- 'ml_trainer_objects.cc',
- 'ml_trainer_objects.h',
'ml_singleshot.cc',
'ml_singleshot.h',
'ml_utils.cc',
@@ -52,10 +48,25 @@
'packages': [
'nnstreamer',
'capi-ml-inference',
+ ]
+ },
+ }],
+ ['tizen_feature_ml_nntrainer_support == 1', {
+ 'variables': {
+ 'packages': [
'nntrainer',
'capi-ml-training',
- ]
+ ],
},
+ 'sources': [
+ 'ml_trainer_manager.cc',
+ 'ml_trainer_manager.h',
+ 'ml_trainer_objects.cc',
+ 'ml_trainer_objects.h',
+ ],
+ 'defines': [
+ 'NNTRAINER_SUPPORTED',
+ ],
}],
],
},
diff --git a/src/ml/ml_instance.cc b/src/ml/ml_instance.cc
index 6e256d79..c2a28361 100644
--- a/src/ml/ml_instance.cc
+++ b/src/ml/ml_instance.cc
@@ -15,16 +15,17 @@
*/
#include "ml_instance.h"
-#include "ml_utils.h"
#include "common/converter.h"
#include "common/logger.h"
#include "common/picojson.h"
#include "common/platform_result.h"
#include "common/tools.h"
+#include "ml_utils.h"
static_assert(ML_TENSOR_RANK_LIMIT == 4,
- "This implementation requires different ML_TENSOR_RANK_LIMIT. Please fix the code.");
+ "This implementation requires different ML_TENSOR_RANK_LIMIT. "
+ "Please fix the code.");
namespace extension {
namespace ml {
@@ -91,17 +92,19 @@ const std::string kMode = "mode";
using namespace common;
-#define CHECK_EXIST(args, name, out) \
- if (!args.contains(name)) { \
- std::string msg = std::string(name) + " is required argument"; \
- LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), &out); \
- return; \
+#define CHECK_EXIST(args, name, out) \
+ if (!args.contains(name)) { \
+ std::string msg = std::string(name) + " is required argument"; \
+ LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), \
+ &out); \
+ return; \
}
-// CHECK_TYPE will throw AbortError by default, but it can be changed by providing
-// additional parameter to the macro, i.e.:
-// CHECK_TYPE(args, "name", std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
-#define CHECK_TYPE(...) CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
+// CHECK_TYPE will throw AbortError by default, but it can be changed by
+// providing additional parameter to the macro, i.e.: CHECK_TYPE(args, "name",
+// std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
+#define CHECK_TYPE(...) \
+ CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
#define CHECK_TYPE_X(_1, _2, _3, _4, _5, EXC_TYPE, ...) EXC_TYPE
#define CHECK_TYPE_5(args, name, type, out, error_type) \
if (!args.get(name).is()) { \
@@ -123,10 +126,13 @@ MlInstance::MlInstance()
ScopeLogger();
using namespace std::placeholders;
-#define REGISTER_METHOD(M) RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
-#define REGISTER_BINARY_METHOD(M) RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
+#define REGISTER_METHOD(M) \
+ RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
+#define REGISTER_BINARY_METHOD(M) \
+ RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
#define REGISTER_METHOD_WITH_BINARY_ANWSER(M) \
- RegisterSyncHandlerWithBinaryAnswer(#M, std::bind(&MlInstance::M, this, _1, _2))
+ RegisterSyncHandlerWithBinaryAnswer(#M, \
+ std::bind(&MlInstance::M, this, _1, _2))
REGISTER_METHOD(MLCheckNNFWAvailability);
REGISTER_METHOD(MLTensorsInfoCountGetter);
@@ -183,6 +189,7 @@ MlInstance::MlInstance()
REGISTER_METHOD(MLPipelineManagerCustomFilterOutput);
REGISTER_METHOD(MLPipelineManagerUnregisterCustomFilter);
+#ifdef NNTRAINER_SUPPORTED
REGISTER_METHOD(MLTrainerLayerSetProperty);
REGISTER_METHOD(MLTrainerLayerCreate);
REGISTER_METHOD(MLTrainerLayerGetName);
@@ -205,6 +212,7 @@ MlInstance::MlInstance()
REGISTER_METHOD(MLTrainerDatasetCreateFromFile);
REGISTER_METHOD(MLTrainerDatasetSetProperty);
REGISTER_METHOD(MLTrainerDatasetDispose);
+#endif
#undef REGISTER_METHOD
}
@@ -222,7 +230,8 @@ TensorsDataManager& MlInstance::GetTensorsDataManager() {
return tensors_data_manager_;
}
-void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLCheckNNFWAvailability(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_EXIST(args, kNnfw, out)
CHECK_EXIST(args, kHw, out)
@@ -234,34 +243,41 @@ void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::
if (args.get(kCustomRequirement).is()) {
customRequirement = args.get(kCustomRequirement).get();
}
- bool availability_val = util::CheckNNFWAvailability(nnfw, hw, customRequirement);
+ bool availability_val =
+ util::CheckNNFWAvailability(nnfw, hw, customRequirement);
picojson::value available = picojson::value{availability_val};
ReportSuccess(available, out);
}
-void MlInstance::MLTensorsInfoCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
TensorsInfo* tensorsInfo = GetTensorsInfoManager().CreateTensorsInfo();
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not create new TensorsInfo handle"));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not create new TensorsInfo handle"));
return;
}
out[kId] = picojson::value(static_cast(tensorsInfo->Id()));
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
unsigned int count = 0;
@@ -274,7 +290,8 @@ void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson:
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kType, std::string, out);
@@ -283,20 +300,26 @@ void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojso
CHECK_ARGS(args, kDimensions, picojson::array, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
const std::string& tensorType = args.get(kType).get();
ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
+ PlatformResult result =
+ types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting value of TensorType"),
&out,
- ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getValue() failed, error: %s",
+ result.message().c_str()));
return;
}
@@ -326,16 +349,20 @@ void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast(args.get(kIndex).get());
@@ -356,17 +383,21 @@ void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kDimensions, picojson::array, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
@@ -387,16 +418,20 @@ void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojso
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast(args.get(kIndex).get());
@@ -410,17 +445,21 @@ void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kName, std::string, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
@@ -434,16 +473,20 @@ void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args, picojso
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast(args.get(kIndex).get());
@@ -458,22 +501,27 @@ void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast(args.get(kIndex).get());
ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
+ PlatformResult result =
+ tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -481,9 +529,11 @@ void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojso
std::string tensorTypeString;
result = types::TensorTypeEnum.getName(tensorTypeEnum, &tensorTypeString);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting name of TensorType"),
&out,
- ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getName() failed, error: %s",
+ result.message().c_str()));
return;
}
@@ -491,27 +541,34 @@ void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kType, std::string, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
const std::string& tensorType = args.get(kType).get();
ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
+ PlatformResult result =
+ types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting value of TensorType"),
&out,
- ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getValue() failed, error: %s",
+ result.message().c_str()));
return;
}
@@ -524,46 +581,58 @@ void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args, picojso
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
- TensorsData* tensorsData = GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
+ TensorsData* tensorsData =
+ GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
if (!tensorsData) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not create TensorsData"));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not create TensorsData"));
return;
}
out[kTensorsDataId] = picojson::value(static_cast(tensorsData->Id()));
- out[kTensorsInfoId] = picojson::value(static_cast(tensorsData->TensorsInfoId()));
+ out[kTensorsInfoId] =
+ picojson::value(static_cast(tensorsData->TensorsInfoId()));
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoClone(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
TensorsInfo* cloned = GetTensorsInfoManager().CloneTensorsInfo(tensorsInfo);
if (nullptr == cloned) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
return;
}
@@ -571,7 +640,8 @@ void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::objec
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoEquals(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kOtherId, double, out);
@@ -581,15 +651,18 @@ void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::obje
TensorsInfo* first = GetTensorsInfoManager().GetTensorsInfo(firstId);
if (nullptr == first) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", firstId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not find TensorsInfo handle with given id: %d", firstId));
return;
}
TensorsInfo* second = GetTensorsInfoManager().GetTensorsInfo(secondId);
if (nullptr == second) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", secondId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", secondId));
return;
}
@@ -598,27 +671,35 @@ void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::obje
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoDispose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoDispose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast(args.get(kTensorsInfoId).get());
- PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
+ PlatformResult result =
+ GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
if (!result) {
LogAndReportError(result, &out);
}
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataDispose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
- int tensors_data_id = static_cast(args.get(kTensorsDataId).get());
+ int tensors_data_id =
+ static_cast(args.get(kTensorsDataId).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
@@ -628,7 +709,8 @@ void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::obj
}
// Dispose underlying tensorsInfo
- PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
+ PlatformResult result =
+ GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
if (!result) {
LogAndReportError(result, &out);
return;
@@ -642,7 +724,8 @@ void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::obj
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
@@ -652,31 +735,36 @@ void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, pico
int tensor_data_id = static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensor_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensor_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensor_data_id));
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get(), location);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get(),
+ location, dimensions, size);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -689,7 +777,8 @@ void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, pico
return;
}
- std::vector out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
+ std::vector out_data{raw_data.data,
+ raw_data.data + raw_data.size_in_bytes};
out[kBuffer] = picojson::value(picojson::string_type, true);
common::encode_binary_in_string(out_data, out[kBuffer].get());
@@ -703,8 +792,8 @@ void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args,
- std::vector* out) {
+void MlInstance::MLTensorsDataGetTensorRawDataBinary(
+ const picojson::value& args, std::vector* out) {
ScopeLogger("args: %s", args.serialize().c_str());
// TODO handle errors to out
// CHECK_ARGS(args, kTensorsDataId, double, out);
@@ -715,17 +804,20 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
int tensor_data_id = static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensor_data_id);
if (nullptr == tensors_data) {
- LoggerE("Could not find TensorsData handle with given id: %d", tensor_data_id);
- tools::ReportErrorToBinary(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
- out);
+ LoggerE("Could not find TensorsData handle with given id: %d",
+ tensor_data_id);
+ tools::ReportErrorToBinary(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ out);
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get(), location);
if (!result) {
LoggerE("Reporting error.");
tools::ReportErrorToBinary(result, out);
@@ -733,7 +825,8 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LoggerE("Reporting error.");
tools::ReportErrorToBinary(result, out);
@@ -741,8 +834,8 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get(),
+ location, dimensions, size);
if (!result) {
LoggerE("Reporting error.");
tools::ReportErrorToBinary(result, out);
@@ -767,7 +860,8 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
}
out_json[kShape] = picojson::value{shape};
- std::vector out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
+ std::vector out_data{raw_data.data,
+ raw_data.data + raw_data.size_in_bytes};
// FORMAT:
// 4 byte === JSON lenght (N)
@@ -778,28 +872,36 @@ void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args
tools::ReportDataToBinary(out_data, out);
}
-void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
- int tensors_data_id = static_cast(args.get(kTensorsDataId).get());
+ int tensors_data_id =
+ static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
std::string tensor_type_string;
- PlatformResult result =
- types::TensorTypeEnum.getName(tensors_data->GetTensorType(index), &tensor_type_string);
+ PlatformResult result = types::TensorTypeEnum.getName(
+ tensors_data->GetTensorType(index), &tensor_type_string);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting name of TensorType"),
&out,
- ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getName() failed, error: %s",
+ result.message().c_str()));
return;
}
@@ -807,46 +909,54 @@ void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojso
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger();
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kBuffer, std::string, out);
CHECK_ARGS(args, kLocation, picojson::array, out);
CHECK_ARGS(args, kSize, picojson::array, out);
- LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
+ LoggerD("%s, %s", kTensorsDataId.c_str(),
+ args.get(kTensorsDataId).serialize().c_str());
LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
- int tensors_data_id = static_cast(args.get(kTensorsDataId).get());
+ int tensors_data_id =
+ static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get(), location);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get(),
+ location, dimensions, size);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -865,7 +975,8 @@ void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t data_size,
+void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data,
+ size_t data_size,
picojson::object& out) {
ScopeLogger();
/*
@@ -880,7 +991,8 @@ void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t da
*/
unsigned int call_args_len_begin = 0;
unsigned int call_args_len = static_cast(
- (data[call_args_len_begin] << 24) + (data[call_args_len_begin + 1] << 16) +
+ (data[call_args_len_begin] << 24) +
+ (data[call_args_len_begin + 1] << 16) +
(data[call_args_len_begin + 2] << 8) + (data[call_args_len_begin + 3]));
unsigned int buffer_len_begin = call_args_len_begin + 4;
@@ -900,46 +1012,54 @@ void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t da
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kLocation, picojson::array, out);
CHECK_ARGS(args, kSize, picojson::array, out);
- LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
+ LoggerD("%s, %s", kTensorsDataId.c_str(),
+ args.get(kTensorsDataId).serialize().c_str());
LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
- int tensors_data_id = static_cast(args.get(kTensorsDataId).get());
+ int tensors_data_id =
+ static_cast(args.get(kTensorsDataId).get());
int index = static_cast(args.get(kIndex).get());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get(), location);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get(),
+ location, dimensions, size);
if (!result) {
LogAndReportError(result, &out);
return;
}
- TensorRawData raw_data{reinterpret_cast(const_cast(data + buffer_begin)),
- buffer_len};
+ TensorRawData raw_data{
+ reinterpret_cast(const_cast(data + buffer_begin)),
+ buffer_len};
result = tensors_data->SetTensorRawData(index, location, size, raw_data);
if (!result) {
LogAndReportError(result, &out);
@@ -949,7 +1069,8 @@ void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t da
ReportSuccess(out);
}
-void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleManagerOpenModel(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kModelPath, std::string, out);
CHECK_ARGS(args, kInTensorsInfo, double, out);
@@ -958,7 +1079,8 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
CHECK_ARGS(args, kHwType, std::string, out);
CHECK_ARGS(args, kIsDynamicMode, bool, out);
- const auto& model_path = common::tools::ConvertUriToPath(args.get(kModelPath).get());
+ const auto& model_path =
+ common::tools::ConvertUriToPath(args.get(kModelPath).get());
CHECK_STORAGE_ACCESS(model_path, &out);
TensorsInfo* in_tensors_info = nullptr;
@@ -966,8 +1088,10 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
if (kNoId != inTensorId) {
in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
if (nullptr == in_tensors_info) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", inTensorId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", inTensorId));
return;
}
}
@@ -977,22 +1101,25 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
if (kNoId != outTensorId) {
out_tensors_info = GetTensorsInfoManager().GetTensorsInfo(outTensorId);
if (nullptr == out_tensors_info) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", outTensorId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", outTensorId));
return;
}
}
ml_nnfw_type_e nnfw_e = ML_NNFW_TYPE_ANY;
- PlatformResult result =
- types::NNFWTypeEnum.getValue(args.get(kFwType).get(), &nnfw_e);
+ PlatformResult result = types::NNFWTypeEnum.getValue(
+ args.get(kFwType).get(), &nnfw_e);
if (!result) {
LogAndReportError(result, &out);
return;
}
ml_nnfw_hw_e hw_e = ML_NNFW_HW_ANY;
- result = types::HWTypeEnum.getValue(args.get(kHwType).get(), &hw_e);
+ result =
+ types::HWTypeEnum.getValue(args.get(kHwType).get(), &hw_e);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -1000,19 +1127,22 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
auto is_dynamic_mode = args.get(kIsDynamicMode).get();
- auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
- is_dynamic_mode](decltype(out) out) {
+ auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e,
+ hw_e, is_dynamic_mode](decltype(out) out) {
PlatformResult result = common::tools::CheckFileAvailability(model_path);
if (!result) {
LogAndReportError(
- PlatformResult(ErrorCode::NOT_FOUND_ERR, "File does not exist or is not accessible"),
- &out, ("File does not exist or is not accessible: %s", model_path.c_str()));
+ PlatformResult(ErrorCode::NOT_FOUND_ERR,
+ "File does not exist or is not accessible"),
+ &out,
+ ("File does not exist or is not accessible: %s", model_path.c_str()));
return;
}
int res_id = -1;
- result = single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
- is_dynamic_mode, &res_id);
+ result =
+ single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info,
+ nnfw_e, hw_e, is_dynamic_mode, &res_id);
if (!result) {
ReportError(result, &out);
return;
@@ -1022,8 +1152,9 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
ReportSuccess(out);
};
- bool async =
- (args.contains(kAsync) && args.get(kAsync).is()) ? args.get(kAsync).get() : false;
+ bool async = (args.contains(kAsync) && args.get(kAsync).is())
+ ? args.get(kAsync).get()
+ : false;
if (!async) {
logic(out);
@@ -1044,7 +1175,8 @@ void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson:
}
}
-void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kGetInputMode, bool, out);
@@ -1064,7 +1196,8 @@ void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args, picojso
ReportSuccess(out);
}
-void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kInTensorsInfo, double, out);
@@ -1072,14 +1205,18 @@ void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson:
auto id = static_cast(args.get(kId).get());
auto inTensorId = static_cast(args.get(kInTensorsInfo).get());
- TensorsInfo* in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
+ TensorsInfo* in_tensors_info =
+ GetTensorsInfoManager().GetTensorsInfo(inTensorId);
if (nullptr == in_tensors_info) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", inTensorId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", inTensorId));
return;
}
- TensorsInfo* clone = GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
+ TensorsInfo* clone =
+ GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
auto ret = single_manager_.SetNativeInputInfo(id, in_tensors_info);
if (!ret) {
@@ -1091,17 +1228,21 @@ void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson:
ReportSuccess(out);
}
-void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotInvoke(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kTensorsDataId, double, out);
int id = static_cast(args.get(kId).get());
- int tensors_data_id = static_cast(args.get(kTensorsDataId).get());
- bool async =
- (args.contains(kAsync) && args.get(kAsync).is()) ? args.get(kAsync).get() : false;
-
- TensorsData* in_tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ int tensors_data_id =
+ static_cast(args.get(kTensorsDataId).get());
+ bool async = (args.contains(kAsync) && args.get(kAsync).is())
+ ? args.get(kAsync).get()
+ : false;
+
+ TensorsData* in_tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (async && in_tensors_data) {
// in case of async flow need to prevent destroying entry data during invoke
// from JS, creation of a copy
@@ -1109,18 +1250,23 @@ void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::objec
in_tensors_data->GetTensorsInfo()->Handle(), in_tensors_data->Handle());
}
if (nullptr == in_tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
- auto logic = [this, id, tensors_data_id, in_tensors_data, async](decltype(out) out) {
+ auto logic = [this, id, tensors_data_id, in_tensors_data,
+ async](decltype(out) out) {
TensorsData* out_tensors_data = nullptr;
auto ret = single_manager_.Invoke(id, in_tensors_data, &out_tensors_data);
if (async) {
// in case of async flow, the in_tensor_data with underlying TensorsInfo
// was copied, thus need to be released here
- GetTensorsInfoManager().DisposeTensorsInfo(in_tensors_data->GetTensorsInfo());
+ GetTensorsInfoManager().DisposeTensorsInfo(
+ in_tensors_data->GetTensorsInfo());
GetTensorsDataManager().DisposeTensorsData(in_tensors_data);
}
if (!ret) {
@@ -1128,8 +1274,10 @@ void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::objec
return;
}
- out[kTensorsDataId] = picojson::value(static_cast(out_tensors_data->Id()));
- out[kTensorsInfoId] = picojson::value(static_cast(out_tensors_data->TensorsInfoId()));
+ out[kTensorsDataId] =
+ picojson::value(static_cast(out_tensors_data->Id()));
+ out[kTensorsInfoId] =
+ picojson::value(static_cast(out_tensors_data->TensorsInfoId()));
ReportSuccess(out);
};
@@ -1151,7 +1299,8 @@ void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::objec
}
}
-void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotGetValue(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -1169,7 +1318,8 @@ void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::obj
ReportSuccess(val, out);
}
-void MlInstance::MLSingleShotSetValue(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotSetValue(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -1188,7 +1338,8 @@ void MlInstance::MLSingleShotSetValue(const picojson::value& args, picojson::obj
ReportSuccess(out);
}
-void MlInstance::MLSingleShotSetTimeout(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotSetTimeout(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kTimeout, double, out);
@@ -1205,7 +1356,8 @@ void MlInstance::MLSingleShotSetTimeout(const picojson::value& args, picojson::o
ReportSuccess(out);
}
-void MlInstance::MLSingleShotClose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotClose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1227,9 +1379,11 @@ bool CreatePipelineArgumentsAreInvalid(const picojson::value& args) {
auto arguments_valid = args.get(kId).is();
arguments_valid &= args.get(kDefinition).is();
- arguments_valid &= (args.get(kPipelineStateChangeListenerName).is() ||
- args.get(kPipelineStateChangeListenerName).is());
- LoggerD("CreatePipeline arguments are %s", arguments_valid ? "valid" : "invalid");
+ arguments_valid &=
+ (args.get(kPipelineStateChangeListenerName).is() ||
+ args.get(kPipelineStateChangeListenerName).is());
+ LoggerD("CreatePipeline arguments are %s",
+ arguments_valid ? "valid" : "invalid");
return !arguments_valid;
}
@@ -1241,7 +1395,9 @@ void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
ScopeLogger("args: %s", args.serialize().c_str());
if (CreatePipelineArgumentsAreInvalid(args)) {
- ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"}, &out);
+ ReportError(
+ PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"},
+ &out);
return;
}
@@ -1252,7 +1408,8 @@ void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
? args.get(kPipelineStateChangeListenerName).get()
: "";
- auto ret = pipeline_manager_.CreatePipeline(id, definition, state_change_listener_name);
+ auto ret = pipeline_manager_.CreatePipeline(id, definition,
+ state_change_listener_name);
if (!ret) {
ReportError(ret, &out);
@@ -1262,7 +1419,8 @@ void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetState(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetState(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
if (!args.get(kId).is()) {
@@ -1283,7 +1441,8 @@ void MlInstance::MLPipelineGetState(const picojson::value& args, picojson::objec
ReportSuccess(state_value, out);
}
-void MlInstance::MLPipelineStart(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineStart(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1300,7 +1459,8 @@ void MlInstance::MLPipelineStart(const picojson::value& args, picojson::object&
ReportSuccess(out);
}
-void MlInstance::MLPipelineStop(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineStop(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1317,7 +1477,8 @@ void MlInstance::MLPipelineStop(const picojson::value& args, picojson::object& o
ReportSuccess(out);
}
-void MlInstance::MLPipelineDispose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineDispose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
if (!args.get(kId).is()) {
@@ -1337,7 +1498,8 @@ void MlInstance::MLPipelineDispose(const picojson::value& args, picojson::object
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1356,7 +1518,8 @@ void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::ob
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetSource(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1375,7 +1538,8 @@ void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::obje
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetSwitch(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
if (!args.get(kId).is()) {
@@ -1404,7 +1568,8 @@ void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::obje
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetValve(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetValve(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1434,7 +1599,8 @@ void MlInstance::MLPipelineRegisterSinkListener(const picojson::value& args,
auto sink_name = args.get(kName).get();
auto listener_name = args.get(kListenerName).get();
- auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id, listener_name);
+ auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id,
+ listener_name);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1462,8 +1628,8 @@ void MlInstance::MLPipelineUnregisterSinkListener(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& args,
- picojson::object& out) {
+void MlInstance::MLPipelineManagerRegisterCustomFilter(
+ const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kName, std::string, out);
@@ -1473,15 +1639,19 @@ void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& ar
const auto& custom_filter_name = args.get(kName).get();
const auto& listener_name = args.get(kListenerName).get();
- auto input_tensors_info_id = static_cast(args.get(kInputTensorsInfoId).get());
- auto output_tensors_info_id = static_cast(args.get(kOutputTensorsInfoId).get());
+ auto input_tensors_info_id =
+ static_cast(args.get(kInputTensorsInfoId).get());
+ auto output_tensors_info_id =
+ static_cast(args.get(kOutputTensorsInfoId).get());
TensorsInfo* input_tensors_info_ptr =
GetTensorsInfoManager().GetTensorsInfo(input_tensors_info_id);
if (!input_tensors_info_ptr) {
LogAndReportError(
- PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", input_tensors_info_id));
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d",
+ input_tensors_info_id));
return;
}
@@ -1489,13 +1659,16 @@ void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& ar
GetTensorsInfoManager().GetTensorsInfo(output_tensors_info_id);
if (!output_tensors_info_ptr) {
LogAndReportError(
- PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", output_tensors_info_id));
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d",
+ output_tensors_info_id));
return;
}
auto ret = pipeline_manager_.RegisterCustomFilter(
- custom_filter_name, listener_name, input_tensors_info_ptr, output_tensors_info_ptr);
+ custom_filter_name, listener_name, input_tensors_info_ptr,
+ output_tensors_info_ptr);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1504,8 +1677,8 @@ void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& ar
ReportSuccess(out);
}
-void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args,
- picojson::object& out) {
+void MlInstance::MLPipelineManagerCustomFilterOutput(
+ const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kName, std::string, out);
@@ -1516,7 +1689,8 @@ void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args
auto status = static_cast<int>(args.get(kStatus).get<double>());
auto request_id = static_cast<int>(args.get(kRequestId).get<double>());
- auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status);
+ auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name,
+ request_id, status);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1525,8 +1699,8 @@ void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args
ReportSuccess(out);
}
-void MlInstance::MLPipelineManagerUnregisterCustomFilter(const picojson::value& args,
- picojson::object& out) {
+void MlInstance::MLPipelineManagerUnregisterCustomFilter(
+ const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kName, std::string, out);
@@ -1542,7 +1716,8 @@ void MlInstance::MLPipelineManagerUnregisterCustomFilter(const picojson::value&
ReportSuccess(out);
}
-void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1555,7 +1730,8 @@ void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, pico
const auto& node_name = args.get(kNodeName).get<std::string>();
const auto& type = args.get(kType).get<std::string>();
- PlatformResult result = pipeline_manager_.getProperty(id, node_name, name, type, &out);
+ PlatformResult result =
+ pipeline_manager_.getProperty(id, node_name, name, type, &out);
if (!result) {
LogAndReportError(result, &out);
@@ -1565,7 +1741,8 @@ void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1588,7 +1765,8 @@ void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, pico
}
const picojson::value& property = args.get(kProperty);
- PlatformResult result = pipeline_manager_.setProperty(id, node_name, name, type, property);
+ PlatformResult result =
+ pipeline_manager_.setProperty(id, node_name, name, type, property);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -1597,7 +1775,8 @@ void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1607,7 +1786,8 @@ void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, pico
const auto& name = args.get(kName).get<std::string>();
int res_id = -1;
- PlatformResult result = pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
+ PlatformResult result =
+ pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
if (!result) {
LogAndReportError(result, &out);
return;
@@ -1616,7 +1796,8 @@ void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineSourceInputData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1625,17 +1806,21 @@ void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson
auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
auto& source_name = args.get(kName).get<std::string>();
- auto tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ auto tensor_data_id =
+ static_cast<int>(args.get(kTensorsDataId).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensor_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
- ("Could not get TensorData handle with given id: %d", tensor_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
+ ("Could not get TensorData handle with given id: %d", tensor_data_id));
return;
}
- auto ret = pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
+ auto ret =
+ pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1644,7 +1829,8 @@ void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson
ReportSuccess(out);
}
-void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1654,7 +1840,8 @@ void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojso
auto& switch_name = args.get(kName).get<std::string>();
picojson::array pad_list;
- auto ret = pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
+ auto ret =
+ pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
if (!ret) {
LogAndReportError(ret, &out);
return;
@@ -1663,7 +1850,8 @@ void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojso
ReportSuccess(picojson::value{std::move(pad_list)}, out);
}
-void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineSwitchSelect(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1683,7 +1871,8 @@ void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::o
ReportSuccess(out);
}
-void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineValveSetOpen(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1703,7 +1892,8 @@ void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::o
ReportSuccess(out);
}
-void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineValveIsOpen(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
@@ -1722,7 +1912,9 @@ void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::ob
ReportSuccess(picojson::value{open}, out);
}
-void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args, picojson::object& out) {
+#ifdef NNTRAINER_SUPPORTED
+void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -1740,7 +1932,8 @@ void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args, picojson
ReportSuccess(out);
}
-void MlInstance::MLTrainerLayerCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerLayerCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kType, std::string, out);
int id = -1;
@@ -1795,7 +1988,8 @@ void MlInstance::MLTrainerLayerDispose(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -1805,7 +1999,8 @@ void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, pico
auto name = args.get(kName).get<std::string>();
auto value = args.get(kValue).get<std::string>();
- PlatformResult result = trainer_manager_.OptimizerSetProperty(id, name, value);
+ PlatformResult result =
+ trainer_manager_.OptimizerSetProperty(id, name, value);
if (!result) {
ReportError(result, &out);
return;
@@ -1813,7 +2008,8 @@ void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, pico
ReportSuccess(out);
}
-void MlInstance::MLTrainerOptimizerCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerOptimizerCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kType, std::string, out);
int id = -1;
@@ -1850,7 +2046,8 @@ void MlInstance::MLTrainerOptimizerDispose(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
int id = -1;
PlatformResult result;
@@ -1873,7 +2070,8 @@ void MlInstance::MLTrainerModelCreate(const picojson::value& args, picojson::obj
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelCompile(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelCompile(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kOptions, picojson::object, out);
@@ -1890,7 +2088,8 @@ void MlInstance::MLTrainerModelCompile(const picojson::value& args, picojson::ob
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelAddLayer(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelAddLayer(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kLayerId, double, out);
@@ -1907,7 +2106,8 @@ void MlInstance::MLTrainerModelAddLayer(const picojson::value& args, picojson::o
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelRun(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelRun(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kOptions, picojson::object, out);
@@ -1946,7 +2146,8 @@ void MlInstance::MLTrainerModelRun(const picojson::value& args, picojson::object
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelSummarize(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelSummarize(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kLevel, std::string, out);
@@ -2053,7 +2254,8 @@ void MlInstance::MLTrainerModelLoad(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelSetDataset(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kDatasetId, double, out);
@@ -2070,7 +2272,8 @@ void MlInstance::MLTrainerModelSetDataset(const picojson::value& args, picojson:
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelSetOptimizer(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kOptimizerId, double, out);
@@ -2119,8 +2322,8 @@ void MlInstance::MLTrainerDatasetCreateFromFile(const picojson::value& args,
const std::string& valid_file_path = args.get(kValid).get<std::string>();
const std::string& test_file_path = args.get(kTest).get<std::string>();
- PlatformResult result =
- trainer_manager_.CreateFileDataset(id, train_file_path, valid_file_path, test_file_path);
+ PlatformResult result = trainer_manager_.CreateFileDataset(
+ id, train_file_path, valid_file_path, test_file_path);
if (!result) {
ReportError(result, &out);
return;
@@ -2129,7 +2332,8 @@ void MlInstance::MLTrainerDatasetCreateFromFile(const picojson::value& args,
ReportSuccess(out);
}
-void MlInstance::MLTrainerDatasetSetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerDatasetSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
@@ -2170,6 +2374,7 @@ void MlInstance::MLTrainerDatasetDispose(const picojson::value& args,
}
ReportSuccess(out);
}
+#endif
#undef CHECK_EXIST
#undef CHECK_TYPE
diff --git a/src/ml/ml_instance.h b/src/ml/ml_instance.h
index 5fed8fa8..1e5a2a73 100644
--- a/src/ml/ml_instance.h
+++ b/src/ml/ml_instance.h
@@ -19,15 +19,15 @@
#include "common/extension.h"
#include "common/worker.h"
-
#include "ml/ml_pipeline_manager.h"
#include "ml/ml_single_manager.h"
+#ifdef NNTRAINER_SUPPORTED
#include "ml/ml_trainer_manager.h"
-#include "nnstreamer/nnstreamer-single.h"
-#include "nnstreamer/nnstreamer.h"
-
+#endif
#include "ml_tensors_data_manager.h"
#include "ml_tensors_info_manager.h"
+#include "nnstreamer/nnstreamer-single.h"
+#include "nnstreamer/nnstreamer.h"
namespace extension {
namespace ml {
@@ -40,28 +40,44 @@ class MlInstance : public common::ParsedInstance {
TensorsDataManager& GetTensorsDataManager();
private:
- void MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out);
+ void MLCheckNNFWAvailability(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoCountGetter(const picojson::value& args,
+ picojson::object& out);
void MLTensorsInfoCreate(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetDimensions(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoSetDimensions(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetTensorName(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out);
- void MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out);
+ void MLTensorsInfoAddTensorInfo(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetDimensions(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoSetDimensions(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetTensorName(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoSetTensorName(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetTensorSize(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetTensorType(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoSetTensorType(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsInfoGetTensorsData(const picojson::value& args,
+ picojson::object& out);
void MLTensorsInfoClone(const picojson::value& args, picojson::object& out);
void MLTensorsInfoEquals(const picojson::value& args, picojson::object& out);
void MLTensorsInfoDispose(const picojson::value& args, picojson::object& out);
void MLTensorsDataDispose(const picojson::value& args, picojson::object& out);
- void MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out);
- void MLTensorsDataGetTensorRawDataBinary(const picojson::value& args, std::vector<std::uint8_t>* vec);
- void MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out);
- void MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out);
- void MLTensorsDataSetTensorRawDataBinary(const char* data, size_t size, picojson::object& out);
+ void MLTensorsDataGetTensorRawData(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsDataGetTensorRawDataBinary(const picojson::value& args,
+ std::vector<std::uint8_t>* vec);
+ void MLTensorsDataGetTensorType(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsDataSetTensorRawData(const picojson::value& args,
+ picojson::object& out);
+ void MLTensorsDataSetTensorRawDataBinary(const char* data, size_t size,
+ picojson::object& out);
/*
* ########## IMPORTANT ##########
@@ -72,13 +88,17 @@ class MlInstance : public common::ParsedInstance {
common::Worker worker_;
SingleManager single_manager_;
- void MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out);
- void MLSingleShotGetTensorsInfo(const picojson::value& args, picojson::object& out);
- void MLSingleShotSetInputInfo(const picojson::value& args, picojson::object& out);
+ void MLSingleManagerOpenModel(const picojson::value& args,
+ picojson::object& out);
+ void MLSingleShotGetTensorsInfo(const picojson::value& args,
+ picojson::object& out);
+ void MLSingleShotSetInputInfo(const picojson::value& args,
+ picojson::object& out);
void MLSingleShotInvoke(const picojson::value& args, picojson::object& out);
void MLSingleShotGetValue(const picojson::value& args, picojson::object& out);
void MLSingleShotSetValue(const picojson::value& args, picojson::object& out);
- void MLSingleShotSetTimeout(const picojson::value& args, picojson::object& out);
+ void MLSingleShotSetTimeout(const picojson::value& args,
+ picojson::object& out);
void MLSingleShotClose(const picojson::value& args, picojson::object& out);
/*
@@ -105,7 +125,8 @@ class MlInstance : public common::ParsedInstance {
*/
PipelineManager pipeline_manager_;
- void MLPipelineManagerCreatePipeline(const picojson::value& args, picojson::object& out);
+ void MLPipelineManagerCreatePipeline(const picojson::value& args,
+ picojson::object& out);
void MLPipelineGetState(const picojson::value& args, picojson::object& out);
@@ -115,7 +136,8 @@ class MlInstance : public common::ParsedInstance {
void MLPipelineDispose(const picojson::value& args, picojson::object& out);
- void MLPipelineGetNodeInfo(const picojson::value& args, picojson::object& out);
+ void MLPipelineGetNodeInfo(const picojson::value& args,
+ picojson::object& out);
void MLPipelineGetSource(const picojson::value& args, picojson::object& out);
@@ -123,65 +145,91 @@ class MlInstance : public common::ParsedInstance {
void MLPipelineGetValve(const picojson::value& args, picojson::object& out);
- void MLPipelineRegisterSinkListener(const picojson::value& args, picojson::object& out);
+ void MLPipelineRegisterSinkListener(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineUnregisterSinkListener(const picojson::value& args, picojson::object& out);
+ void MLPipelineUnregisterSinkListener(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineManagerRegisterCustomFilter(const picojson::value& args, picojson::object& out);
+ void MLPipelineManagerRegisterCustomFilter(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineManagerCustomFilterOutput(const picojson::value& args, picojson::object& out);
+ void MLPipelineManagerCustomFilterOutput(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineManagerUnregisterCustomFilter(const picojson::value& args, picojson::object& out);
+ void MLPipelineManagerUnregisterCustomFilter(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineNodeInfoGetProperty(const picojson::value& args, picojson::object& out);
+ void MLPipelineNodeInfoGetProperty(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineNodeInfoSetProperty(const picojson::value& args, picojson::object& out);
+ void MLPipelineNodeInfoSetProperty(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out);
+ void MLPipelineGetInputTensorsInfo(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineSourceInputData(const picojson::value& args, picojson::object& out);
+ void MLPipelineSourceInputData(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineSwitchGetPadList(const picojson::value& args, picojson::object& out);
+ void MLPipelineSwitchGetPadList(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineSwitchSelect(const picojson::value& args, picojson::object& out);
+ void MLPipelineSwitchSelect(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out);
+ void MLPipelineValveSetOpen(const picojson::value& args,
+ picojson::object& out);
- void MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out);
+ void MLPipelineValveIsOpen(const picojson::value& args,
+ picojson::object& out);
+#ifdef NNTRAINER_SUPPORTED
TrainerManager trainer_manager_;
- void MLTrainerLayerSetProperty(const picojson::value& args, picojson::object& out);
+ void MLTrainerLayerSetProperty(const picojson::value& args,
+ picojson::object& out);
void MLTrainerLayerCreate(const picojson::value& args, picojson::object& out);
void MLTrainerLayerGetName(const picojson::value& args,
picojson::object& out);
void MLTrainerLayerDispose(const picojson::value& args,
picojson::object& out);
- void MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out);
- void MLTrainerOptimizerCreate(const picojson::value& args, picojson::object& out);
+ void MLTrainerOptimizerSetProperty(const picojson::value& args,
+ picojson::object& out);
+ void MLTrainerOptimizerCreate(const picojson::value& args,
+ picojson::object& out);
void MLTrainerOptimizerDispose(const picojson::value& args,
picojson::object& out);
void MLTrainerModelCreate(const picojson::value& args, picojson::object& out);
- void MLTrainerModelCompile(const picojson::value& args, picojson::object& out);
- void MLTrainerModelAddLayer(const picojson::value& args, picojson::object& out);
+ void MLTrainerModelCompile(const picojson::value& args,
+ picojson::object& out);
+ void MLTrainerModelAddLayer(const picojson::value& args,
+ picojson::object& out);
void MLTrainerModelRun(const picojson::value& args, picojson::object& out);
- void MLTrainerModelSummarize(const picojson::value& args, picojson::object& out);
+ void MLTrainerModelSummarize(const picojson::value& args,
+ picojson::object& out);
void MLTrainerModelCheckMetrics(const picojson::value& args,
picojson::object& out);
void MLTrainerModelSave(const picojson::value& args, picojson::object& out);
void MLTrainerModelLoad(const picojson::value& args, picojson::object& out);
- void MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out);
- void MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out);
+ void MLTrainerModelSetDataset(const picojson::value& args,
+ picojson::object& out);
+ void MLTrainerModelSetOptimizer(const picojson::value& args,
+ picojson::object& out);
void MLTrainerModelDispose(const picojson::value& args,
picojson::object& out);
- void MLTrainerDatasetCreateGenerator(const picojson::value& args, picojson::object& out);
- void MLTrainerDatasetCreateFromFile(const picojson::value& args, picojson::object& out);
- void MLTrainerDatasetSetProperty(const picojson::value& args, picojson::object& out);
+ void MLTrainerDatasetCreateGenerator(const picojson::value& args,
+ picojson::object& out);
+ void MLTrainerDatasetCreateFromFile(const picojson::value& args,
+ picojson::object& out);
+ void MLTrainerDatasetSetProperty(const picojson::value& args,
+ picojson::object& out);
void MLTrainerDatasetDispose(const picojson::value& args,
picojson::object& out);
+#endif
};
} // namespace ml
diff --git a/src/ml/ml_utils.cc b/src/ml/ml_utils.cc
index 92fea3f7..7d7a6116 100644
--- a/src/ml/ml_utils.cc
+++ b/src/ml/ml_utils.cc
@@ -64,6 +64,7 @@ const PlatformEnum TensorTypeEnum{
{"INT64", ML_TENSOR_TYPE_INT64}, {"UINT64", ML_TENSOR_TYPE_UINT64},
{"UNKNOWN", ML_TENSOR_TYPE_UNKNOWN}};
+#ifdef NNTRAINER_SUPPORTED
const PlatformEnum<ml_train_optimizer_type_e> OptimizerTypeEnum{
{"OPTIMIZER_ADAM", ML_TRAIN_OPTIMIZER_TYPE_ADAM},
{"OPTIMIZER_SGD", ML_TRAIN_OPTIMIZER_TYPE_SGD},
@@ -104,7 +105,7 @@ const PlatformEnum DatasetModeEnum{
{"MODE_TRAIN", ML_TRAIN_DATASET_MODE_TRAIN},
{"MODE_VALID", ML_TRAIN_DATASET_MODE_VALID},
{"MODE_TEST", ML_TRAIN_DATASET_MODE_TEST}};
-
+#endif
} // namespace types
namespace util {
diff --git a/src/ml/ml_utils.h b/src/ml/ml_utils.h
index 37cd89cf..3b06b097 100644
--- a/src/ml/ml_utils.h
+++ b/src/ml/ml_utils.h
@@ -18,7 +18,9 @@
#define ML_ML_UTILS_H_
#include <nnstreamer.h>
+#ifdef NNTRAINER_SUPPORTED
#include <nntrainer.h>
+#endif
#if __cplusplus > 201402L
#include <optional>
@@ -45,11 +47,13 @@ extern const PlatformEnum HWTypeEnum;
extern const PlatformEnum<ml_nnfw_type_e> NNFWTypeEnum;
extern const PlatformEnum<ml_tensor_type_e> TensorTypeEnum;
+#ifdef NNTRAINER_SUPPORTED
extern const PlatformEnum<ml_train_optimizer_type_e> OptimizerTypeEnum;
extern const PlatformEnum<ml_train_layer_type_e> LayerTypeEnum;
extern const PlatformEnum<ml_train_summary_type_e> SummaryTypeEnum;
extern const PlatformEnum<ml_train_model_format_e> ModelSaveFormatEnum;
extern const PlatformEnum<ml_train_dataset_mode_e> DatasetModeEnum;
+#endif
} // namespace types