*/
#include "ml_instance.h"
-#include "ml_utils.h"
#include "common/converter.h"
#include "common/logger.h"
#include "common/picojson.h"
#include "common/platform_result.h"
#include "common/tools.h"
+#include "ml_utils.h"
static_assert(ML_TENSOR_RANK_LIMIT == 4,
- "This implementation requires different ML_TENSOR_RANK_LIMIT. Please fix the code.");
+ "This implementation requires different ML_TENSOR_RANK_LIMIT. "
+ "Please fix the code.");
namespace extension {
namespace ml {
using namespace common;
-#define CHECK_EXIST(args, name, out) \
- if (!args.contains(name)) { \
- std::string msg = std::string(name) + " is required argument"; \
- LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), &out); \
- return; \
+#define CHECK_EXIST(args, name, out) \
+ if (!args.contains(name)) { \
+ std::string msg = std::string(name) + " is required argument"; \
+ LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), \
+ &out); \
+ return; \
}
-// CHECK_TYPE will throw AbortError by default, but it can be changed by providing
-// additional parameter to the macro, i.e.:
-// CHECK_TYPE(args, "name", std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
-#define CHECK_TYPE(...) CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
+// CHECK_TYPE will throw AbortError by default, but it can be changed by
+// providing additional parameter to the macro, i.e.: CHECK_TYPE(args, "name",
+// std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
+#define CHECK_TYPE(...) \
+ CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
#define CHECK_TYPE_X(_1, _2, _3, _4, _5, EXC_TYPE, ...) EXC_TYPE
#define CHECK_TYPE_5(args, name, type, out, error_type) \
if (!args.get(name).is<type>()) { \
ScopeLogger();
using namespace std::placeholders;
-#define REGISTER_METHOD(M) RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
-#define REGISTER_BINARY_METHOD(M) RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
+#define REGISTER_METHOD(M) \
+ RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
+#define REGISTER_BINARY_METHOD(M) \
+ RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
#define REGISTER_METHOD_WITH_BINARY_ANWSER(M) \
- RegisterSyncHandlerWithBinaryAnswer(#M, std::bind(&MlInstance::M, this, _1, _2))
+ RegisterSyncHandlerWithBinaryAnswer(#M, \
+ std::bind(&MlInstance::M, this, _1, _2))
REGISTER_METHOD(MLCheckNNFWAvailability);
REGISTER_METHOD(MLTensorsInfoCountGetter);
REGISTER_METHOD(MLPipelineManagerCustomFilterOutput);
REGISTER_METHOD(MLPipelineManagerUnregisterCustomFilter);
+#ifdef NNTRAINER_SUPPORTED
REGISTER_METHOD(MLTrainerLayerSetProperty);
REGISTER_METHOD(MLTrainerLayerCreate);
REGISTER_METHOD(MLTrainerLayerGetName);
REGISTER_METHOD(MLTrainerDatasetCreateFromFile);
REGISTER_METHOD(MLTrainerDatasetSetProperty);
REGISTER_METHOD(MLTrainerDatasetDispose);
+#endif
#undef REGISTER_METHOD
}
return tensors_data_manager_;
}
-void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLCheckNNFWAvailability(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_EXIST(args, kNnfw, out)
CHECK_EXIST(args, kHw, out)
if (args.get(kCustomRequirement).is<std::string>()) {
customRequirement = args.get(kCustomRequirement).get<std::string>();
}
- bool availability_val = util::CheckNNFWAvailability(nnfw, hw, customRequirement);
+ bool availability_val =
+ util::CheckNNFWAvailability(nnfw, hw, customRequirement);
picojson::value available = picojson::value{availability_val};
ReportSuccess(available, out);
}
-void MlInstance::MLTensorsInfoCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
TensorsInfo* tensorsInfo = GetTensorsInfoManager().CreateTensorsInfo();
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not create new TensorsInfo handle"));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not create new TensorsInfo handle"));
return;
}
out[kId] = picojson::value(static_cast<double>(tensorsInfo->Id()));
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
unsigned int count = 0;
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kType, std::string, out);
CHECK_ARGS(args, kDimensions, picojson::array, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
const std::string& tensorType = args.get(kType).get<std::string>();
ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
+ PlatformResult result =
+ types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting value of TensorType"),
&out,
- ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getValue() failed, error: %s",
+ result.message().c_str()));
return;
}
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast<int>(args.get(kIndex).get<double>());
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kDimensions, picojson::array, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast<int>(args.get(kIndex).get<double>());
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kName, std::string, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast<int>(args.get(kIndex).get<double>());
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
int index = static_cast<int>(args.get(kIndex).get<double>());
ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
+ PlatformResult result =
+ tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
if (!result) {
LogAndReportError(result, &out);
return;
std::string tensorTypeString;
result = types::TensorTypeEnum.getName(tensorTypeEnum, &tensorTypeString);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting name of TensorType"),
&out,
- ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getName() failed, error: %s",
+ result.message().c_str()));
return;
}
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kType, std::string, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
const std::string& tensorType = args.get(kType).get<std::string>();
ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
- PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
+ PlatformResult result =
+ types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting value of TensorType"),
&out,
- ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getValue() failed, error: %s",
+ result.message().c_str()));
return;
}
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
- TensorsData* tensorsData = GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
+ TensorsData* tensorsData =
+ GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
if (!tensorsData) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not create TensorsData"));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not create TensorsData"));
return;
}
out[kTensorsDataId] = picojson::value(static_cast<double>(tensorsData->Id()));
- out[kTensorsInfoId] = picojson::value(static_cast<double>(tensorsData->TensorsInfoId()));
+ out[kTensorsInfoId] =
+ picojson::value(static_cast<double>(tensorsData->TensorsInfoId()));
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoClone(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
+ TensorsInfo* tensorsInfo =
+ GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
if (nullptr == tensorsInfo) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
return;
}
TensorsInfo* cloned = GetTensorsInfoManager().CloneTensorsInfo(tensorsInfo);
if (nullptr == cloned) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
return;
}
ReportSuccess(out);
}
-void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoEquals(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
CHECK_ARGS(args, kOtherId, double, out);
TensorsInfo* first = GetTensorsInfoManager().GetTensorsInfo(firstId);
if (nullptr == first) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", firstId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out, ("Could not find TensorsInfo handle with given id: %d", firstId));
return;
}
TensorsInfo* second = GetTensorsInfoManager().GetTensorsInfo(secondId);
if (nullptr == second) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", secondId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", secondId));
return;
}
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsInfoDispose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsInfoDispose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsInfoId, double, out);
int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
- PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
+ PlatformResult result =
+ GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
if (!result) {
LogAndReportError(result, &out);
}
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataDispose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
- int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int tensors_data_id =
+ static_cast<int>(args.get(kTensorsDataId).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
}
// Dispose underlying tensorsInfo
- PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
+ PlatformResult result =
+ GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
if (!result) {
LogAndReportError(result, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
int index = static_cast<int>(args.get(kIndex).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensor_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensor_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensor_data_id));
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get<picojson::array>(), location);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(),
+ location, dimensions, size);
if (!result) {
LogAndReportError(result, &out);
return;
return;
}
- std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
+ std::vector<std::uint8_t> out_data{raw_data.data,
+ raw_data.data + raw_data.size_in_bytes};
out[kBuffer] = picojson::value(picojson::string_type, true);
common::encode_binary_in_string(out_data, out[kBuffer].get<std::string>());
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args,
- std::vector<uint8_t>* out) {
+void MlInstance::MLTensorsDataGetTensorRawDataBinary(
+ const picojson::value& args, std::vector<uint8_t>* out) {
ScopeLogger("args: %s", args.serialize().c_str());
// TODO handle errors to out
// CHECK_ARGS(args, kTensorsDataId, double, out);
int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
int index = static_cast<int>(args.get(kIndex).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensor_data_id);
if (nullptr == tensors_data) {
- LoggerE("Could not find TensorsData handle with given id: %d", tensor_data_id);
- tools::ReportErrorToBinary(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
- out);
+ LoggerE("Could not find TensorsData handle with given id: %d",
+ tensor_data_id);
+ tools::ReportErrorToBinary(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ out);
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get<picojson::array>(), location);
if (!result) {
LoggerE("Reporting error.");
tools::ReportErrorToBinary(result, out);
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LoggerE("Reporting error.");
tools::ReportErrorToBinary(result, out);
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(),
+ location, dimensions, size);
if (!result) {
LoggerE("Reporting error.");
tools::ReportErrorToBinary(result, out);
}
out_json[kShape] = picojson::value{shape};
- std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
+ std::vector<std::uint8_t> out_data{raw_data.data,
+ raw_data.data + raw_data.size_in_bytes};
// FORMAT:
  // 4 byte === JSON length (N)
tools::ReportDataToBinary(out_data, out);
}
-void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
- int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int tensors_data_id =
+ static_cast<int>(args.get(kTensorsDataId).get<double>());
int index = static_cast<int>(args.get(kIndex).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
std::string tensor_type_string;
- PlatformResult result =
- types::TensorTypeEnum.getName(tensors_data->GetTensorType(index), &tensor_type_string);
+ PlatformResult result = types::TensorTypeEnum.getName(
+ tensors_data->GetTensorType(index), &tensor_type_string);
if (!result) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
+ LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR,
+ "Error getting name of TensorType"),
&out,
- ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
+ ("TensorTypeEnum.getName() failed, error: %s",
+ result.message().c_str()));
return;
}
ReportSuccess(val, out);
}
-void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger();
CHECK_ARGS(args, kTensorsDataId, double, out);
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kBuffer, std::string, out);
CHECK_ARGS(args, kLocation, picojson::array, out);
CHECK_ARGS(args, kSize, picojson::array, out);
- LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
+ LoggerD("%s, %s", kTensorsDataId.c_str(),
+ args.get(kTensorsDataId).serialize().c_str());
LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
- int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int tensors_data_id =
+ static_cast<int>(args.get(kTensorsDataId).get<double>());
int index = static_cast<int>(args.get(kIndex).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get<picojson::array>(), location);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(),
+ location, dimensions, size);
if (!result) {
LogAndReportError(result, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t data_size,
+void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data,
+ size_t data_size,
picojson::object& out) {
ScopeLogger();
/*
*/
unsigned int call_args_len_begin = 0;
unsigned int call_args_len = static_cast<unsigned int>(
- (data[call_args_len_begin] << 24) + (data[call_args_len_begin + 1] << 16) +
+ (data[call_args_len_begin] << 24) +
+ (data[call_args_len_begin + 1] << 16) +
(data[call_args_len_begin + 2] << 8) + (data[call_args_len_begin + 3]));
unsigned int buffer_len_begin = call_args_len_begin + 4;
CHECK_ARGS(args, kIndex, double, out);
CHECK_ARGS(args, kLocation, picojson::array, out);
CHECK_ARGS(args, kSize, picojson::array, out);
- LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
+ LoggerD("%s, %s", kTensorsDataId.c_str(),
+ args.get(kTensorsDataId).serialize().c_str());
LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
- int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ int tensors_data_id =
+ static_cast<int>(args.get(kTensorsDataId).get<double>());
int index = static_cast<int>(args.get(kIndex).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
- PlatformResult result =
- util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
+ PlatformResult result = util::GetLocationFromJsonArray(
+ args.get(kLocation).get<picojson::array>(), location);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
- result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
+ result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(
+ index, dimensions);
if (!result) {
LogAndReportError(result, &out);
return;
}
unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
- result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
- size);
+ result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(),
+ location, dimensions, size);
if (!result) {
LogAndReportError(result, &out);
return;
}
- TensorRawData raw_data{reinterpret_cast<uint8_t*>(const_cast<char*>(data + buffer_begin)),
- buffer_len};
+ TensorRawData raw_data{
+ reinterpret_cast<uint8_t*>(const_cast<char*>(data + buffer_begin)),
+ buffer_len};
result = tensors_data->SetTensorRawData(index, location, size, raw_data);
if (!result) {
LogAndReportError(result, &out);
ReportSuccess(out);
}
-void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleManagerOpenModel(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kModelPath, std::string, out);
CHECK_ARGS(args, kInTensorsInfo, double, out);
CHECK_ARGS(args, kHwType, std::string, out);
CHECK_ARGS(args, kIsDynamicMode, bool, out);
- const auto& model_path = common::tools::ConvertUriToPath(args.get(kModelPath).get<std::string>());
+ const auto& model_path =
+ common::tools::ConvertUriToPath(args.get(kModelPath).get<std::string>());
CHECK_STORAGE_ACCESS(model_path, &out);
TensorsInfo* in_tensors_info = nullptr;
if (kNoId != inTensorId) {
in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
if (nullptr == in_tensors_info) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", inTensorId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", inTensorId));
return;
}
}
if (kNoId != outTensorId) {
out_tensors_info = GetTensorsInfoManager().GetTensorsInfo(outTensorId);
if (nullptr == out_tensors_info) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", outTensorId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", outTensorId));
return;
}
}
ml_nnfw_type_e nnfw_e = ML_NNFW_TYPE_ANY;
- PlatformResult result =
- types::NNFWTypeEnum.getValue(args.get(kFwType).get<std::string>(), &nnfw_e);
+ PlatformResult result = types::NNFWTypeEnum.getValue(
+ args.get(kFwType).get<std::string>(), &nnfw_e);
if (!result) {
LogAndReportError(result, &out);
return;
}
ml_nnfw_hw_e hw_e = ML_NNFW_HW_ANY;
- result = types::HWTypeEnum.getValue(args.get(kHwType).get<std::string>(), &hw_e);
+ result =
+ types::HWTypeEnum.getValue(args.get(kHwType).get<std::string>(), &hw_e);
if (!result) {
LogAndReportError(result, &out);
return;
auto is_dynamic_mode = args.get(kIsDynamicMode).get<bool>();
- auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
- is_dynamic_mode](decltype(out) out) {
+ auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e,
+ hw_e, is_dynamic_mode](decltype(out) out) {
PlatformResult result = common::tools::CheckFileAvailability(model_path);
if (!result) {
LogAndReportError(
- PlatformResult(ErrorCode::NOT_FOUND_ERR, "File does not exist or is not accessible"),
- &out, ("File does not exist or is not accessible: %s", model_path.c_str()));
+ PlatformResult(ErrorCode::NOT_FOUND_ERR,
+ "File does not exist or is not accessible"),
+ &out,
+ ("File does not exist or is not accessible: %s", model_path.c_str()));
return;
}
int res_id = -1;
- result = single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
- is_dynamic_mode, &res_id);
+ result =
+ single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info,
+ nnfw_e, hw_e, is_dynamic_mode, &res_id);
if (!result) {
ReportError(result, &out);
return;
ReportSuccess(out);
};
- bool async =
- (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
+ bool async = (args.contains(kAsync) && args.get(kAsync).is<bool>())
+ ? args.get(kAsync).get<bool>()
+ : false;
if (!async) {
logic(out);
}
}
-void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kGetInputMode, bool, out);
ReportSuccess(out);
}
-void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kInTensorsInfo, double, out);
auto id = static_cast<int>(args.get(kId).get<double>());
auto inTensorId = static_cast<int>(args.get(kInTensorsInfo).get<double>());
- TensorsInfo* in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
+ TensorsInfo* in_tensors_info =
+ GetTensorsInfoManager().GetTensorsInfo(inTensorId);
if (nullptr == in_tensors_info) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", inTensorId));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d", inTensorId));
return;
}
- TensorsInfo* clone = GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
+ TensorsInfo* clone =
+ GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
auto ret = single_manager_.SetNativeInputInfo(id, in_tensors_info);
if (!ret) {
ReportSuccess(out);
}
-void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotInvoke(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kTensorsDataId, double, out);
int id = static_cast<int>(args.get(kId).get<double>());
- int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
- bool async =
- (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
-
- TensorsData* in_tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
+ int tensors_data_id =
+ static_cast<int>(args.get(kTensorsDataId).get<double>());
+ bool async = (args.contains(kAsync) && args.get(kAsync).is<bool>())
+ ? args.get(kAsync).get<bool>()
+ : false;
+
+ TensorsData* in_tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensors_data_id);
if (async && in_tensors_data) {
// in case of async flow need to prevent destroying entry data during invoke
// from JS, creation of a copy
in_tensors_data->GetTensorsInfo()->Handle(), in_tensors_data->Handle());
}
if (nullptr == in_tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
- ("Could not find TensorsData handle with given id: %d", tensors_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
+ &out,
+ ("Could not find TensorsData handle with given id: %d",
+ tensors_data_id));
return;
}
- auto logic = [this, id, tensors_data_id, in_tensors_data, async](decltype(out) out) {
+ auto logic = [this, id, tensors_data_id, in_tensors_data,
+ async](decltype(out) out) {
TensorsData* out_tensors_data = nullptr;
auto ret = single_manager_.Invoke(id, in_tensors_data, &out_tensors_data);
if (async) {
// in case of async flow, the in_tensor_data with underlying TensorsInfo
// was copied, thus need to be released here
- GetTensorsInfoManager().DisposeTensorsInfo(in_tensors_data->GetTensorsInfo());
+ GetTensorsInfoManager().DisposeTensorsInfo(
+ in_tensors_data->GetTensorsInfo());
GetTensorsDataManager().DisposeTensorsData(in_tensors_data);
}
if (!ret) {
return;
}
- out[kTensorsDataId] = picojson::value(static_cast<double>(out_tensors_data->Id()));
- out[kTensorsInfoId] = picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
+ out[kTensorsDataId] =
+ picojson::value(static_cast<double>(out_tensors_data->Id()));
+ out[kTensorsInfoId] =
+ picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
ReportSuccess(out);
};
}
}
-void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotGetValue(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
ReportSuccess(val, out);
}
-void MlInstance::MLSingleShotSetValue(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotSetValue(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
ReportSuccess(out);
}
-void MlInstance::MLSingleShotSetTimeout(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotSetTimeout(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kTimeout, double, out);
ReportSuccess(out);
}
-void MlInstance::MLSingleShotClose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLSingleShotClose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
auto arguments_valid = args.get(kId).is<double>();
arguments_valid &= args.get(kDefinition).is<std::string>();
- arguments_valid &= (args.get(kPipelineStateChangeListenerName).is<std::string>() ||
- args.get(kPipelineStateChangeListenerName).is<picojson::null>());
- LoggerD("CreatePipeline arguments are %s", arguments_valid ? "valid" : "invalid");
+ arguments_valid &=
+ (args.get(kPipelineStateChangeListenerName).is<std::string>() ||
+ args.get(kPipelineStateChangeListenerName).is<picojson::null>());
+ LoggerD("CreatePipeline arguments are %s",
+ arguments_valid ? "valid" : "invalid");
return !arguments_valid;
}
ScopeLogger("args: %s", args.serialize().c_str());
if (CreatePipelineArgumentsAreInvalid(args)) {
- ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"}, &out);
+ ReportError(
+ PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"},
+ &out);
return;
}
? args.get(kPipelineStateChangeListenerName).get<std::string>()
: "";
- auto ret = pipeline_manager_.CreatePipeline(id, definition, state_change_listener_name);
+ auto ret = pipeline_manager_.CreatePipeline(id, definition,
+ state_change_listener_name);
if (!ret) {
ReportError(ret, &out);
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetState(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetState(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
if (!args.get(kId).is<double>()) {
ReportSuccess(state_value, out);
}
-void MlInstance::MLPipelineStart(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineStart(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
ReportSuccess(out);
}
-void MlInstance::MLPipelineStop(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineStop(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
ReportSuccess(out);
}
-void MlInstance::MLPipelineDispose(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineDispose(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
if (!args.get(kId).is<double>()) {
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetSource(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetSwitch(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
if (!args.get(kId).is<double>()) {
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetValve(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetValve(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
auto sink_name = args.get(kName).get<std::string>();
auto listener_name = args.get(kListenerName).get<std::string>();
- auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id, listener_name);
+ auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id,
+ listener_name);
if (!ret) {
LogAndReportError(ret, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& args,
- picojson::object& out) {
+void MlInstance::MLPipelineManagerRegisterCustomFilter(
+ const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kName, std::string, out);
const auto& custom_filter_name = args.get(kName).get<std::string>();
const auto& listener_name = args.get(kListenerName).get<std::string>();
- auto input_tensors_info_id = static_cast<int>(args.get(kInputTensorsInfoId).get<double>());
- auto output_tensors_info_id = static_cast<int>(args.get(kOutputTensorsInfoId).get<double>());
+ auto input_tensors_info_id =
+ static_cast<int>(args.get(kInputTensorsInfoId).get<double>());
+ auto output_tensors_info_id =
+ static_cast<int>(args.get(kOutputTensorsInfoId).get<double>());
TensorsInfo* input_tensors_info_ptr =
GetTensorsInfoManager().GetTensorsInfo(input_tensors_info_id);
if (!input_tensors_info_ptr) {
LogAndReportError(
- PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", input_tensors_info_id));
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d",
+ input_tensors_info_id));
return;
}
GetTensorsInfoManager().GetTensorsInfo(output_tensors_info_id);
if (!output_tensors_info_ptr) {
LogAndReportError(
- PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
- ("Could not find TensorsInfo handle with given id: %d", output_tensors_info_id));
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"),
+ &out,
+ ("Could not find TensorsInfo handle with given id: %d",
+ output_tensors_info_id));
return;
}
auto ret = pipeline_manager_.RegisterCustomFilter(
- custom_filter_name, listener_name, input_tensors_info_ptr, output_tensors_info_ptr);
+ custom_filter_name, listener_name, input_tensors_info_ptr,
+ output_tensors_info_ptr);
if (!ret) {
LogAndReportError(ret, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args,
- picojson::object& out) {
+void MlInstance::MLPipelineManagerCustomFilterOutput(
+ const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kName, std::string, out);
auto status = static_cast<int>(args.get(kStatus).get<double>());
auto request_id = static_cast<int>(args.get(kRequestId).get<double>());
- auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status);
+ auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name,
+ request_id, status);
if (!ret) {
LogAndReportError(ret, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLPipelineManagerUnregisterCustomFilter(const picojson::value& args,
- picojson::object& out) {
+void MlInstance::MLPipelineManagerUnregisterCustomFilter(
+ const picojson::value& args, picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kName, std::string, out);
ReportSuccess(out);
}
-void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
const auto& node_name = args.get(kNodeName).get<std::string>();
const auto& type = args.get(kType).get<std::string>();
- PlatformResult result = pipeline_manager_.getProperty(id, node_name, name, type, &out);
+ PlatformResult result =
+ pipeline_manager_.getProperty(id, node_name, name, type, &out);
if (!result) {
LogAndReportError(result, &out);
ReportSuccess(out);
}
-void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
}
const picojson::value& property = args.get(kProperty);
- PlatformResult result = pipeline_manager_.setProperty(id, node_name, name, type, property);
+ PlatformResult result =
+ pipeline_manager_.setProperty(id, node_name, name, type, property);
if (!result) {
LogAndReportError(result, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
const auto& name = args.get(kName).get<std::string>();
int res_id = -1;
- PlatformResult result = pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
+ PlatformResult result =
+ pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
if (!result) {
LogAndReportError(result, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineSourceInputData(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
auto& source_name = args.get(kName).get<std::string>();
- auto tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
+ auto tensor_data_id =
+ static_cast<int>(args.get(kTensorsDataId).get<double>());
- TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
+ TensorsData* tensors_data =
+ GetTensorsDataManager().GetTensorsData(tensor_data_id);
if (nullptr == tensors_data) {
- LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
- ("Could not get TensorData handle with given id: %d", tensor_data_id));
+ LogAndReportError(
+ PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
+ ("Could not get TensorData handle with given id: %d", tensor_data_id));
return;
}
- auto ret = pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
+ auto ret =
+ pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
if (!ret) {
LogAndReportError(ret, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
auto& switch_name = args.get(kName).get<std::string>();
picojson::array pad_list;
- auto ret = pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
+ auto ret =
+ pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
if (!ret) {
LogAndReportError(ret, &out);
return;
ReportSuccess(picojson::value{std::move(pad_list)}, out);
}
-void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineSwitchSelect(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: [%s]", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
ReportSuccess(out);
}
-void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineValveSetOpen(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
ReportSuccess(out);
}
-void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLPipelineValveIsOpen(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
ReportSuccess(picojson::value{open}, out);
}
-void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args, picojson::object& out) {
+#ifdef NNTRAINER_SUPPORTED
+void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
ReportSuccess(out);
}
-void MlInstance::MLTrainerLayerCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerLayerCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kType, std::string, out);
int id = -1;
ReportSuccess(out);
}
-void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
auto name = args.get(kName).get<std::string>();
auto value = args.get(kValue).get<std::string>();
- PlatformResult result = trainer_manager_.OptimizerSetProperty(id, name, value);
+ PlatformResult result =
+ trainer_manager_.OptimizerSetProperty(id, name, value);
if (!result) {
ReportError(result, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLTrainerOptimizerCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerOptimizerCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kType, std::string, out);
int id = -1;
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelCreate(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelCreate(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
int id = -1;
PlatformResult result;
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelCompile(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelCompile(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kOptions, picojson::object, out);
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelAddLayer(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelAddLayer(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kLayerId, double, out);
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelRun(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelRun(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kOptions, picojson::object, out);
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelSummarize(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelSummarize(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kLevel, std::string, out);
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelSetDataset(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kDatasetId, double, out);
ReportSuccess(out);
}
-void MlInstance::MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerModelSetOptimizer(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kOptimizerId, double, out);
const std::string& valid_file_path = args.get(kValid).get<std::string>();
const std::string& test_file_path = args.get(kTest).get<std::string>();
- PlatformResult result =
- trainer_manager_.CreateFileDataset(id, train_file_path, valid_file_path, test_file_path);
+ PlatformResult result = trainer_manager_.CreateFileDataset(
+ id, train_file_path, valid_file_path, test_file_path);
if (!result) {
ReportError(result, &out);
return;
ReportSuccess(out);
}
-void MlInstance::MLTrainerDatasetSetProperty(const picojson::value& args, picojson::object& out) {
+void MlInstance::MLTrainerDatasetSetProperty(const picojson::value& args,
+ picojson::object& out) {
ScopeLogger("args: %s", args.serialize().c_str());
CHECK_ARGS(args, kId, double, out);
CHECK_ARGS(args, kName, std::string, out);
}
ReportSuccess(out);
}
+#endif
#undef CHECK_EXIST
#undef CHECK_TYPE