2 * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include "ml_instance.h"
20 #include "common/converter.h"
21 #include "common/logger.h"
22 #include "common/picojson.h"
23 #include "common/platform_result.h"
24 #include "common/tools.h"
// The dimension/location/size arrays below are all sized to exactly 4 entries;
// a different rank limit would require changes wherever those arrays are built.
26 static_assert(ML_TENSOR_RANK_LIMIT == 4,
27 "This implementation requires different ML_TENSOR_RANK_LIMIT. Please fix the code.");
// Return code a JS custom filter reports to signal successful processing.
33 const int kCustomFilterSuccess = 0;
// Keys of the JSON arguments/fields exchanged with the JavaScript layer.
// Values must match the strings used by the JS side of this extension.
36 const std::string kAsync = "async";
37 const std::string kBOOLEAN = "BOOLEAN";
38 const std::string kBuffer = "buffer";
39 const std::string kCallbackId = "callbackId";
40 const std::string kConfigPath = "configPath";
41 const std::string kCustomRequirement = "customRequirement";
42 const std::string kDataId = "dataId";
43 const std::string kDatasetId = "datasetId";
44 const std::string kDefinition = "definition";
45 const std::string kDimensions = "dimensions";
46 const std::string kFwType = "fwType";
47 const std::string kGetInputMode = "getInputMode";
48 const std::string kHw = "hw";
49 const std::string kHwType = "hwType";
50 const std::string kId = "id";
51 const std::string kInTensorsInfo = "inTensorsInfo";
52 const std::string kIndex = "index";
53 const std::string kInputTensorsInfoId = "inputTensorsInfoId";
54 const std::string kIsDynamicMode = "isDynamicMode";
55 const std::string kLayerId = "layerId";
56 const std::string kLevel = "level";
57 const std::string kListenerName = "listenerName";
58 const std::string kLocation = "location";
59 const std::string kMode = "mode";
60 const std::string kModelPath = "modelPath";
61 const std::string kName = "name";
62 const std::string kNnfw = "nnfw";
63 const std::string kNodeName = "nodeName";
64 const std::string kOpen = "open";
65 const std::string kOptimizerId = "optimizerId";
66 const std::string kOptions = "options";
67 const std::string kOtherId = "otherId";
68 const std::string kOutTensorsInfo = "outTensorsInfo";
69 const std::string kOutputTensorsInfoId = "outputTensorsInfoId";
70 const std::string kPadName = "padName";
// NOTE(review): same value as kListenerName above — presumably kept as a
// separate constant for readability at pipeline call sites; confirm before merging.
71 const std::string kPipelineStateChangeListenerName = "listenerName";
72 const std::string kProperty = "property";
73 const std::string kRequestId = "requestId";
74 const std::string kSTRING = "STRING";
75 const std::string kSaveFormat = "saveFormat";
76 const std::string kSavePath = "savePath";
77 const std::string kShape = "shape";
78 const std::string kSize = "size";
79 const std::string kStatus = "status";
80 const std::string kSummary = "summary";
81 const std::string kTensorsDataId = "tensorsDataId";
82 const std::string kTensorsInfoId = "tensorsInfoId";
83 const std::string kTest = "test";
84 const std::string kTimeout = "timeout";
85 const std::string kTrain = "train";
86 const std::string kType = "type";
87 const std::string kValid = "valid";
88 const std::string kValue = "value";
92 using namespace common;
// Reports TypeMismatchError (and logs) when the required argument `name`
// is absent from `args`. Intended for use at the top of message handlers.
94 #define CHECK_EXIST(args, name, out) \
95 if (!args.contains(name)) { \
96 std::string msg = std::string(name) + " is required argument"; \
97 LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), &out); \
101 // CHECK_TYPE will throw AbortError by default, but it can be changed by providing
102 // additional parameter to the macro, i.e.:
103 // CHECK_TYPE(args, "name", std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
// Classic variadic-macro overload dispatch: CHECK_TYPE_X picks CHECK_TYPE_5
// when five arguments are given, CHECK_TYPE_4 when four are given.
104 #define CHECK_TYPE(...) CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
105 #define CHECK_TYPE_X(_1, _2, _3, _4, _5, EXC_TYPE, ...) EXC_TYPE
// Reports `error_type` (and logs) when args[name] is not a picojson `type`.
106 #define CHECK_TYPE_5(args, name, type, out, error_type) \
107 if (!args.get(name).is<type>()) { \
108 std::string msg = std::string(name) + " has invalid type"; \
109 LogAndReportError(PlatformResult(error_type, msg), &out); \
// Four-argument form defaults the reported error to AbortError.
112 #define CHECK_TYPE_4(args, name, type, out) \
113 CHECK_TYPE_5(args, name, type, out, ErrorCode::ABORT_ERR)
// Combined presence + type validation for one argument.
115 #define CHECK_ARGS(args, name, type, out) \
116 CHECK_EXIST(args, name, out) \
117 CHECK_TYPE(args, name, type, out)
// Wires the managers together (tensors-info depends on tensors-data; single-shot
// and pipeline managers depend on tensors-info) and registers every message
// handler exposed to the JavaScript layer.
119 MlInstance::MlInstance()
120 : tensors_info_manager_{&tensors_data_manager_},
121 single_manager_{&tensors_info_manager_},
122 pipeline_manager_{this, &tensors_info_manager_, &tensors_data_manager_} {
124 using namespace std::placeholders;
// Helpers binding member functions as handlers; the macro argument is also
// used (stringified) as the handler name visible to JS.
// NOTE(review): "ANWSER" is a typo for "ANSWER" in the macro name — renaming
// would require updating every usage, so it is only flagged here.
126 #define REGISTER_METHOD(M) RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
127 #define REGISTER_BINARY_METHOD(M) RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
128 #define REGISTER_METHOD_WITH_BINARY_ANWSER(M) \
129 RegisterSyncHandlerWithBinaryAnswer(#M, std::bind(&MlInstance::M, this, _1, _2))
// Common: NNFW availability and TensorsInfo handling.
131 REGISTER_METHOD(MLCheckNNFWAvailability);
132 REGISTER_METHOD(MLTensorsInfoCountGetter);
133 REGISTER_METHOD(MLTensorsInfoAddTensorInfo);
134 REGISTER_METHOD(MLTensorsInfoCreate);
135 REGISTER_METHOD(MLTensorsInfoGetDimensions);
136 REGISTER_METHOD(MLTensorsInfoSetDimensions);
137 REGISTER_METHOD(MLTensorsInfoGetTensorName);
138 REGISTER_METHOD(MLTensorsInfoSetTensorName);
139 REGISTER_METHOD(MLTensorsInfoGetTensorSize);
140 REGISTER_METHOD(MLTensorsInfoGetTensorType);
141 REGISTER_METHOD(MLTensorsInfoSetTensorType);
142 REGISTER_METHOD(MLTensorsInfoGetTensorsData);
143 REGISTER_METHOD(MLTensorsInfoClone);
144 REGISTER_METHOD(MLTensorsInfoEquals);
145 REGISTER_METHOD(MLTensorsInfoDispose);
146 REGISTER_METHOD(MLPipelineValveSetOpen);
147 REGISTER_METHOD(MLPipelineValveIsOpen);
// TensorsData handling; raw-data transfer also has binary (non-JSON) variants.
149 REGISTER_METHOD(MLTensorsDataDispose);
150 REGISTER_METHOD(MLTensorsDataGetTensorRawData);
151 REGISTER_METHOD_WITH_BINARY_ANWSER(MLTensorsDataGetTensorRawDataBinary);
152 REGISTER_METHOD(MLTensorsDataGetTensorType);
153 REGISTER_METHOD(MLTensorsDataSetTensorRawData);
154 REGISTER_BINARY_METHOD(MLTensorsDataSetTensorRawDataBinary);
// SingleShot API.
156 REGISTER_METHOD(MLSingleManagerOpenModel);
157 REGISTER_METHOD(MLSingleShotGetTensorsInfo);
158 REGISTER_METHOD(MLSingleShotSetInputInfo);
159 REGISTER_METHOD(MLSingleShotInvoke);
160 REGISTER_METHOD(MLSingleShotGetValue);
161 REGISTER_METHOD(MLSingleShotSetValue);
162 REGISTER_METHOD(MLSingleShotSetTimeout);
163 REGISTER_METHOD(MLSingleShotClose);
// Pipeline API.
165 REGISTER_METHOD(MLPipelineManagerCreatePipeline);
166 REGISTER_METHOD(MLPipelineGetState);
167 REGISTER_METHOD(MLPipelineDispose);
168 REGISTER_METHOD(MLPipelineStart);
169 REGISTER_METHOD(MLPipelineStop);
170 REGISTER_METHOD(MLPipelineGetNodeInfo);
171 REGISTER_METHOD(MLPipelineGetSwitch);
172 REGISTER_METHOD(MLPipelineSwitchGetPadList);
173 REGISTER_METHOD(MLPipelineSwitchSelect);
174 REGISTER_METHOD(MLPipelineGetValve);
175 REGISTER_METHOD(MLPipelineNodeInfoGetProperty);
176 REGISTER_METHOD(MLPipelineNodeInfoSetProperty);
177 REGISTER_METHOD(MLPipelineGetSource);
178 REGISTER_METHOD(MLPipelineGetInputTensorsInfo);
179 REGISTER_METHOD(MLPipelineSourceInputData);
180 REGISTER_METHOD(MLPipelineRegisterSinkListener);
181 REGISTER_METHOD(MLPipelineUnregisterSinkListener);
182 REGISTER_METHOD(MLPipelineManagerRegisterCustomFilter);
183 REGISTER_METHOD(MLPipelineManagerCustomFilterOutput);
184 REGISTER_METHOD(MLPipelineManagerUnregisterCustomFilter);
// Trainer API.
186 REGISTER_METHOD(MLTrainerLayerSetProperty);
187 REGISTER_METHOD(MLTrainerLayerCreate);
188 REGISTER_METHOD(MLTrainerLayerGetName);
189 REGISTER_METHOD(MLTrainerLayerDispose);
190 REGISTER_METHOD(MLTrainerOptimizerSetProperty);
191 REGISTER_METHOD(MLTrainerOptimizerCreate);
192 REGISTER_METHOD(MLTrainerOptimizerDispose);
193 REGISTER_METHOD(MLTrainerModelCreate);
194 REGISTER_METHOD(MLTrainerModelCompile);
195 REGISTER_METHOD(MLTrainerModelAddLayer);
196 REGISTER_METHOD(MLTrainerModelRun);
197 REGISTER_METHOD(MLTrainerModelSummarize);
198 REGISTER_METHOD(MLTrainerModelCheckMetrics);
199 REGISTER_METHOD(MLTrainerModelSave);
200 REGISTER_METHOD(MLTrainerModelLoad);
201 REGISTER_METHOD(MLTrainerModelSetDataset);
202 REGISTER_METHOD(MLTrainerModelSetOptimizer);
203 REGISTER_METHOD(MLTrainerModelDispose);
204 REGISTER_METHOD(MLTrainerDatasetCreateFromFile);
205 REGISTER_METHOD(MLTrainerDatasetSetProperty);
206 REGISTER_METHOD(MLTrainerDatasetDispose);
208 #undef REGISTER_METHOD
// Destructor. NOTE(review): the body is not fully visible in this chunk;
// no claims are made here about teardown behavior.
211 MlInstance::~MlInstance() {
// Accessor for the instance-owned TensorsInfo manager (non-owning reference).
216 TensorsInfoManager& MlInstance::GetTensorsInfoManager() {
217 return tensors_info_manager_;
// Accessor for the instance-owned TensorsData manager (non-owning reference).
220 TensorsDataManager& MlInstance::GetTensorsDataManager() {
221 return tensors_data_manager_;
// Handler: reports (as a boolean) whether the requested NN framework /
// hardware combination is available on this device.
// Expected args: nnfw (string), hw (string), customRequirement (string or null).
224 void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
225 ScopeLogger("args: %s", args.serialize().c_str());
226 CHECK_EXIST(args, kNnfw, out)
227 CHECK_EXIST(args, kHw, out)
228 CHECK_EXIST(args, kCustomRequirement, out)
230 std::string nnfw = args.get(kNnfw).get<std::string>();
231 std::string hw = args.get(kHw).get<std::string>();
// customRequirement is optional on the JS side: only CHECK_EXIST is used above,
// and the value is read only if it is actually a string (e.g. not null).
232 optional<std::string> customRequirement;
233 if (args.get(kCustomRequirement).is<std::string>()) {
234 customRequirement = args.get(kCustomRequirement).get<std::string>();
236 bool availability_val = util::CheckNNFWAvailability(nnfw, hw, customRequirement);
238 picojson::value available = picojson::value{availability_val};
239 ReportSuccess(available, out);
// Handler: creates a new, empty TensorsInfo object and returns its numeric id
// (under the "id" key) for subsequent calls. AbortError on allocation failure.
242 void MlInstance::MLTensorsInfoCreate(const picojson::value& args, picojson::object& out) {
243 ScopeLogger("args: %s", args.serialize().c_str());
245 TensorsInfo* tensorsInfo = GetTensorsInfoManager().CreateTensorsInfo();
246 if (nullptr == tensorsInfo) {
247 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
248 ("Could not create new TensorsInfo handle"));
// Ids are ints widened to double because picojson numbers are doubles.
251 out[kId] = picojson::value(static_cast<double>(tensorsInfo->Id()));
// Handler: returns the number of tensors held by the TensorsInfo identified
// by args.tensorsInfoId. AbortError when the id is unknown.
255 void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out) {
256 ScopeLogger("args: %s", args.serialize().c_str());
257 CHECK_ARGS(args, kTensorsInfoId, double, out);
259 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
260 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
261 if (nullptr == tensorsInfo) {
262 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
263 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
266 unsigned int count = 0;
// Count comes from the native ML API, not from any cached value.
267 PlatformResult result = tensorsInfo->NativeGetCount(&count);
269 ReportError(result, &out);
272 picojson::value val = picojson::value{static_cast<double>(count)};
273 ReportSuccess(val, out);
// Handler: appends one tensor description (optional name, type, dimensions)
// to an existing TensorsInfo and returns the new tensor's index.
276 void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
277 ScopeLogger("args: %s", args.serialize().c_str());
278 CHECK_ARGS(args, kTensorsInfoId, double, out);
279 CHECK_ARGS(args, kType, std::string, out);
// Name is only checked for presence (may be null); type is enforced below.
281 CHECK_EXIST(args, kName, out);
282 CHECK_ARGS(args, kDimensions, picojson::array, out);
284 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
285 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
286 if (nullptr == tensorsInfo) {
287 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
288 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
// Map the JS type string (e.g. "UINT8") onto the native ml_tensor_type_e enum.
292 const std::string& tensorType = args.get(kType).get<std::string>();
293 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
294 PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
296 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
298 ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
// A non-string name (e.g. null) leaves the name empty.
303 if (args.get(kName).is<std::string>()) {
304 name = args.get(kName).get<std::string>();
305 LoggerD("name: %s", name.c_str());
// Fixed-rank dimension array; unspecified entries stay zero-initialized.
308 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
309 auto dim = args.get(kDimensions).get<picojson::array>();
310 result = util::GetDimensionsFromJsonArray(dim, dimensions);
312 LogAndReportError(result, &out);
316 result = tensorsInfo->AddTensorInfo(name, tensorTypeEnum, dimensions);
318 LogAndReportError(result, &out);
// Index of the just-added tensor == new count - 1.
322 int count = tensorsInfo->Count() - 1;
324 picojson::value val = picojson::value{static_cast<double>(count)};
325 ReportSuccess(val, out);
// Handler: returns the dimension array of the tensor at args.index inside the
// TensorsInfo identified by args.tensorsInfoId, as a JSON array of numbers.
328 void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args, picojson::object& out) {
329 ScopeLogger("args: %s", args.serialize().c_str());
330 CHECK_ARGS(args, kTensorsInfoId, double, out);
331 CHECK_ARGS(args, kIndex, double, out);
333 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
334 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
335 if (nullptr == tensorsInfo) {
336 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
337 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
340 int index = static_cast<int>(args.get(kIndex).get<double>());
341 unsigned int dim[ML_TENSOR_RANK_LIMIT];
342 PlatformResult result = tensorsInfo->NativeGetTensorDimensions(index, dim);
344 LogAndReportError(result, &out);
// Convert the fixed-size native array into a JSON array for the JS layer.
347 picojson::array array = picojson::array{};
348 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
352 array.push_back(picojson::value{static_cast<double>(dim[i])});
354 picojson::value val = picojson::value{array};
355 ReportSuccess(val, out);
// Handler: replaces the dimension array of the tensor at args.index inside
// the TensorsInfo identified by args.tensorsInfoId.
358 void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojson::object& out) {
359 ScopeLogger("args: %s", args.serialize().c_str());
360 CHECK_ARGS(args, kTensorsInfoId, double, out);
361 CHECK_ARGS(args, kIndex, double, out);
362 CHECK_ARGS(args, kDimensions, picojson::array, out);
364 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
365 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
366 if (nullptr == tensorsInfo) {
367 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
368 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
// Fixed-rank dimension array; unspecified entries stay zero-initialized.
372 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
373 auto dim = args.get(kDimensions).get<picojson::array>();
374 PlatformResult result = util::GetDimensionsFromJsonArray(dim, dimensions);
376 LogAndReportError(result, &out);
380 int index = static_cast<int>(args.get(kIndex).get<double>());
381 result = tensorsInfo->NativeSetTensorDimensions(index, dimensions);
383 LogAndReportError(result, &out);
// Handler: returns the name of the tensor at args.index inside the
// TensorsInfo identified by args.tensorsInfoId.
389 void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args, picojson::object& out) {
390 ScopeLogger("args: %s", args.serialize().c_str());
391 CHECK_ARGS(args, kTensorsInfoId, double, out);
392 CHECK_ARGS(args, kIndex, double, out);
394 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
395 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
396 if (nullptr == tensorsInfo) {
397 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
398 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
401 int index = static_cast<int>(args.get(kIndex).get<double>());
403 PlatformResult result = tensorsInfo->NativeGetTensorName(index, &name);
405 LogAndReportError(result, &out);
408 picojson::value val = picojson::value{name};
409 ReportSuccess(val, out);
// Handler: sets the name of the tensor at args.index inside the
// TensorsInfo identified by args.tensorsInfoId.
412 void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out) {
413 ScopeLogger("args: %s", args.serialize().c_str());
414 CHECK_ARGS(args, kTensorsInfoId, double, out);
415 CHECK_ARGS(args, kIndex, double, out);
416 CHECK_ARGS(args, kName, std::string, out);
418 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
419 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
420 if (nullptr == tensorsInfo) {
421 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
422 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
426 int index = static_cast<int>(args.get(kIndex).get<double>());
427 const std::string& name = args.get(kName).get<std::string>();
428 PlatformResult result = tensorsInfo->NativeSetTensorName(index, name);
430 LogAndReportError(result, &out);
// Handler: returns the byte size of the tensor at args.index inside the
// TensorsInfo identified by args.tensorsInfoId.
436 void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out) {
437 ScopeLogger("args: %s", args.serialize().c_str());
438 CHECK_ARGS(args, kTensorsInfoId, double, out);
439 CHECK_ARGS(args, kIndex, double, out);
441 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
442 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
443 if (nullptr == tensorsInfo) {
444 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
445 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
448 int index = static_cast<int>(args.get(kIndex).get<double>());
450 PlatformResult result = tensorsInfo->NativeGetTensorSize(index, &size);
452 LogAndReportError(result, &out);
456 picojson::value val = picojson::value{static_cast<double>(size)};
457 ReportSuccess(val, out);
// Handler: returns the type (as the JS-facing string, e.g. "UINT8") of the
// tensor at args.index inside the TensorsInfo identified by args.tensorsInfoId.
460 void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out) {
461 ScopeLogger("args: %s", args.serialize().c_str());
462 CHECK_ARGS(args, kTensorsInfoId, double, out);
463 CHECK_ARGS(args, kIndex, double, out);
465 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
466 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
467 if (nullptr == tensorsInfo) {
468 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
469 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
473 int index = static_cast<int>(args.get(kIndex).get<double>());
474 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
475 PlatformResult result = tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
477 LogAndReportError(result, &out);
// Translate the native enum back into the string the JS layer understands.
480 std::string tensorTypeString;
481 result = types::TensorTypeEnum.getName(tensorTypeEnum, &tensorTypeString);
483 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
485 ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
489 picojson::value val = picojson::value{tensorTypeString};
490 ReportSuccess(val, out);
// Handler: sets the type of the tensor at args.index inside the TensorsInfo
// identified by args.tensorsInfoId, from the JS-facing type string.
493 void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out) {
494 ScopeLogger("args: %s", args.serialize().c_str());
495 CHECK_ARGS(args, kTensorsInfoId, double, out);
496 CHECK_ARGS(args, kIndex, double, out);
497 CHECK_ARGS(args, kType, std::string, out);
499 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
500 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
501 if (nullptr == tensorsInfo) {
502 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
503 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
// Map the JS type string onto the native ml_tensor_type_e enum.
507 const std::string& tensorType = args.get(kType).get<std::string>();
508 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
509 PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
511 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
513 ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
517 int index = static_cast<int>(args.get(kIndex).get<double>());
518 result = tensorsInfo->NativeSetTensorType(index, tensorTypeEnum);
520 LogAndReportError(result, &out);
// Handler: allocates a TensorsData buffer matching the given TensorsInfo and
// returns both the new data id and the info id it is bound to.
526 void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out) {
527 ScopeLogger("args: %s", args.serialize().c_str());
528 CHECK_ARGS(args, kTensorsInfoId, double, out);
530 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
531 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
532 if (nullptr == tensorsInfo) {
533 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
534 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
// TensorsData is created through the info manager so the two stay linked.
538 TensorsData* tensorsData = GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
540 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
541 ("Could not create TensorsData"));
545 out[kTensorsDataId] = picojson::value(static_cast<double>(tensorsData->Id()));
546 out[kTensorsInfoId] = picojson::value(static_cast<double>(tensorsData->TensorsInfoId()));
// Handler: deep-copies the TensorsInfo identified by args.tensorsInfoId and
// returns the clone's id.
550 void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::object& out) {
551 ScopeLogger("args: %s", args.serialize().c_str());
552 CHECK_ARGS(args, kTensorsInfoId, double, out);
554 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
555 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
556 if (nullptr == tensorsInfo) {
557 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
558 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
562 TensorsInfo* cloned = GetTensorsInfoManager().CloneTensorsInfo(tensorsInfo);
563 if (nullptr == cloned) {
564 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
565 ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
// NOTE(review): literal "id" instead of the kId constant used elsewhere —
// same string value, but kId would be consistent with the rest of the file.
569 out["id"] = picojson::value(static_cast<double>(cloned->Id()));
// Handler: compares two TensorsInfo objects (args.tensorsInfoId vs
// args.otherId) and reports the boolean result of Equals().
573 void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::object& out) {
574 ScopeLogger("args: %s", args.serialize().c_str());
575 CHECK_ARGS(args, kTensorsInfoId, double, out);
576 CHECK_ARGS(args, kOtherId, double, out);
578 int firstId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
579 int secondId = static_cast<int>(args.get(kOtherId).get<double>());
581 TensorsInfo* first = GetTensorsInfoManager().GetTensorsInfo(firstId);
582 if (nullptr == first) {
583 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
584 ("Could not find TensorsInfo handle with given id: %d", firstId));
588 TensorsInfo* second = GetTensorsInfoManager().GetTensorsInfo(secondId);
589 if (nullptr == second) {
590 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
591 ("Could not find TensorsInfo handle with given id: %d", secondId));
595 bool equals = first->Equals(second);
596 picojson::value val = picojson::value{equals};
597 ReportSuccess(val, out);
// Handler: releases the TensorsInfo identified by args.tensorsInfoId.
600 void MlInstance::MLTensorsInfoDispose(const picojson::value& args, picojson::object& out) {
601 ScopeLogger("args: %s", args.serialize().c_str());
602 CHECK_ARGS(args, kTensorsInfoId, double, out);
603 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
605 PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
607 LogAndReportError(result, &out);
// Handler: releases a TensorsData object and its underlying TensorsInfo.
// Disposal is refused (see DisposableFromJS check) for data not owned by JS.
612 void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::object& out) {
613 ScopeLogger("args: %s", args.serialize().c_str());
614 CHECK_ARGS(args, kTensorsDataId, double, out);
615 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
617 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
618 if (nullptr == tensors_data) {
619 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
620 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
// Some TensorsData instances are platform-owned and must not be freed from JS.
624 if (!tensors_data->DisposableFromJS()) {
629 // Dispose underlying tensorsInfo
630 PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
632 LogAndReportError(result, &out);
636 result = GetTensorsDataManager().DisposeTensorsData(tensors_data_id);
638 LogAndReportError(result, &out);
// Handler (JSON variant): reads a slice of one tensor's raw bytes — selected by
// index, a per-dimension start location and size — and returns it to JS as a
// binary-in-string encoded buffer plus its type string and shape array.
644 void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
645 ScopeLogger("args: %s", args.serialize().c_str());
646 CHECK_ARGS(args, kTensorsDataId, double, out);
647 CHECK_ARGS(args, kIndex, double, out);
648 CHECK_ARGS(args, kLocation, picojson::array, out);
649 CHECK_ARGS(args, kSize, picojson::array, out);
651 int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
652 int index = static_cast<int>(args.get(kIndex).get<double>());
654 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
655 if (nullptr == tensors_data) {
656 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
657 ("Could not find TensorsData handle with given id: %d", tensor_data_id));
// Per-dimension start offsets of the requested slice.
661 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
662 PlatformResult result =
663 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
665 LogAndReportError(result, &out);
// Full tensor dimensions — needed to validate/clamp the requested size.
669 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
670 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
672 LogAndReportError(result, &out);
676 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
677 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
680 LogAndReportError(result, &out);
684 TensorRawData raw_data;
685 result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
687 LogAndReportError(result, &out);
// Encode the raw bytes into a JSON-safe string (decoded back on the JS side).
691 std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
692 out[kBuffer] = picojson::value(picojson::string_type, true);
693 common::encode_binary_in_string(out_data, out[kBuffer].get<std::string>());
695 out[kType] = picojson::value(raw_data.type_str);
696 picojson::array shape = picojson::array{};
697 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
698 shape.push_back(picojson::value{static_cast<double>(raw_data.shape[i])});
700 out[kShape] = picojson::value{shape};
// Handler (binary variant): same slice extraction as
// MLTensorsDataGetTensorRawData, but the answer is written into a raw byte
// vector (length-prefixed JSON metadata followed by the length-prefixed
// buffer) instead of a JSON object — avoids string-encoding large buffers.
705 void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args,
706 std::vector<uint8_t>* out) {
707 ScopeLogger("args: %s", args.serialize().c_str());
708 // TODO handle errors to out
// CHECK_ARGS reports into a picojson::object, so it cannot be used with a
// binary out-channel; argument validation is currently skipped here.
709 // CHECK_ARGS(args, kTensorsDataId, double, out);
710 // CHECK_ARGS(args, kIndex, double, out);
711 // CHECK_ARGS(args, kLocation, picojson::array, out);
712 // CHECK_ARGS(args, kSize, picojson::array, out);
714 int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
715 int index = static_cast<int>(args.get(kIndex).get<double>());
717 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
718 if (nullptr == tensors_data) {
719 LoggerE("Could not find TensorsData handle with given id: %d", tensor_data_id);
720 tools::ReportErrorToBinary(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
// Per-dimension start offsets of the requested slice.
725 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
726 PlatformResult result =
727 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
729 LoggerE("Reporting error.");
730 tools::ReportErrorToBinary(result, out);
// Full tensor dimensions — needed to validate/clamp the requested size.
734 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
735 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
737 LoggerE("Reporting error.");
738 tools::ReportErrorToBinary(result, out);
742 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
743 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
746 LoggerE("Reporting error.");
747 tools::ReportErrorToBinary(result, out);
751 TensorRawData raw_data;
752 result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
754 LoggerE("Reporting error.");
755 tools::ReportErrorToBinary(result, out);
// Metadata (type string + shape) travels as JSON inside the binary answer.
759 picojson::value result_json = picojson::value(picojson::object());
760 auto& out_json = result_json.get<picojson::object>();
762 out_json[kType] = picojson::value(raw_data.type_str);
763 picojson::array shape = picojson::array{};
764 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
765 shape.push_back(picojson::value{static_cast<double>(raw_data.shape[i])});
767 out_json[kShape] = picojson::value{shape};
769 std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
// Answer layout:
772 // 4 bytes === JSON length (N)
773 // N bytes === JSON data
774 tools::ReportSuccessToBinary(result_json, out);
775 // 4 bytes === buffer length (M)
776 // M bytes === buffer data
777 tools::ReportDataToBinary(out_data, out);
// Handler: returns the type string of the tensor at args.index inside the
// TensorsData identified by args.tensorsDataId.
780 void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
781 ScopeLogger("args: %s", args.serialize().c_str());
782 CHECK_ARGS(args, kTensorsDataId, double, out);
783 CHECK_ARGS(args, kIndex, double, out);
785 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
786 int index = static_cast<int>(args.get(kIndex).get<double>());
788 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
789 if (nullptr == tensors_data) {
790 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
791 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
// Translate the native enum into the string the JS layer understands.
795 std::string tensor_type_string;
796 PlatformResult result =
797 types::TensorTypeEnum.getName(tensors_data->GetTensorType(index), &tensor_type_string);
799 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
801 ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
805 picojson::value val = picojson::value{tensor_type_string};
806 ReportSuccess(val, out);
// Handler (JSON variant): writes caller-supplied raw bytes (binary-in-string
// encoded in args.buffer) into a slice of one tensor, selected by index,
// per-dimension location and size.
809 void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
811 CHECK_ARGS(args, kTensorsDataId, double, out);
812 CHECK_ARGS(args, kIndex, double, out);
813 CHECK_ARGS(args, kBuffer, std::string, out);
814 CHECK_ARGS(args, kLocation, picojson::array, out);
815 CHECK_ARGS(args, kSize, picojson::array, out);
// Individual fields are logged instead of the whole args blob — presumably to
// keep the (potentially huge) buffer string out of the logs; confirm intent.
816 LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
817 LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
818 LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
819 LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
821 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
822 int index = static_cast<int>(args.get(kIndex).get<double>());
824 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
825 if (nullptr == tensors_data) {
826 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
827 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
// Per-dimension start offsets of the target slice.
831 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
832 PlatformResult result =
833 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
835 LogAndReportError(result, &out);
// Full tensor dimensions — needed to validate/clamp the requested size.
839 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
840 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
842 LogAndReportError(result, &out);
846 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
847 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
850 LogAndReportError(result, &out);
// Decode the binary-in-string payload back into raw bytes before writing.
853 const std::string& str_buffer = args.get(kBuffer).get<std::string>();
854 std::vector<std::uint8_t> buffer;
855 common::decode_binary_from_string(str_buffer, buffer);
857 TensorRawData raw_data{buffer.data(), buffer.size()};
858 result = tensors_data->SetTensorRawData(index, location, size, raw_data);
860 LogAndReportError(result, &out);
867 void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t data_size,
868 picojson::object& out) {
871 METHOD_ID WAS ALREADY REMOVED during message handling
872 other data packed with following format:
874 // 1 byte === methodIndex /// already parsed
875 // 4 byte === JSON lenght (N)
876 // 4 byte === buffer length (M)
877 // N bytest === JSON data
878 // M bytes === buffer data
880 unsigned int call_args_len_begin = 0;
881 unsigned int call_args_len = static_cast<unsigned int>(
882 (data[call_args_len_begin] << 24) + (data[call_args_len_begin + 1] << 16) +
883 (data[call_args_len_begin + 2] << 8) + (data[call_args_len_begin + 3]));
885 unsigned int buffer_len_begin = call_args_len_begin + 4;
886 unsigned int buffer_len = static_cast<unsigned int>(
887 (data[buffer_len_begin] << 24) + (data[buffer_len_begin + 1] << 16) +
888 (data[buffer_len_begin + 2] << 8) + (data[buffer_len_begin + 3]));
890 unsigned int call_args_begin = buffer_len_begin + 4;
891 std::string call_args(data + call_args_begin, call_args_len);
893 picojson::value args;
894 picojson::parse(args, call_args);
896 unsigned int buffer_begin = call_args_begin + call_args_len;
898 CHECK_ARGS(args, kTensorsDataId, double, out);
899 CHECK_ARGS(args, kIndex, double, out);
900 CHECK_ARGS(args, kLocation, picojson::array, out);
901 CHECK_ARGS(args, kSize, picojson::array, out);
902 LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
903 LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
904 LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
905 LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
907 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
908 int index = static_cast<int>(args.get(kIndex).get<double>());
910 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
911 if (nullptr == tensors_data) {
912 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
913 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
917 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
918 PlatformResult result =
919 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
921 LogAndReportError(result, &out);
925 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
926 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
928 LogAndReportError(result, &out);
932 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
933 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
936 LogAndReportError(result, &out);
940 TensorRawData raw_data{reinterpret_cast<uint8_t*>(const_cast<char*>(data + buffer_begin)),
942 result = tensors_data->SetTensorRawData(index, location, size, raw_data);
944 LogAndReportError(result, &out);
951 void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out) {
952 ScopeLogger("args: %s", args.serialize().c_str());
953 CHECK_ARGS(args, kModelPath, std::string, out);
954 CHECK_ARGS(args, kInTensorsInfo, double, out);
955 CHECK_ARGS(args, kOutTensorsInfo, double, out);
956 CHECK_ARGS(args, kFwType, std::string, out);
957 CHECK_ARGS(args, kHwType, std::string, out);
958 CHECK_ARGS(args, kIsDynamicMode, bool, out);
960 const auto& model_path = common::tools::ConvertUriToPath(args.get(kModelPath).get<std::string>());
961 CHECK_STORAGE_ACCESS(model_path, &out);
963 TensorsInfo* in_tensors_info = nullptr;
964 auto inTensorId = static_cast<int>(args.get(kInTensorsInfo).get<double>());
965 if (kNoId != inTensorId) {
966 in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
967 if (nullptr == in_tensors_info) {
968 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
969 ("Could not find TensorsInfo handle with given id: %d", inTensorId));
974 TensorsInfo* out_tensors_info = nullptr;
975 auto outTensorId = static_cast<int>(args.get(kOutTensorsInfo).get<double>());
976 if (kNoId != outTensorId) {
977 out_tensors_info = GetTensorsInfoManager().GetTensorsInfo(outTensorId);
978 if (nullptr == out_tensors_info) {
979 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
980 ("Could not find TensorsInfo handle with given id: %d", outTensorId));
985 ml_nnfw_type_e nnfw_e = ML_NNFW_TYPE_ANY;
986 PlatformResult result =
987 types::NNFWTypeEnum.getValue(args.get(kFwType).get<std::string>(), &nnfw_e);
989 LogAndReportError(result, &out);
993 ml_nnfw_hw_e hw_e = ML_NNFW_HW_ANY;
994 result = types::HWTypeEnum.getValue(args.get(kHwType).get<std::string>(), &hw_e);
996 LogAndReportError(result, &out);
1000 auto is_dynamic_mode = args.get(kIsDynamicMode).get<bool>();
1002 auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
1003 is_dynamic_mode](decltype(out) out) {
1004 PlatformResult result = common::tools::CheckFileAvailability(model_path);
1007 PlatformResult(ErrorCode::NOT_FOUND_ERR, "File does not exist or is not accessible"),
1008 &out, ("File does not exist or is not accessible: %s", model_path.c_str()));
1013 result = single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
1014 is_dynamic_mode, &res_id);
1016 ReportError(result, &out);
1020 out[kId] = picojson::value(static_cast<double>(res_id));
1025 (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
1031 CHECK_ARGS(args, kCallbackId, double, out);
1032 double callback_id = args.get(kCallbackId).get<double>();
1033 this->worker_.add_job([this, callback_id, logic] {
1034 picojson::value response = picojson::value(picojson::object());
1035 picojson::object& async_out = response.get<picojson::object>();
1036 async_out[kCallbackId] = picojson::value(callback_id);
1038 this->PostMessage(response.serialize().c_str());
1046 void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args, picojson::object& out) {
1047 ScopeLogger("args: %s", args.serialize().c_str());
1048 CHECK_ARGS(args, kId, double, out);
1049 CHECK_ARGS(args, kGetInputMode, bool, out);
1051 auto id = static_cast<int>(args.get(kId).get<double>());
1052 // true means gathering input data; false means gathering output data
1053 auto get_input_mode = static_cast<int>(args.get(kGetInputMode).get<bool>());
1056 auto ret = single_manager_.GetNativeTensorsInfo(id, get_input_mode, &res_id);
1058 ReportError(ret, &out);
1062 out[kId] = picojson::value(static_cast<double>(res_id));
1066 void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson::object& out) {
1067 ScopeLogger("args: %s", args.serialize().c_str());
1068 CHECK_ARGS(args, kId, double, out);
1069 CHECK_ARGS(args, kInTensorsInfo, double, out);
1071 auto id = static_cast<int>(args.get(kId).get<double>());
1072 auto inTensorId = static_cast<int>(args.get(kInTensorsInfo).get<double>());
1074 TensorsInfo* in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
1075 if (nullptr == in_tensors_info) {
1076 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1077 ("Could not find TensorsInfo handle with given id: %d", inTensorId));
1081 TensorsInfo* clone = GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
1083 auto ret = single_manager_.SetNativeInputInfo(id, in_tensors_info);
1085 ReportError(ret, &out);
1089 out[kId] = picojson::value(static_cast<double>(clone->Id()));
1093 void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::object& out) {
1094 ScopeLogger("args: %s", args.serialize().c_str());
1095 CHECK_ARGS(args, kId, double, out);
1096 CHECK_ARGS(args, kTensorsDataId, double, out);
1098 int id = static_cast<int>(args.get(kId).get<double>());
1099 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
1101 (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
1103 TensorsData* in_tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
1104 if (async && in_tensors_data) {
1105 // in case of async flow need to prevent destroying entry data during invoke
1106 // from JS, creation of a copy
1107 in_tensors_data = GetTensorsInfoManager().CloneNativeTensorWithData(
1108 in_tensors_data->GetTensorsInfo()->Handle(), in_tensors_data->Handle());
1110 if (nullptr == in_tensors_data) {
1111 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
1112 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
1116 auto logic = [this, id, tensors_data_id, in_tensors_data, async](decltype(out) out) {
1117 TensorsData* out_tensors_data = nullptr;
1118 auto ret = single_manager_.Invoke(id, in_tensors_data, &out_tensors_data);
1120 // in case of async flow, the in_tensor_data with underlying TensorsInfo
1121 // was copied, thus need to be released here
1122 GetTensorsInfoManager().DisposeTensorsInfo(in_tensors_data->GetTensorsInfo());
1123 GetTensorsDataManager().DisposeTensorsData(in_tensors_data);
1126 ReportError(ret, &out);
1130 out[kTensorsDataId] = picojson::value(static_cast<double>(out_tensors_data->Id()));
1131 out[kTensorsInfoId] = picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
1138 CHECK_ARGS(args, kCallbackId, double, out);
1139 double callback_id = args.get(kCallbackId).get<double>();
1140 this->worker_.add_job([this, callback_id, logic] {
1141 picojson::value response = picojson::value(picojson::object());
1142 picojson::object& async_out = response.get<picojson::object>();
1143 async_out[kCallbackId] = picojson::value(callback_id);
1145 this->PostMessage(response.serialize().c_str());
1153 void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::object& out) {
1154 ScopeLogger("args: %s", args.serialize().c_str());
1155 CHECK_ARGS(args, kId, double, out);
1156 CHECK_ARGS(args, kName, std::string, out);
1158 auto id = static_cast<int>(args.get(kId).get<double>());
1159 const auto& name = args.get(kName).get<std::string>();
1161 auto ret = single_manager_.GetValue(id, name, value);
1163 ReportError(ret, &out);
1167 picojson::value val = picojson::value{value};
1168 ReportSuccess(val, out);
1171 void MlInstance::MLSingleShotSetValue(const picojson::value& args, picojson::object& out) {
1172 ScopeLogger("args: %s", args.serialize().c_str());
1173 CHECK_ARGS(args, kId, double, out);
1174 CHECK_ARGS(args, kName, std::string, out);
1175 CHECK_ARGS(args, kValue, std::string, out);
1177 auto id = static_cast<int>(args.get(kId).get<double>());
1178 const auto& name = args.get(kName).get<std::string>();
1179 const auto& value = args.get(kValue).get<std::string>();
1181 auto ret = single_manager_.SetValue(id, name, value);
1183 ReportError(ret, &out);
1190 void MlInstance::MLSingleShotSetTimeout(const picojson::value& args, picojson::object& out) {
1191 ScopeLogger("args: %s", args.serialize().c_str());
1192 CHECK_ARGS(args, kId, double, out);
1193 CHECK_ARGS(args, kTimeout, double, out);
1195 auto id = static_cast<int>(args.get(kId).get<double>());
1196 auto timeout = static_cast<unsigned long>(args.get(kTimeout).get<double>());
1198 auto ret = single_manager_.SetTimeout(id, timeout);
1200 ReportError(ret, &out);
1207 void MlInstance::MLSingleShotClose(const picojson::value& args, picojson::object& out) {
1208 ScopeLogger("args: %s", args.serialize().c_str());
1209 CHECK_ARGS(args, kId, double, out);
1211 auto id = static_cast<int>(args.get(kId).get<double>());
1213 auto ret = single_manager_.Close(id);
1215 ReportError(ret, &out);
1224 bool CreatePipelineArgumentsAreInvalid(const picojson::value& args) {
1227 auto arguments_valid = args.get(kId).is<double>();
1228 arguments_valid &= args.get(kDefinition).is<std::string>();
1229 arguments_valid &= (args.get(kPipelineStateChangeListenerName).is<std::string>() ||
1230 args.get(kPipelineStateChangeListenerName).is<picojson::null>());
1231 LoggerD("CreatePipeline arguments are %s", arguments_valid ? "valid" : "invalid");
1233 return !arguments_valid;
1238 void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
1239 picojson::object& out) {
1240 ScopeLogger("args: %s", args.serialize().c_str());
1242 if (CreatePipelineArgumentsAreInvalid(args)) {
1243 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"}, &out);
1247 auto id = static_cast<int>(args.get(kId).get<double>());
1248 auto definition = args.get(kDefinition).get<std::string>();
1249 auto state_change_listener_name =
1250 args.get(kPipelineStateChangeListenerName).is<std::string>()
1251 ? args.get(kPipelineStateChangeListenerName).get<std::string>()
1254 auto ret = pipeline_manager_.CreatePipeline(id, definition, state_change_listener_name);
1257 ReportError(ret, &out);
1264 void MlInstance::MLPipelineGetState(const picojson::value& args, picojson::object& out) {
1265 ScopeLogger("args: %s", args.serialize().c_str());
1267 if (!args.get(kId).is<double>()) {
1268 LoggerD("id is not a number");
1269 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1272 auto id = static_cast<int>(args.get(kId).get<double>());
1274 picojson::value state_value{std::string{}};
1275 std::string* state_ptr = &state_value.get<std::string>();
1276 auto ret = pipeline_manager_.GetPipelineState(id, state_ptr);
1278 ReportError(ret, &out);
1282 ReportSuccess(state_value, out);
1285 void MlInstance::MLPipelineStart(const picojson::value& args, picojson::object& out) {
1286 ScopeLogger("args: %s", args.serialize().c_str());
1288 CHECK_ARGS(args, kId, double, out);
1290 auto id = static_cast<int>(args.get(kId).get<double>());
1292 PlatformResult result = pipeline_manager_.Start(id);
1295 ReportError(result, &out);
1302 void MlInstance::MLPipelineStop(const picojson::value& args, picojson::object& out) {
1303 ScopeLogger("args: %s", args.serialize().c_str());
1305 CHECK_ARGS(args, kId, double, out);
1307 auto id = static_cast<int>(args.get(kId).get<double>());
1309 PlatformResult result = pipeline_manager_.Stop(id);
1312 LogAndReportError(result, &out);
1319 void MlInstance::MLPipelineDispose(const picojson::value& args, picojson::object& out) {
1320 ScopeLogger("args: %s", args.serialize().c_str());
1322 if (!args.get(kId).is<double>()) {
1323 LoggerD("id is not a number");
1324 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1328 auto id = static_cast<int>(args.get(kId).get<double>());
1329 auto ret = pipeline_manager_.DisposePipeline(id);
1332 ReportError(ret, &out);
1339 void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::object& out) {
1340 ScopeLogger("args: %s", args.serialize().c_str());
1342 CHECK_ARGS(args, kId, double, out);
1343 CHECK_ARGS(args, kName, std::string, out);
1345 auto name = args.get(kName).get<std::string>();
1346 auto id = static_cast<int>(args.get(kId).get<double>());
1348 PlatformResult result = pipeline_manager_.GetNodeInfo(id, name);
1351 LogAndReportError(result, &out);
1358 void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::object& out) {
1359 ScopeLogger("args: %s", args.serialize().c_str());
1361 CHECK_ARGS(args, kId, double, out);
1362 CHECK_ARGS(args, kName, std::string, out);
1364 auto name = args.get(kName).get<std::string>();
1365 auto id = static_cast<int>(args.get(kId).get<double>());
1367 PlatformResult result = pipeline_manager_.GetSource(id, name);
1370 LogAndReportError(result, &out);
1377 void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::object& out) {
1378 ScopeLogger("args: %s", args.serialize().c_str());
1380 if (!args.get(kId).is<double>()) {
1381 LoggerD("id is not a number");
1382 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1386 if (!args.get(kName).is<std::string>()) {
1387 LoggerD("name is not a string");
1388 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid name"}, &out);
1392 auto name = args.get(kName).get<std::string>();
1393 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1396 auto ret = pipeline_manager_.GetSwitch(name, pipeline_id, &type);
1398 LogAndReportError(ret, &out);
1402 out["type"] = picojson::value{type};
1406 void MlInstance::MLPipelineGetValve(const picojson::value& args, picojson::object& out) {
1407 ScopeLogger("args: %s", args.serialize().c_str());
1409 CHECK_ARGS(args, kId, double, out);
1410 CHECK_ARGS(args, kName, std::string, out);
1412 auto name = args.get(kName).get<std::string>();
1413 auto pipeline_id = args.get(kId).get<double>();
1415 auto ret = pipeline_manager_.GetValve(name, pipeline_id);
1417 LogAndReportError(ret, &out);
1424 void MlInstance::MLPipelineRegisterSinkListener(const picojson::value& args,
1425 picojson::object& out) {
1426 ScopeLogger("args: %s", args.serialize().c_str());
1428 CHECK_ARGS(args, kId, double, out);
1429 CHECK_ARGS(args, kName, std::string, out);
1430 CHECK_ARGS(args, kListenerName, std::string, out);
1432 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1433 auto sink_name = args.get(kName).get<std::string>();
1434 auto listener_name = args.get(kListenerName).get<std::string>();
1436 auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id, listener_name);
1438 LogAndReportError(ret, &out);
1445 void MlInstance::MLPipelineUnregisterSinkListener(const picojson::value& args,
1446 picojson::object& out) {
1447 ScopeLogger("args: %s", args.serialize().c_str());
1449 CHECK_ARGS(args, kId, double, out);
1450 CHECK_ARGS(args, kName, std::string, out);
1452 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1453 auto sink_name = args.get(kName).get<std::string>();
1455 auto ret = pipeline_manager_.UnregisterSinkListener(sink_name, pipeline_id);
1457 LogAndReportError(ret, &out);
1464 void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& args,
1465 picojson::object& out) {
1466 ScopeLogger("args: %s", args.serialize().c_str());
1468 CHECK_ARGS(args, kName, std::string, out);
1469 CHECK_ARGS(args, kListenerName, std::string, out);
1470 CHECK_ARGS(args, kInputTensorsInfoId, double, out);
1471 CHECK_ARGS(args, kOutputTensorsInfoId, double, out);
1473 const auto& custom_filter_name = args.get(kName).get<std::string>();
1474 const auto& listener_name = args.get(kListenerName).get<std::string>();
1475 auto input_tensors_info_id = static_cast<int>(args.get(kInputTensorsInfoId).get<double>());
1476 auto output_tensors_info_id = static_cast<int>(args.get(kOutputTensorsInfoId).get<double>());
1478 TensorsInfo* input_tensors_info_ptr =
1479 GetTensorsInfoManager().GetTensorsInfo(input_tensors_info_id);
1480 if (!input_tensors_info_ptr) {
1482 PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1483 ("Could not find TensorsInfo handle with given id: %d", input_tensors_info_id));
1487 TensorsInfo* output_tensors_info_ptr =
1488 GetTensorsInfoManager().GetTensorsInfo(output_tensors_info_id);
1489 if (!output_tensors_info_ptr) {
1491 PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1492 ("Could not find TensorsInfo handle with given id: %d", output_tensors_info_id));
1496 auto ret = pipeline_manager_.RegisterCustomFilter(
1497 custom_filter_name, listener_name, input_tensors_info_ptr, output_tensors_info_ptr);
1499 LogAndReportError(ret, &out);
1506 void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args,
1507 picojson::object& out) {
1508 ScopeLogger("args: %s", args.serialize().c_str());
1510 CHECK_ARGS(args, kName, std::string, out);
1511 CHECK_ARGS(args, kStatus, double, out);
1512 CHECK_ARGS(args, kRequestId, double, out);
1514 const auto& custom_filter_name = args.get(kName).get<std::string>();
1515 auto status = static_cast<int>(args.get(kStatus).get<double>());
1516 auto request_id = static_cast<int>(args.get(kRequestId).get<double>());
1518 auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status);
1520 LogAndReportError(ret, &out);
1527 void MlInstance::MLPipelineManagerUnregisterCustomFilter(const picojson::value& args,
1528 picojson::object& out) {
1529 ScopeLogger("args: %s", args.serialize().c_str());
1531 CHECK_ARGS(args, kName, std::string, out);
1533 const auto& custom_filter_name = args.get(kName).get<std::string>();
1535 auto ret = pipeline_manager_.UnregisterCustomFilter(custom_filter_name);
1537 LogAndReportError(ret, &out);
1544 void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, picojson::object& out) {
1545 ScopeLogger("args: %s", args.serialize().c_str());
1547 CHECK_ARGS(args, kId, double, out);
1548 CHECK_ARGS(args, kNodeName, std::string, out);
1549 CHECK_ARGS(args, kName, std::string, out);
1550 CHECK_ARGS(args, kType, std::string, out);
1552 auto id = static_cast<int>(args.get(kId).get<double>());
1553 const auto& name = args.get(kName).get<std::string>();
1554 const auto& node_name = args.get(kNodeName).get<std::string>();
1555 const auto& type = args.get(kType).get<std::string>();
1557 PlatformResult result = pipeline_manager_.getProperty(id, node_name, name, type, &out);
1560 LogAndReportError(result, &out);
1567 void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, picojson::object& out) {
1568 ScopeLogger("args: %s", args.serialize().c_str());
1570 CHECK_ARGS(args, kId, double, out);
1571 CHECK_ARGS(args, kNodeName, std::string, out);
1572 CHECK_ARGS(args, kName, std::string, out);
1573 CHECK_ARGS(args, kType, std::string, out);
1575 auto id = static_cast<int>(args.get(kId).get<double>());
1576 const auto& name = args.get(kName).get<std::string>();
1577 const auto& node_name = args.get(kNodeName).get<std::string>();
1578 const auto& type = args.get(kType).get<std::string>();
1580 CHECK_EXIST(args, kProperty, out);
1581 if (kBOOLEAN == type) {
1582 CHECK_TYPE(args, kProperty, bool, out, ErrorCode::TYPE_MISMATCH_ERR);
1583 } else if (kSTRING == type) {
1584 CHECK_TYPE(args, kProperty, std::string, out, ErrorCode::TYPE_MISMATCH_ERR);
1586 CHECK_TYPE(args, kProperty, double, out, ErrorCode::TYPE_MISMATCH_ERR);
1588 const picojson::value& property = args.get(kProperty);
1590 PlatformResult result = pipeline_manager_.setProperty(id, node_name, name, type, property);
1592 LogAndReportError(result, &out);
1599 void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out) {
1600 ScopeLogger("args: [%s]", args.serialize().c_str());
1602 CHECK_ARGS(args, kId, double, out);
1603 CHECK_ARGS(args, kName, std::string, out);
1605 auto id = static_cast<int>(args.get(kId).get<double>());
1606 const auto& name = args.get(kName).get<std::string>();
1609 PlatformResult result = pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
1611 LogAndReportError(result, &out);
1618 void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson::object& out) {
1619 ScopeLogger("args: [%s]", args.serialize().c_str());
1621 CHECK_ARGS(args, kId, double, out);
1622 CHECK_ARGS(args, kName, std::string, out);
1623 CHECK_ARGS(args, kTensorsDataId, double, out);
1625 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1626 auto& source_name = args.get(kName).get<std::string>();
1627 auto tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
1629 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
1631 if (nullptr == tensors_data) {
1632 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
1633 ("Could not get TensorData handle with given id: %d", tensor_data_id));
1637 auto ret = pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
1639 LogAndReportError(ret, &out);
1646 void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojson::object& out) {
1647 ScopeLogger("args: [%s]", args.serialize().c_str());
1649 CHECK_ARGS(args, kId, double, out);
1650 CHECK_ARGS(args, kName, std::string, out);
1652 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1653 auto& switch_name = args.get(kName).get<std::string>();
1655 picojson::array pad_list;
1656 auto ret = pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
1658 LogAndReportError(ret, &out);
1662 ReportSuccess(picojson::value{std::move(pad_list)}, out);
1665 void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::object& out) {
1666 ScopeLogger("args: [%s]", args.serialize().c_str());
1668 CHECK_ARGS(args, kId, double, out);
1669 CHECK_ARGS(args, kName, std::string, out);
1670 CHECK_ARGS(args, kPadName, std::string, out);
1672 auto pipeline_id = args.get(kId).get<double>();
1673 auto& switch_name = args.get(kName).get<std::string>();
1674 auto& pad_name = args.get(kPadName).get<std::string>();
1676 auto ret = pipeline_manager_.SwitchSelect(pipeline_id, switch_name, pad_name);
1678 LogAndReportError(ret, &out);
1685 void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out) {
1686 ScopeLogger("args: %s", args.serialize().c_str());
1688 CHECK_ARGS(args, kId, double, out);
1689 CHECK_ARGS(args, kName, std::string, out);
1690 CHECK_ARGS(args, kOpen, bool, out);
1692 auto name = args.get(kName).get<std::string>();
1693 auto pipeline_id = args.get(kId).get<double>();
1694 auto open = args.get(kOpen).get<bool>();
1696 auto ret = pipeline_manager_.ValveSetOpen(pipeline_id, name, open);
1698 LogAndReportError(ret, &out);
1705 void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out) {
1706 ScopeLogger("args: %s", args.serialize().c_str());
1708 CHECK_ARGS(args, kId, double, out);
1709 CHECK_ARGS(args, kName, std::string, out);
1711 auto name = args.get(kName).get<std::string>();
1712 auto pipeline_id = args.get(kId).get<double>();
1715 auto ret = pipeline_manager_.ValveIsOpen(pipeline_id, name, &open);
1717 LogAndReportError(ret, &out);
1721 ReportSuccess(picojson::value{open}, out);
1724 void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args, picojson::object& out) {
1725 ScopeLogger("args: %s", args.serialize().c_str());
1726 CHECK_ARGS(args, kId, double, out);
1727 CHECK_ARGS(args, kName, std::string, out);
1728 CHECK_ARGS(args, kValue, std::string, out);
1730 auto id = static_cast<int>(args.get(kId).get<double>());
1731 auto name = args.get(kName).get<std::string>();
1732 auto value = args.get(kValue).get<std::string>();
1734 PlatformResult result = trainer_manager_.LayerSetProperty(id, name, value);
1736 ReportError(result, &out);
1742 void MlInstance::MLTrainerLayerCreate(const picojson::value& args, picojson::object& out) {
1743 ScopeLogger("args: %s", args.serialize().c_str());
1744 CHECK_ARGS(args, kType, std::string, out);
1747 ml_train_layer_type_e layer_type = ML_TRAIN_LAYER_TYPE_UNKNOWN;
1748 PlatformResult result = types::LayerTypeEnum.getValue(
1749 args.get(kType).get<std::string>(), &layer_type);
1751 LogAndReportError(result, &out);
1755 result = trainer_manager_.CreateLayer(id, layer_type);
1757 ReportError(result, &out);
1760 out[kId] = picojson::value(static_cast<double>(id));
1764 void MlInstance::MLTrainerLayerGetName(const picojson::value& args,
1765 picojson::object& out) {
1766 ScopeLogger("args: %s", args.serialize().c_str());
1767 CHECK_ARGS(args, kId, double, out);
1769 auto id = static_cast<int>(args.get(kId).get<double>());
1772 PlatformResult result = trainer_manager_.LayerGetName(id, name);
1774 ReportError(result, &out);
1778 out[kName] = picojson::value(static_cast<std::string>(name));
1782 void MlInstance::MLTrainerLayerDispose(const picojson::value& args,
1783 picojson::object& out) {
1784 ScopeLogger("args: %s", args.serialize().c_str());
1785 CHECK_ARGS(args, kId, double, out);
1787 auto id = static_cast<int>(args.get(kId).get<double>());
1789 PlatformResult result = trainer_manager_.LayerDispose(id);
1791 ReportError(result, &out);
1797 void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out) {
1798 ScopeLogger("args: %s", args.serialize().c_str());
1799 CHECK_ARGS(args, kId, double, out);
1800 CHECK_ARGS(args, kName, std::string, out);
1801 CHECK_ARGS(args, kValue, std::string, out);
1803 auto id = static_cast<int>(args.get(kId).get<double>());
1804 auto name = args.get(kName).get<std::string>();
1805 auto value = args.get(kValue).get<std::string>();
1807 PlatformResult result = trainer_manager_.OptimizerSetProperty(id, name, value);
1809 ReportError(result, &out);
1815 void MlInstance::MLTrainerOptimizerCreate(const picojson::value& args, picojson::object& out) {
1816 ScopeLogger("args: %s", args.serialize().c_str());
1817 CHECK_ARGS(args, kType, std::string, out);
1820 ml_train_optimizer_type_e optimizer_type = ML_TRAIN_OPTIMIZER_TYPE_UNKNOWN;
1821 PlatformResult result = types::OptimizerTypeEnum.getValue(
1822 args.get(kType).get<std::string>(), &optimizer_type);
1824 LogAndReportError(result, &out);
1828 result = trainer_manager_.CreateOptimizer(id, optimizer_type);
1830 ReportError(result, &out);
1833 out[kId] = picojson::value(static_cast<double>(id));
1837 void MlInstance::MLTrainerOptimizerDispose(const picojson::value& args,
1838 picojson::object& out) {
1839 ScopeLogger("args: %s", args.serialize().c_str());
1840 CHECK_ARGS(args, kId, double, out);
1842 auto id = static_cast<int>(args.get(kId).get<double>());
1844 PlatformResult result = trainer_manager_.OptimizerDispose(id);
1846 ReportError(result, &out);
1852 void MlInstance::MLTrainerModelCreate(const picojson::value& args, picojson::object& out) {
1853 ScopeLogger("args: %s", args.serialize().c_str());
1855 PlatformResult result;
1856 if (args.contains(kConfigPath)) {
1857 // create model with config file
1858 CHECK_ARGS(args, kConfigPath, std::string, out);
1859 const auto& config_path = common::tools::ConvertUriToPath(
1860 args.get(kConfigPath).get<std::string>());
1861 CHECK_STORAGE_ACCESS(config_path, &out);
1863 result = trainer_manager_.CreateModel(id, config_path);
1865 result = trainer_manager_.CreateModel(id);
1868 ReportError(result, &out);
1871 out[kId] = picojson::value(static_cast<double>(id));
1875 void MlInstance::MLTrainerModelCompile(const picojson::value& args, picojson::object& out) {
1876 ScopeLogger("args: %s", args.serialize().c_str());
1877 CHECK_ARGS(args, kId, double, out);
1878 CHECK_ARGS(args, kOptions, picojson::object, out);
1880 auto id = static_cast<int>(args.get(kId).get<double>());
1881 auto options = args.get(kOptions).get<picojson::object>();
1883 PlatformResult result = trainer_manager_.ModelCompile(id, options);
1886 ReportError(result, &out);
1892 void MlInstance::MLTrainerModelAddLayer(const picojson::value& args, picojson::object& out) {
1893 ScopeLogger("args: %s", args.serialize().c_str());
1894 CHECK_ARGS(args, kId, double, out);
1895 CHECK_ARGS(args, kLayerId, double, out);
1897 auto id = static_cast<int>(args.get(kId).get<double>());
1898 auto layerId = static_cast<int>(args.get(kLayerId).get<double>());
1900 PlatformResult result = trainer_manager_.ModelAddLayer(id, layerId);
1903 ReportError(result, &out);
// Runs (trains) a compiled model asynchronously. Arguments are validated on
// the caller's thread, then the actual ModelRun call is queued on worker_;
// the result is delivered back to JS via PostMessage with the original
// kCallbackId attached.
// NOTE(review): lines are elided in this excerpt (numbering jumps around
// ReportError and before add_job) — confirm the error-guard / success path
// against the full file.
1909 void MlInstance::MLTrainerModelRun(const picojson::value& args, picojson::object& out) {
1910 ScopeLogger("args: %s", args.serialize().c_str());
1911 CHECK_ARGS(args, kId, double, out);
1912 CHECK_ARGS(args, kOptions, picojson::object, out);
1913 CHECK_ARGS(args, kCallbackId, double, out);
1915 auto id = static_cast<int>(args.get(kId).get<double>());
1916 auto options = args.get(kOptions).get<picojson::object>();
1917 auto cb_id = args.get(kCallbackId).get<double>();
// id/options are captured by copy, so the lambda stays valid after this
// handler returns. `this` is captured raw — assumes MlInstance outlives every
// queued worker job; presumably guaranteed by instance lifetime — TODO confirm.
1919 auto async_logic = [this, id, options](decltype(out) out) {
1920 PlatformResult result;
1922 result = trainer_manager_.ModelRun(id, options);
1925 ReportError(result, &out);
// Queue the job; the worker thread builds the response object, tags it with
// the callback id, runs the training synchronously, then posts it back.
1932 this->worker_.add_job([this, cb_id, async_logic] {
1933 picojson::value response = picojson::value(picojson::object());
1934 picojson::object& async_out = response.get<picojson::object>();
1935 async_out[kCallbackId] = picojson::value(cb_id);
1936 async_logic(async_out);
1937 this->PostMessage(response.serialize().c_str());
// Produces a textual summary of a model. kLevel (a string) is mapped to a
// native ml_train_summary_type_e via SummaryTypeEnum; the resulting summary
// text is returned under kSummary.
// NOTE(review): lines are elided in this excerpt (numbering jumps around the
// two error reports) — presumably failure guards / early returns; confirm
// against the full file.
1943 void MlInstance::MLTrainerModelSummarize(const picojson::value& args, picojson::object& out) {
1944 ScopeLogger("args: %s", args.serialize().c_str());
1945 CHECK_ARGS(args, kId, double, out);
1946 CHECK_ARGS(args, kLevel, std::string, out);
1948 auto id = static_cast<int>(args.get(kId).get<double>());
// Default to MODEL-level summary; overwritten by the enum lookup below.
1950 ml_train_summary_type_e summaryType = ML_TRAIN_SUMMARY_MODEL;
1951 PlatformResult result = types::SummaryTypeEnum.getValue(
1952 args.get(kLevel).get<std::string>(), &summaryType);
1954 LogAndReportError(result, &out);
1958 std::string summary;
1960 result = trainer_manager_.ModelSummarize(id, summaryType, summary);
1963 ReportError(result, &out);
1967 out[kSummary] = picojson::value(summary);
// Compares caller-supplied expected metrics (train loss, validation loss,
// validation accuracy) against the model's actual metrics via
// TrainerManager::CheckMetrics and returns the boolean verdict.
// NOTE(review): lines are elided in this excerpt (numbering jumps around
// ReportError) — presumably a failure guard / early return; confirm against
// the full file.
1971 void MlInstance::MLTrainerModelCheckMetrics(const picojson::value& args,
1972 picojson::object& out) {
// Keys used only by this handler, hence declared locally rather than at
// file scope.
1973 const std::string kTrainLoss = "trainLoss";
1974 const std::string kValidLoss = "validLoss";
1975 const std::string kValidAccuracy = "validAccuracy";
1976 ScopeLogger("args: %s", args.serialize().c_str());
1977 CHECK_ARGS(args, kId, double, out);
1978 CHECK_ARGS(args, kTrainLoss, double, out);
1979 CHECK_ARGS(args, kValidLoss, double, out);
1980 CHECK_ARGS(args, kValidAccuracy, double, out);
1982 auto id = static_cast<int>(args.get(kId).get<double>());
1983 auto train_loss = args.get(kTrainLoss).get<double>();
1984 auto valid_loss = args.get(kValidLoss).get<double>();
1985 auto valid_accuracy = args.get(kValidAccuracy).get<double>();
1987 bool as_expected = false;
1988 PlatformResult result = trainer_manager_.CheckMetrics(
1989 id, train_loss, valid_loss, valid_accuracy, &as_expected);
1991 ReportError(result, &out);
// Success payload is the bare boolean verdict.
1995 ReportSuccess(picojson::value(as_expected), out);
// Saves a model to kSavePath. kSaveFormat (a string) is mapped to the native
// ml_train_model_format_e via ModelSaveFormatEnum before delegating to
// TrainerManager::ModelSave.
// NOTE(review): lines are elided in this excerpt (numbering jumps around the
// error reports) — presumably failure guards / ReportSuccess; confirm against
// the full file.
1998 void MlInstance::MLTrainerModelSave(const picojson::value& args,
1999 picojson::object& out) {
2000 ScopeLogger("args: %s", args.serialize().c_str());
2001 CHECK_ARGS(args, kId, double, out);
2002 CHECK_ARGS(args, kSavePath, std::string, out);
2003 CHECK_ARGS(args, kSaveFormat, std::string, out);
2005 auto id = static_cast<int>(args.get(kId).get<double>());
2006 auto path = args.get(kSavePath).get<std::string>();
// Default format; overwritten by the enum lookup below.
2008 ml_train_model_format_e model_format = ML_TRAIN_MODEL_FORMAT_INI_WITH_BIN;
2009 PlatformResult result = types::ModelSaveFormatEnum.getValue(
2010 args.get(kSaveFormat).get<std::string>(), &model_format);
2012 LogAndReportError(result, &out);
2016 result = trainer_manager_.ModelSave(id, path, model_format);
2018 ReportError(result, &out);
// Loads a model from kSavePath, mirroring MLTrainerModelSave: the same
// kSaveFormat string keys select the native ml_train_model_format_e.
// NOTE(review): lines are elided in this excerpt (numbering jumps around the
// error reports) — presumably failure guards / ReportSuccess; confirm against
// the full file.
2024 void MlInstance::MLTrainerModelLoad(const picojson::value& args,
2025 picojson::object& out) {
2026 ScopeLogger("args: %s", args.serialize().c_str());
2027 CHECK_ARGS(args, kId, double, out);
2028 CHECK_ARGS(args, kSavePath, std::string, out);
2029 CHECK_ARGS(args, kSaveFormat, std::string, out);
2031 auto id = static_cast<int>(args.get(kId).get<double>());
2032 auto path = args.get(kSavePath).get<std::string>();
// Default format; overwritten by the enum lookup below.
2034 ml_train_model_format_e model_format = ML_TRAIN_MODEL_FORMAT_INI_WITH_BIN;
2035 PlatformResult result = types::ModelSaveFormatEnum.getValue(
2036 args.get(kSaveFormat).get<std::string>(), &model_format);
2038 LogAndReportError(result, &out);
2042 result = trainer_manager_.ModelLoad(id, path, model_format);
2044 ReportError(result, &out);
// Associates an existing dataset (kDatasetId) with an existing model (kId);
// both are numeric handles resolved inside trainer_manager_.
// NOTE(review): lines are elided in this excerpt (numbering jumps around
// ReportError) — presumably an error guard / ReportSuccess; confirm against
// the full file.
2050 void MlInstance::MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out) {
2051 ScopeLogger("args: %s", args.serialize().c_str());
2052 CHECK_ARGS(args, kId, double, out);
2053 CHECK_ARGS(args, kDatasetId, double, out);
2055 auto id = static_cast<int>(args.get(kId).get<double>());
2056 auto datasetId = static_cast<int>(args.get(kDatasetId).get<double>());
2058 PlatformResult result = trainer_manager_.ModelSetDataset(id, datasetId);
2061 ReportError(result, &out);
// Associates an existing optimizer (kOptimizerId) with an existing model
// (kId); both are numeric handles resolved inside trainer_manager_.
// NOTE(review): lines are elided in this excerpt (numbering jumps around
// ReportError) — presumably an error guard / ReportSuccess; confirm against
// the full file.
2067 void MlInstance::MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out) {
2068 ScopeLogger("args: %s", args.serialize().c_str());
2069 CHECK_ARGS(args, kId, double, out);
2070 CHECK_ARGS(args, kOptimizerId, double, out);
2072 auto id = static_cast<int>(args.get(kId).get<double>());
2073 auto optimizerId = static_cast<int>(args.get(kOptimizerId).get<double>());
2075 PlatformResult result = trainer_manager_.ModelSetOptimizer(id, optimizerId);
2078 ReportError(result, &out);
// Releases the native model identified by kId via
// TrainerManager::ModelDispose.
// NOTE(review): lines are elided in this excerpt (numbering jumps around
// ReportError) — presumably an error guard / ReportSuccess; confirm against
// the full file.
2084 void MlInstance::MLTrainerModelDispose(const picojson::value& args,
2085 picojson::object& out) {
2086 ScopeLogger("args: %s", args.serialize().c_str());
2087 CHECK_ARGS(args, kId, double, out);
2089 auto id = static_cast<int>(args.get(kId).get<double>());
2091 PlatformResult result = trainer_manager_.ModelDispose(id);
2093 ReportError(result, &out);
// Creates a file-backed dataset from three file paths (train/valid/test) and
// returns the newly-assigned dataset handle under kId.
// NOTE(review): the declaration of `id` is on a line elided from this excerpt
// (original numbering jumps 2104 -> 2107); presumably an out-parameter int
// filled by CreateFileDataset — confirm against the full file. The jump around
// ReportError likewise suggests an elided error guard.
2099 void MlInstance::MLTrainerDatasetCreateFromFile(const picojson::value& args,
2100 picojson::object& out) {
2101 ScopeLogger("args: %s", args.serialize().c_str());
2102 CHECK_ARGS(args, kTrain, std::string, out);
2103 CHECK_ARGS(args, kValid, std::string, out);
2104 CHECK_ARGS(args, kTest, std::string, out);
2107 const std::string& train_file_path = args.get(kTrain).get<std::string>();
2108 const std::string& valid_file_path = args.get(kValid).get<std::string>();
2109 const std::string& test_file_path = args.get(kTest).get<std::string>();
2111 PlatformResult result =
2112 trainer_manager_.CreateFileDataset(id, train_file_path, valid_file_path, test_file_path);
2114 ReportError(result, &out);
// Hand the numeric dataset handle back to the JS layer.
2117 out[kId] = picojson::value(static_cast<double>(id));
// Sets a single name/value property on a dataset, scoped to one dataset mode
// (kMode string mapped to ml_train_dataset_mode_e via DatasetModeEnum).
// NOTE(review): lines are elided in this excerpt (numbering jumps around the
// two error reports) — presumably failure guards / ReportSuccess; confirm
// against the full file.
2121 void MlInstance::MLTrainerDatasetSetProperty(const picojson::value& args, picojson::object& out) {
2122 ScopeLogger("args: %s", args.serialize().c_str());
2123 CHECK_ARGS(args, kId, double, out);
2124 CHECK_ARGS(args, kName, std::string, out);
2125 CHECK_ARGS(args, kValue, std::string, out);
2126 CHECK_ARGS(args, kMode, std::string, out);
2128 auto id = static_cast<int>(args.get(kId).get<double>());
2129 auto name = args.get(kName).get<std::string>();
2130 auto value = args.get(kValue).get<std::string>();
// Default mode; overwritten by the enum lookup below.
2132 ml_train_dataset_mode_e datasetMode = ML_TRAIN_DATASET_MODE_TRAIN;
2133 PlatformResult result = types::DatasetModeEnum.getValue(
2134 args.get(kMode).get<std::string>(), &datasetMode);
2136 LogAndReportError(result, &out);
2140 result = trainer_manager_.DatasetSetProperty(id, name, value, datasetMode);
2142 ReportError(result, &out);
// Releases the native dataset identified by kId via
// TrainerManager::DatasetDispose.
// NOTE(review): lines are elided in this excerpt (numbering jumps around
// ReportError) — presumably an error guard / ReportSuccess; confirm against
// the full file.
2148 void MlInstance::MLTrainerDatasetDispose(const picojson::value& args,
2149 picojson::object& out) {
2150 ScopeLogger("args: %s", args.serialize().c_str());
2151 CHECK_ARGS(args, kId, double, out);
2153 auto id = static_cast<int>(args.get(kId).get<double>());
2155 PlatformResult result = trainer_manager_.DatasetDispose(id);
2157 ReportError(result, &out);
2171 } // namespace extension