/*
 * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
17 #include "ml_instance.h"
20 #include "common/converter.h"
21 #include "common/logger.h"
22 #include "common/picojson.h"
23 #include "common/platform_result.h"
24 #include "common/tools.h"
26 static_assert(ML_TENSOR_RANK_LIMIT == 4,
27 "This implementation requires different ML_TENSOR_RANK_LIMIT. Please fix the code.");
33 const int kCustomFilterSuccess = 0;
35 const std::string kAsync = "async";
36 const std::string kBOOLEAN = "BOOLEAN";
37 const std::string kBuffer = "buffer";
38 const std::string kCallbackId = "callbackId";
39 const std::string kDataId = "dataId";
40 const std::string kDefinition = "definition";
41 const std::string kDimensions = "dimensions";
42 const std::string kFwType = "fwType";
43 const std::string kGetInputMode = "getInputMode";
44 const std::string kHw = "hw";
45 const std::string kHwType = "hwType";
46 const std::string kId = "id";
47 const std::string kIndex = "index";
48 const std::string kInputTensorsInfoId = "inputTensorsInfoId";
49 const std::string kInTensorsInfo = "inTensorsInfo";
50 const std::string kIsDynamicMode = "isDynamicMode";
51 const std::string kListenerName = "listenerName";
52 const std::string kLocation = "location";
53 const std::string kModelPath = "modelPath";
54 const std::string kName = "name";
55 const std::string kNnfw = "nnfw";
56 const std::string kCustomRequirement = "customRequirement";
57 const std::string kNodeName = "nodeName";
58 const std::string kOpen = "open";
59 const std::string kOtherId = "otherId";
60 const std::string kOutputTensorsInfoId = "outputTensorsInfoId";
61 const std::string kOutTensorsInfo = "outTensorsInfo";
62 const std::string kPadName = "padName";
63 const std::string kPipelineStateChangeListenerName = "listenerName";
64 const std::string kProperty = "property";
65 const std::string kRequestId = "requestId";
66 const std::string kShape = "shape";
67 const std::string kSize = "size";
68 const std::string kStatus = "status";
69 const std::string kSTRING = "STRING";
70 const std::string kTensorsDataId = "tensorsDataId";
71 const std::string kTensorsInfoId = "tensorsInfoId";
72 const std::string kTimeout = "timeout";
73 const std::string kType = "type";
74 const std::string kValue = "value";
77 const std::string kTrain = "train";
78 const std::string kValid = "valid";
79 const std::string kTest = "test";
80 const std::string kOptions = "options";
83 using namespace common;
85 #define CHECK_EXIST(args, name, out) \
86 if (!args.contains(name)) { \
87 std::string msg = std::string(name) + " is required argument"; \
88 LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), &out); \
92 // CHECK_TYPE will throw AbortError by default, but it can be changed by providing
93 // additional parameter to the macro, i.e.:
94 // CHECK_TYPE(args, "name", std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
95 #define CHECK_TYPE(...) CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
96 #define CHECK_TYPE_X(_1, _2, _3, _4, _5, EXC_TYPE, ...) EXC_TYPE
97 #define CHECK_TYPE_5(args, name, type, out, error_type) \
98 if (!args.get(name).is<type>()) { \
99 std::string msg = std::string(name) + " has invalid type"; \
100 LogAndReportError(PlatformResult(error_type, msg), &out); \
103 #define CHECK_TYPE_4(args, name, type, out) \
104 CHECK_TYPE_5(args, name, type, out, ErrorCode::ABORT_ERR)
106 #define CHECK_ARGS(args, name, type, out) \
107 CHECK_EXIST(args, name, out) \
108 CHECK_TYPE(args, name, type, out)
110 MlInstance::MlInstance()
111 : tensors_info_manager_{&tensors_data_manager_},
112 single_manager_{&tensors_info_manager_},
113 pipeline_manager_{this, &tensors_info_manager_, &tensors_data_manager_} {
115 using namespace std::placeholders;
117 #define REGISTER_METHOD(M) RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
118 #define REGISTER_BINARY_METHOD(M) RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
119 #define REGISTER_METHOD_WITH_BINARY_ANWSER(M) \
120 RegisterSyncHandlerWithBinaryAnswer(#M, std::bind(&MlInstance::M, this, _1, _2))
122 REGISTER_METHOD(MLCheckNNFWAvailability);
123 REGISTER_METHOD(MLTensorsInfoCountGetter);
124 REGISTER_METHOD(MLTensorsInfoAddTensorInfo);
125 REGISTER_METHOD(MLTensorsInfoCreate);
126 REGISTER_METHOD(MLTensorsInfoGetDimensions);
127 REGISTER_METHOD(MLTensorsInfoSetDimensions);
128 REGISTER_METHOD(MLTensorsInfoGetTensorName);
129 REGISTER_METHOD(MLTensorsInfoSetTensorName);
130 REGISTER_METHOD(MLTensorsInfoGetTensorSize);
131 REGISTER_METHOD(MLTensorsInfoGetTensorType);
132 REGISTER_METHOD(MLTensorsInfoSetTensorType);
133 REGISTER_METHOD(MLTensorsInfoGetTensorsData);
134 REGISTER_METHOD(MLTensorsInfoClone);
135 REGISTER_METHOD(MLTensorsInfoEquals);
136 REGISTER_METHOD(MLTensorsInfoDispose);
137 REGISTER_METHOD(MLPipelineValveSetOpen);
138 REGISTER_METHOD(MLPipelineValveIsOpen);
140 REGISTER_METHOD(MLTensorsDataDispose);
141 REGISTER_METHOD(MLTensorsDataGetTensorRawData);
142 REGISTER_METHOD_WITH_BINARY_ANWSER(MLTensorsDataGetTensorRawDataBinary);
143 REGISTER_METHOD(MLTensorsDataGetTensorType);
144 REGISTER_METHOD(MLTensorsDataSetTensorRawData);
145 REGISTER_BINARY_METHOD(MLTensorsDataSetTensorRawDataBinary);
147 REGISTER_METHOD(MLSingleManagerOpenModel);
148 REGISTER_METHOD(MLSingleShotGetTensorsInfo);
149 REGISTER_METHOD(MLSingleShotSetInputInfo);
150 REGISTER_METHOD(MLSingleShotInvoke);
151 REGISTER_METHOD(MLSingleShotGetValue);
152 REGISTER_METHOD(MLSingleShotSetValue);
153 REGISTER_METHOD(MLSingleShotSetTimeout);
154 REGISTER_METHOD(MLSingleShotClose);
156 REGISTER_METHOD(MLPipelineManagerCreatePipeline);
157 REGISTER_METHOD(MLPipelineGetState);
158 REGISTER_METHOD(MLPipelineDispose);
159 REGISTER_METHOD(MLPipelineStart);
160 REGISTER_METHOD(MLPipelineStop);
161 REGISTER_METHOD(MLPipelineGetNodeInfo);
162 REGISTER_METHOD(MLPipelineGetSwitch);
163 REGISTER_METHOD(MLPipelineSwitchGetPadList);
164 REGISTER_METHOD(MLPipelineSwitchSelect);
165 REGISTER_METHOD(MLPipelineGetValve);
166 REGISTER_METHOD(MLPipelineNodeInfoGetProperty);
167 REGISTER_METHOD(MLPipelineNodeInfoSetProperty);
168 REGISTER_METHOD(MLPipelineGetSource);
169 REGISTER_METHOD(MLPipelineGetInputTensorsInfo);
170 REGISTER_METHOD(MLPipelineSourceInputData);
171 REGISTER_METHOD(MLPipelineRegisterSinkListener);
172 REGISTER_METHOD(MLPipelineUnregisterSinkListener);
173 REGISTER_METHOD(MLPipelineManagerRegisterCustomFilter);
174 REGISTER_METHOD(MLPipelineManagerCustomFilterOutput);
175 REGISTER_METHOD(MLPipelineManagerUnregisterCustomFilter);
177 REGISTER_METHOD(MLTrainerLayerSetProperty);
178 REGISTER_METHOD(MLTrainerLayerCreate);
179 REGISTER_METHOD(MLTrainerOptimizerSetProperty);
180 REGISTER_METHOD(MLTrainerOptimizerCreate);
181 REGISTER_METHOD(MLTrainerModelCreate);
182 REGISTER_METHOD(MLTrainerModelCompile);
183 REGISTER_METHOD(MLTrainerModelAddLayer);
184 REGISTER_METHOD(MLTrainerModelRun);
185 REGISTER_METHOD(MLTrainerModelSummarize);
186 REGISTER_METHOD(MLTrainerModelSetDataset);
187 REGISTER_METHOD(MLTrainerModelSetOptimizer);
188 REGISTER_METHOD(MLTrainerDatasetCreateGenerator);
189 REGISTER_METHOD(MLTrainerDatasetCreateFromFile);
190 REGISTER_METHOD(MLTrainerDatasetSetProperty);
192 #undef REGISTER_METHOD
195 MlInstance::~MlInstance() {
200 TensorsInfoManager& MlInstance::GetTensorsInfoManager() {
201 return tensors_info_manager_;
204 TensorsDataManager& MlInstance::GetTensorsDataManager() {
205 return tensors_data_manager_;
208 void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
209 ScopeLogger("args: %s", args.serialize().c_str());
210 CHECK_EXIST(args, kNnfw, out)
211 CHECK_EXIST(args, kHw, out)
212 CHECK_EXIST(args, kCustomRequirement, out)
214 std::string nnfw = args.get(kNnfw).get<std::string>();
215 std::string hw = args.get(kHw).get<std::string>();
216 optional<std::string> customRequirement;
217 if (args.get(kCustomRequirement).is<std::string>()) {
218 customRequirement = args.get(kCustomRequirement).get<std::string>();
220 bool availability_val = util::CheckNNFWAvailability(nnfw, hw, customRequirement);
222 picojson::value available = picojson::value{availability_val};
223 ReportSuccess(available, out);
226 void MlInstance::MLTensorsInfoCreate(const picojson::value& args, picojson::object& out) {
227 ScopeLogger("args: %s", args.serialize().c_str());
229 TensorsInfo* tensorsInfo = GetTensorsInfoManager().CreateTensorsInfo();
230 if (nullptr == tensorsInfo) {
231 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
232 ("Could not create new TensorsInfo handle"));
235 out[kId] = picojson::value(static_cast<double>(tensorsInfo->Id()));
239 void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out) {
240 ScopeLogger("args: %s", args.serialize().c_str());
241 CHECK_ARGS(args, kTensorsInfoId, double, out);
243 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
244 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
245 if (nullptr == tensorsInfo) {
246 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
247 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
250 unsigned int count = 0;
251 PlatformResult result = tensorsInfo->NativeGetCount(&count);
253 ReportError(result, &out);
256 picojson::value val = picojson::value{static_cast<double>(count)};
257 ReportSuccess(val, out);
260 void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
261 ScopeLogger("args: %s", args.serialize().c_str());
262 CHECK_ARGS(args, kTensorsInfoId, double, out);
263 CHECK_ARGS(args, kType, std::string, out);
265 CHECK_EXIST(args, kName, out);
266 CHECK_ARGS(args, kDimensions, picojson::array, out);
268 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
269 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
270 if (nullptr == tensorsInfo) {
271 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
272 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
276 const std::string& tensorType = args.get(kType).get<std::string>();
277 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
278 PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
280 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
282 ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
287 if (args.get(kName).is<std::string>()) {
288 name = args.get(kName).get<std::string>();
289 LoggerD("name: %s", name.c_str());
292 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
293 auto dim = args.get(kDimensions).get<picojson::array>();
294 result = util::GetDimensionsFromJsonArray(dim, dimensions);
296 LogAndReportError(result, &out);
300 result = tensorsInfo->AddTensorInfo(name, tensorTypeEnum, dimensions);
302 LogAndReportError(result, &out);
306 int count = tensorsInfo->Count() - 1;
308 picojson::value val = picojson::value{static_cast<double>(count)};
309 ReportSuccess(val, out);
312 void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args, picojson::object& out) {
313 ScopeLogger("args: %s", args.serialize().c_str());
314 CHECK_ARGS(args, kTensorsInfoId, double, out);
315 CHECK_ARGS(args, kIndex, double, out);
317 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
318 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
319 if (nullptr == tensorsInfo) {
320 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
321 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
324 int index = static_cast<int>(args.get(kIndex).get<double>());
325 unsigned int dim[ML_TENSOR_RANK_LIMIT];
326 PlatformResult result = tensorsInfo->NativeGetTensorDimensions(index, dim);
328 LogAndReportError(result, &out);
331 picojson::array array = picojson::array{};
332 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
336 array.push_back(picojson::value{static_cast<double>(dim[i])});
338 picojson::value val = picojson::value{array};
339 ReportSuccess(val, out);
342 void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojson::object& out) {
343 ScopeLogger("args: %s", args.serialize().c_str());
344 CHECK_ARGS(args, kTensorsInfoId, double, out);
345 CHECK_ARGS(args, kIndex, double, out);
346 CHECK_ARGS(args, kDimensions, picojson::array, out);
348 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
349 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
350 if (nullptr == tensorsInfo) {
351 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
352 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
356 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
357 auto dim = args.get(kDimensions).get<picojson::array>();
358 PlatformResult result = util::GetDimensionsFromJsonArray(dim, dimensions);
360 LogAndReportError(result, &out);
364 int index = static_cast<int>(args.get(kIndex).get<double>());
365 result = tensorsInfo->NativeSetTensorDimensions(index, dimensions);
367 LogAndReportError(result, &out);
373 void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args, picojson::object& out) {
374 ScopeLogger("args: %s", args.serialize().c_str());
375 CHECK_ARGS(args, kTensorsInfoId, double, out);
376 CHECK_ARGS(args, kIndex, double, out);
378 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
379 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
380 if (nullptr == tensorsInfo) {
381 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
382 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
385 int index = static_cast<int>(args.get(kIndex).get<double>());
387 PlatformResult result = tensorsInfo->NativeGetTensorName(index, &name);
389 LogAndReportError(result, &out);
392 picojson::value val = picojson::value{name};
393 ReportSuccess(val, out);
396 void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out) {
397 ScopeLogger("args: %s", args.serialize().c_str());
398 CHECK_ARGS(args, kTensorsInfoId, double, out);
399 CHECK_ARGS(args, kIndex, double, out);
400 CHECK_ARGS(args, kName, std::string, out);
402 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
403 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
404 if (nullptr == tensorsInfo) {
405 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
406 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
410 int index = static_cast<int>(args.get(kIndex).get<double>());
411 const std::string& name = args.get(kName).get<std::string>();
412 PlatformResult result = tensorsInfo->NativeSetTensorName(index, name);
414 LogAndReportError(result, &out);
420 void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out) {
421 ScopeLogger("args: %s", args.serialize().c_str());
422 CHECK_ARGS(args, kTensorsInfoId, double, out);
423 CHECK_ARGS(args, kIndex, double, out);
425 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
426 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
427 if (nullptr == tensorsInfo) {
428 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
429 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
432 int index = static_cast<int>(args.get(kIndex).get<double>());
434 PlatformResult result = tensorsInfo->NativeGetTensorSize(index, &size);
436 LogAndReportError(result, &out);
440 picojson::value val = picojson::value{static_cast<double>(size)};
441 ReportSuccess(val, out);
444 void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out) {
445 ScopeLogger("args: %s", args.serialize().c_str());
446 CHECK_ARGS(args, kTensorsInfoId, double, out);
447 CHECK_ARGS(args, kIndex, double, out);
449 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
450 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
451 if (nullptr == tensorsInfo) {
452 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
453 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
457 int index = static_cast<int>(args.get(kIndex).get<double>());
458 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
459 PlatformResult result = tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
461 LogAndReportError(result, &out);
464 std::string tensorTypeString;
465 result = types::TensorTypeEnum.getName(tensorTypeEnum, &tensorTypeString);
467 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
469 ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
473 picojson::value val = picojson::value{tensorTypeString};
474 ReportSuccess(val, out);
477 void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out) {
478 ScopeLogger("args: %s", args.serialize().c_str());
479 CHECK_ARGS(args, kTensorsInfoId, double, out);
480 CHECK_ARGS(args, kIndex, double, out);
481 CHECK_ARGS(args, kType, std::string, out);
483 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
484 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
485 if (nullptr == tensorsInfo) {
486 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
487 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
491 const std::string& tensorType = args.get(kType).get<std::string>();
492 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
493 PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
495 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
497 ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
501 int index = static_cast<int>(args.get(kIndex).get<double>());
502 result = tensorsInfo->NativeSetTensorType(index, tensorTypeEnum);
504 LogAndReportError(result, &out);
510 void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out) {
511 ScopeLogger("args: %s", args.serialize().c_str());
512 CHECK_ARGS(args, kTensorsInfoId, double, out);
514 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
515 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
516 if (nullptr == tensorsInfo) {
517 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
518 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
522 TensorsData* tensorsData = GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
524 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
525 ("Could not create TensorsData"));
529 out[kTensorsDataId] = picojson::value(static_cast<double>(tensorsData->Id()));
530 out[kTensorsInfoId] = picojson::value(static_cast<double>(tensorsData->TensorsInfoId()));
534 void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::object& out) {
535 ScopeLogger("args: %s", args.serialize().c_str());
536 CHECK_ARGS(args, kTensorsInfoId, double, out);
538 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
539 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
540 if (nullptr == tensorsInfo) {
541 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
542 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
546 TensorsInfo* cloned = GetTensorsInfoManager().CloneTensorsInfo(tensorsInfo);
547 if (nullptr == cloned) {
548 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
549 ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
553 out["id"] = picojson::value(static_cast<double>(cloned->Id()));
557 void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::object& out) {
558 ScopeLogger("args: %s", args.serialize().c_str());
559 CHECK_ARGS(args, kTensorsInfoId, double, out);
560 CHECK_ARGS(args, kOtherId, double, out);
562 int firstId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
563 int secondId = static_cast<int>(args.get(kOtherId).get<double>());
565 TensorsInfo* first = GetTensorsInfoManager().GetTensorsInfo(firstId);
566 if (nullptr == first) {
567 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
568 ("Could not find TensorsInfo handle with given id: %d", firstId));
572 TensorsInfo* second = GetTensorsInfoManager().GetTensorsInfo(secondId);
573 if (nullptr == second) {
574 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
575 ("Could not find TensorsInfo handle with given id: %d", secondId));
579 bool equals = first->Equals(second);
580 picojson::value val = picojson::value{equals};
581 ReportSuccess(val, out);
584 void MlInstance::MLTensorsInfoDispose(const picojson::value& args, picojson::object& out) {
585 ScopeLogger("args: %s", args.serialize().c_str());
586 CHECK_ARGS(args, kTensorsInfoId, double, out);
587 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
589 PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
591 LogAndReportError(result, &out);
596 void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::object& out) {
597 ScopeLogger("args: %s", args.serialize().c_str());
598 CHECK_ARGS(args, kTensorsDataId, double, out);
599 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
601 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
602 if (nullptr == tensors_data) {
603 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
604 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
608 if (!tensors_data->DisposableFromJS()) {
613 // Dispose underlying tensorsInfo
614 PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
616 LogAndReportError(result, &out);
620 result = GetTensorsDataManager().DisposeTensorsData(tensors_data_id);
622 LogAndReportError(result, &out);
628 void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
629 ScopeLogger("args: %s", args.serialize().c_str());
630 CHECK_ARGS(args, kTensorsDataId, double, out);
631 CHECK_ARGS(args, kIndex, double, out);
632 CHECK_ARGS(args, kLocation, picojson::array, out);
633 CHECK_ARGS(args, kSize, picojson::array, out);
635 int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
636 int index = static_cast<int>(args.get(kIndex).get<double>());
638 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
639 if (nullptr == tensors_data) {
640 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
641 ("Could not find TensorsData handle with given id: %d", tensor_data_id));
645 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
646 PlatformResult result =
647 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
649 LogAndReportError(result, &out);
653 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
654 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
656 LogAndReportError(result, &out);
660 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
661 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
664 LogAndReportError(result, &out);
668 TensorRawData raw_data;
669 result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
671 LogAndReportError(result, &out);
675 std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
676 out[kBuffer] = picojson::value(picojson::string_type, true);
677 common::encode_binary_in_string(out_data, out[kBuffer].get<std::string>());
679 out[kType] = picojson::value(raw_data.type_str);
680 picojson::array shape = picojson::array{};
681 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
682 shape.push_back(picojson::value{static_cast<double>(raw_data.shape[i])});
684 out[kShape] = picojson::value{shape};
689 void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args,
690 std::vector<uint8_t>* out) {
691 ScopeLogger("args: %s", args.serialize().c_str());
692 // TODO handle errors to out
693 // CHECK_ARGS(args, kTensorsDataId, double, out);
694 // CHECK_ARGS(args, kIndex, double, out);
695 // CHECK_ARGS(args, kLocation, picojson::array, out);
696 // CHECK_ARGS(args, kSize, picojson::array, out);
698 int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
699 int index = static_cast<int>(args.get(kIndex).get<double>());
701 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
702 if (nullptr == tensors_data) {
703 LoggerE("Could not find TensorsData handle with given id: %d", tensor_data_id);
704 tools::ReportErrorToBinary(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
709 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
710 PlatformResult result =
711 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
713 LoggerE("Reporting error.");
714 tools::ReportErrorToBinary(result, out);
718 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
719 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
721 LoggerE("Reporting error.");
722 tools::ReportErrorToBinary(result, out);
726 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
727 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
730 LoggerE("Reporting error.");
731 tools::ReportErrorToBinary(result, out);
735 TensorRawData raw_data;
736 result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
738 LoggerE("Reporting error.");
739 tools::ReportErrorToBinary(result, out);
743 picojson::value result_json = picojson::value(picojson::object());
744 auto& out_json = result_json.get<picojson::object>();
746 out_json[kType] = picojson::value(raw_data.type_str);
747 picojson::array shape = picojson::array{};
748 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
749 shape.push_back(picojson::value{static_cast<double>(raw_data.shape[i])});
751 out_json[kShape] = picojson::value{shape};
753 std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
756 // 4 byte === JSON lenght (N)
757 // N bytest === JSON data
758 tools::ReportSuccessToBinary(result_json, out);
759 // 4 byte === buffer length (M)
760 // M bytes === buffer data
761 tools::ReportDataToBinary(out_data, out);
764 void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
765 ScopeLogger("args: %s", args.serialize().c_str());
766 CHECK_ARGS(args, kTensorsDataId, double, out);
767 CHECK_ARGS(args, kIndex, double, out);
769 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
770 int index = static_cast<int>(args.get(kIndex).get<double>());
772 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
773 if (nullptr == tensors_data) {
774 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
775 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
779 std::string tensor_type_string;
780 PlatformResult result =
781 types::TensorTypeEnum.getName(tensors_data->GetTensorType(index), &tensor_type_string);
783 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
785 ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
789 picojson::value val = picojson::value{tensor_type_string};
790 ReportSuccess(val, out);
793 void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
795 CHECK_ARGS(args, kTensorsDataId, double, out);
796 CHECK_ARGS(args, kIndex, double, out);
797 CHECK_ARGS(args, kBuffer, std::string, out);
798 CHECK_ARGS(args, kLocation, picojson::array, out);
799 CHECK_ARGS(args, kSize, picojson::array, out);
800 LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
801 LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
802 LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
803 LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
805 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
806 int index = static_cast<int>(args.get(kIndex).get<double>());
808 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
809 if (nullptr == tensors_data) {
810 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
811 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
815 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
816 PlatformResult result =
817 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
819 LogAndReportError(result, &out);
823 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
824 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
826 LogAndReportError(result, &out);
830 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
831 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
834 LogAndReportError(result, &out);
837 const std::string& str_buffer = args.get(kBuffer).get<std::string>();
838 std::vector<std::uint8_t> buffer;
839 common::decode_binary_from_string(str_buffer, buffer);
841 TensorRawData raw_data{buffer.data(), buffer.size()};
842 result = tensors_data->SetTensorRawData(index, location, size, raw_data);
844 LogAndReportError(result, &out);
// Binary-protocol variant of setting tensor raw data: instead of a pure JSON
// message, the payload is a packed byte stream carrying a JSON header followed
// by the raw tensor bytes. Reports success/error into |out|.
851 void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t data_size,
852 picojson::object& out) {
// Layout of the incoming message (the 1-byte method id has already been
// consumed by the message dispatcher before this function is called):
855 // METHOD_ID WAS ALREADY REMOVED during message handling
856 // other data packed with following format:
858 // 1 byte === methodIndex /// already parsed
859 // 4 byte === JSON length (N)
860 // 4 byte === buffer length (M)
861 // N bytes === JSON data
862 // M bytes === buffer data
// Decode the two big-endian 32-bit length fields.
// NOTE(review): data[] is (possibly signed) char; a byte >= 0x80 sign-extends
// before the shift and can corrupt the decoded length — confirm the sender
// never produces such lengths, or cast each byte to uint8_t first.
864 unsigned int call_args_len_begin = 0;
865 unsigned int call_args_len = static_cast<unsigned int>(
866 (data[call_args_len_begin] << 24) + (data[call_args_len_begin + 1] << 16) +
867 (data[call_args_len_begin + 2] << 8) + (data[call_args_len_begin + 3]));
869 unsigned int buffer_len_begin = call_args_len_begin + 4;
870 unsigned int buffer_len = static_cast<unsigned int>(
871 (data[buffer_len_begin] << 24) + (data[buffer_len_begin + 1] << 16) +
872 (data[buffer_len_begin + 2] << 8) + (data[buffer_len_begin + 3]));
// NOTE(review): the decoded lengths are not validated against data_size here;
// a malformed message could read past the end of |data| — verify the sender
// side (crosswalk message layer) guarantees consistency.
874 unsigned int call_args_begin = buffer_len_begin + 4;
875 std::string call_args(data + call_args_begin, call_args_len);
// Parse the JSON header and validate the fields this method requires.
877 picojson::value args;
878 picojson::parse(args, call_args);
880 unsigned int buffer_begin = call_args_begin + call_args_len;
882 CHECK_ARGS(args, kTensorsDataId, double, out);
883 CHECK_ARGS(args, kIndex, double, out);
884 CHECK_ARGS(args, kLocation, picojson::array, out);
885 CHECK_ARGS(args, kSize, picojson::array, out);
886 LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
887 LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
888 LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
889 LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
891 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
892 int index = static_cast<int>(args.get(kIndex).get<double>());
// Resolve the TensorsData handle referenced by the header.
894 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
895 if (nullptr == tensors_data) {
896 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
897 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
// Convert the JSON location/size arrays into fixed-rank C arrays
// (rank == ML_TENSOR_RANK_LIMIT, asserted at the top of this file).
901 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
902 PlatformResult result =
903 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
905 LogAndReportError(result, &out);
909 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
910 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
912 LogAndReportError(result, &out);
916 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
917 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
920 LogAndReportError(result, &out);
// The raw tensor payload starts right after the JSON header. The const_cast
// is presumably safe because SetTensorRawData only reads the bytes — TODO
// confirm against TensorRawData's contract.
924 TensorRawData raw_data{reinterpret_cast<uint8_t*>(const_cast<char*>(data + buffer_begin)),
926 result = tensors_data->SetTensorRawData(index, location, size, raw_data);
928 LogAndReportError(result, &out);
// Opens a single-shot model from |kModelPath| with optional input/output
// TensorsInfo, NNFW/HW type selection and dynamic-mode flag. Runs either
// synchronously or, when kAsync is true, on the worker thread with the result
// posted back through the callback id.
935 void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out) {
936 ScopeLogger("args: %s", args.serialize().c_str());
937 CHECK_ARGS(args, kModelPath, std::string, out);
938 CHECK_ARGS(args, kInTensorsInfo, double, out);
939 CHECK_ARGS(args, kOutTensorsInfo, double, out);
940 CHECK_ARGS(args, kFwType, std::string, out);
941 CHECK_ARGS(args, kHwType, std::string, out);
942 CHECK_ARGS(args, kIsDynamicMode, bool, out);
// Convert the URI coming from JS into a filesystem path and check privileges.
944 const auto& model_path = common::tools::ConvertUriToPath(args.get(kModelPath).get<std::string>());
945 CHECK_STORAGE_ACCESS(model_path, &out);
// kNoId acts as a sentinel meaning "no TensorsInfo supplied"; in that case the
// corresponding pointer stays null and is passed through to OpenModel.
947 TensorsInfo* in_tensors_info = nullptr;
948 auto inTensorId = static_cast<int>(args.get(kInTensorsInfo).get<double>());
949 if (kNoId != inTensorId) {
950 in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
951 if (nullptr == in_tensors_info) {
952 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
953 ("Could not find TensorsInfo handle with given id: %d", inTensorId));
958 TensorsInfo* out_tensors_info = nullptr;
959 auto outTensorId = static_cast<int>(args.get(kOutTensorsInfo).get<double>());
960 if (kNoId != outTensorId) {
961 out_tensors_info = GetTensorsInfoManager().GetTensorsInfo(outTensorId);
962 if (nullptr == out_tensors_info) {
963 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
964 ("Could not find TensorsInfo handle with given id: %d", outTensorId));
// Map the string enums from JS into the native nnfw/hw enum values.
969 ml_nnfw_type_e nnfw_e = ML_NNFW_TYPE_ANY;
970 PlatformResult result =
971 types::NNFWTypeEnum.getValue(args.get(kFwType).get<std::string>(), &nnfw_e);
973 LogAndReportError(result, &out);
977 ml_nnfw_hw_e hw_e = ML_NNFW_HW_ANY;
978 result = types::HWTypeEnum.getValue(args.get(kHwType).get<std::string>(), &hw_e);
980 LogAndReportError(result, &out);
984 auto is_dynamic_mode = args.get(kIsDynamicMode).get<bool>();
// |logic| does the actual open; it deliberately takes its own |out| parameter
// (shadowing the enclosing one) so it can write into either the synchronous
// response or the async response object.
986 auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
987 is_dynamic_mode](decltype(out) out) {
988 PlatformResult result = common::tools::CheckFileAvailability(model_path);
991 PlatformResult(ErrorCode::NOT_FOUND_ERR, "File does not exist or is not accessible"),
992 &out, ("File does not exist or is not accessible: %s", model_path.c_str()));
997 result = single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
998 is_dynamic_mode, &res_id);
1000 ReportError(result, &out);
1004 out[kId] = picojson::value(static_cast<double>(res_id));
// kAsync is optional; absence (or a non-bool value) means synchronous mode.
1009 (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
// Async path: queue |logic| on the worker and post the result back to JS,
// tagged with the caller-provided callback id.
1015 CHECK_ARGS(args, kCallbackId, double, out);
1016 double callback_id = args.get(kCallbackId).get<double>();
1017 this->worker_.add_job([this, callback_id, logic] {
1018 picojson::value response = picojson::value(picojson::object());
1019 picojson::object& async_out = response.get<picojson::object>();
1020 async_out[kCallbackId] = picojson::value(callback_id);
1022 this->PostMessage(response.serialize().c_str());
// Fetches the native input or output TensorsInfo of an open single-shot model
// and returns the id of the managed TensorsInfo wrapper in out[kId].
1030 void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args, picojson::object& out) {
1031 ScopeLogger("args: %s", args.serialize().c_str());
1032 CHECK_ARGS(args, kId, double, out);
1033 CHECK_ARGS(args, kGetInputMode, bool, out);
1035 auto id = static_cast<int>(args.get(kId).get<double>());
1036 // true means gathering input data; false means gathering output data
1037 auto get_input_mode = static_cast<int>(args.get(kGetInputMode).get<bool>());
1040 auto ret = single_manager_.GetNativeTensorsInfo(id, get_input_mode, &res_id);
1042 ReportError(ret, &out);
1046 out[kId] = picojson::value(static_cast<double>(res_id));
// Applies a new input TensorsInfo to an open single-shot model and returns the
// id of a cloned TensorsInfo in out[kId].
1050 void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson::object& out) {
1051 ScopeLogger("args: %s", args.serialize().c_str());
1052 CHECK_ARGS(args, kId, double, out);
1053 CHECK_ARGS(args, kInTensorsInfo, double, out);
1055 auto id = static_cast<int>(args.get(kId).get<double>());
1056 auto inTensorId = static_cast<int>(args.get(kInTensorsInfo).get<double>());
1058 TensorsInfo* in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
1059 if (nullptr == in_tensors_info) {
1060 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1061 ("Could not find TensorsInfo handle with given id: %d", inTensorId));
// NOTE(review): a clone is created and its id is returned below, yet
// SetNativeInputInfo is called with the ORIGINAL in_tensors_info — confirm
// this asymmetry is intentional (clone only used to give JS a stable copy).
1065 TensorsInfo* clone = GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
1067 auto ret = single_manager_.SetNativeInputInfo(id, in_tensors_info);
1069 ReportError(ret, &out);
1073 out[kId] = picojson::value(static_cast<double>(clone->Id()));
// Runs inference on an open single-shot model with the given TensorsData.
// Synchronous by default; with kAsync the input data is cloned first (so JS
// can dispose its copy during the invoke) and the work runs on the worker
// thread, posting the result back via the callback id.
1077 void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::object& out) {
1078 ScopeLogger("args: %s", args.serialize().c_str());
1079 CHECK_ARGS(args, kId, double, out);
1080 CHECK_ARGS(args, kTensorsDataId, double, out);
1082 int id = static_cast<int>(args.get(kId).get<double>());
1083 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
// kAsync is optional; absence (or a non-bool value) means synchronous mode.
1085 (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
1087 TensorsData* in_tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
1088 if (async && in_tensors_data) {
1089 // in case of async flow need to prevent destroying entry data during invoke
1090 // from JS, creation of a copy
1091 in_tensors_data = GetTensorsInfoManager().CloneNativeTensorWithData(
1092 in_tensors_data->GetTensorsInfo()->Handle(), in_tensors_data->Handle());
// Covers both "id not found" and "clone failed" since in_tensors_data was
// reassigned above in the async case.
1094 if (nullptr == in_tensors_data) {
1095 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
1096 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
1100 auto logic = [this, id, tensors_data_id, in_tensors_data, async](decltype(out) out) {
1101 TensorsData* out_tensors_data = nullptr;
1102 auto ret = single_manager_.Invoke(id, in_tensors_data, &out_tensors_data);
1104 // in case of async flow, the in_tensor_data with underlying TensorsInfo
1105 // was copied, thus need to be released here
1106 GetTensorsInfoManager().DisposeTensorsInfo(in_tensors_data->GetTensorsInfo());
1107 GetTensorsDataManager().DisposeTensorsData(in_tensors_data);
1110 ReportError(ret, &out);
// Hand the output tensors back to JS by id; the wrappers are owned by the
// managers, not by this response.
1114 out[kTensorsDataId] = picojson::value(static_cast<double>(out_tensors_data->Id()));
1115 out[kTensorsInfoId] = picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
// Async path: queue |logic| on the worker and post the tagged response.
1122 CHECK_ARGS(args, kCallbackId, double, out);
1123 double callback_id = args.get(kCallbackId).get<double>();
1124 this->worker_.add_job([this, callback_id, logic] {
1125 picojson::value response = picojson::value(picojson::object());
1126 picojson::object& async_out = response.get<picojson::object>();
1127 async_out[kCallbackId] = picojson::value(callback_id);
1129 this->PostMessage(response.serialize().c_str());
// Reads a string property |kName| from an open single-shot model and reports
// its value on success.
1137 void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::object& out) {
1138 ScopeLogger("args: %s", args.serialize().c_str());
1139 CHECK_ARGS(args, kId, double, out);
1140 CHECK_ARGS(args, kName, std::string, out);
1142 auto id = static_cast<int>(args.get(kId).get<double>());
1143 const auto& name = args.get(kName).get<std::string>();
1145 auto ret = single_manager_.GetValue(id, name, value);
1147 ReportError(ret, &out);
1151 picojson::value val = picojson::value{value};
1152 ReportSuccess(val, out);
// Sets a string property |kName| = |kValue| on an open single-shot model.
1155 void MlInstance::MLSingleShotSetValue(const picojson::value& args, picojson::object& out) {
1156 ScopeLogger("args: %s", args.serialize().c_str());
1157 CHECK_ARGS(args, kId, double, out);
1158 CHECK_ARGS(args, kName, std::string, out);
1159 CHECK_ARGS(args, kValue, std::string, out);
1161 auto id = static_cast<int>(args.get(kId).get<double>());
1162 const auto& name = args.get(kName).get<std::string>();
1163 const auto& value = args.get(kValue).get<std::string>();
1165 auto ret = single_manager_.SetValue(id, name, value);
1167 ReportError(ret, &out);
// Sets the invoke timeout for a single-shot model. The JS double is truncated
// to unsigned long before being handed to the native API.
1174 void MlInstance::MLSingleShotSetTimeout(const picojson::value& args, picojson::object& out) {
1175 ScopeLogger("args: %s", args.serialize().c_str());
1176 CHECK_ARGS(args, kId, double, out);
1177 CHECK_ARGS(args, kTimeout, double, out);
1179 auto id = static_cast<int>(args.get(kId).get<double>());
1180 auto timeout = static_cast<unsigned long>(args.get(kTimeout).get<double>());
1182 auto ret = single_manager_.SetTimeout(id, timeout);
1184 ReportError(ret, &out);
// Closes the single-shot model with the given id, releasing native resources.
1191 void MlInstance::MLSingleShotClose(const picojson::value& args, picojson::object& out) {
1192 ScopeLogger("args: %s", args.serialize().c_str());
1193 CHECK_ARGS(args, kId, double, out);
1195 auto id = static_cast<int>(args.get(kId).get<double>());
1197 auto ret = single_manager_.Close(id);
1199 ReportError(ret, &out);
// Validates the argument shape for MLPipelineManagerCreatePipeline:
// kId must be a number, kDefinition a string, and the state-change listener
// name either a string or null (listener is optional). Returns true when the
// arguments are NOT usable (note the negation at the end).
1208 bool CreatePipelineArgumentsAreInvalid(const picojson::value& args) {
1211 auto arguments_valid = args.get(kId).is<double>();
1212 arguments_valid &= args.get(kDefinition).is<std::string>();
1213 arguments_valid &= (args.get(kPipelineStateChangeListenerName).is<std::string>() ||
1214 args.get(kPipelineStateChangeListenerName).is<picojson::null>());
1215 LoggerD("CreatePipeline arguments are %s", arguments_valid ? "valid" : "invalid");
1217 return !arguments_valid;
// Creates a GStreamer-style pipeline from a textual definition, optionally
// wiring up a JS state-change listener (null listener name means "none").
1222 void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
1223 picojson::object& out) {
1224 ScopeLogger("args: %s", args.serialize().c_str());
// Manual validation instead of CHECK_ARGS because the listener name may
// legitimately be null.
1226 if (CreatePipelineArgumentsAreInvalid(args)) {
1227 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"}, &out);
1231 auto id = static_cast<int>(args.get(kId).get<double>());
1232 auto definition = args.get(kDefinition).get<std::string>();
1233 auto state_change_listener_name =
1234 args.get(kPipelineStateChangeListenerName).is<std::string>()
1235 ? args.get(kPipelineStateChangeListenerName).get<std::string>()
1238 auto ret = pipeline_manager_.CreatePipeline(id, definition, state_change_listener_name);
1241 ReportError(ret, &out);
// Returns the current state string of the pipeline with the given id.
1248 void MlInstance::MLPipelineGetState(const picojson::value& args, picojson::object& out) {
1249 ScopeLogger("args: %s", args.serialize().c_str());
1251 if (!args.get(kId).is<double>()) {
1252 LoggerD("id is not a number");
1253 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1256 auto id = static_cast<int>(args.get(kId).get<double>());
// The state is written directly into the string held inside the picojson
// value, avoiding a copy when reporting success.
1258 picojson::value state_value{std::string{}};
1259 std::string* state_ptr = &state_value.get<std::string>();
1260 auto ret = pipeline_manager_.GetPipelineState(id, state_ptr);
1262 ReportError(ret, &out);
1266 ReportSuccess(state_value, out);
// Starts (plays) the pipeline with the given id.
1269 void MlInstance::MLPipelineStart(const picojson::value& args, picojson::object& out) {
1270 ScopeLogger("args: %s", args.serialize().c_str());
1272 CHECK_ARGS(args, kId, double, out);
1274 auto id = static_cast<int>(args.get(kId).get<double>());
1276 PlatformResult result = pipeline_manager_.Start(id);
1279 ReportError(result, &out);
// Stops (pauses) the pipeline with the given id.
1286 void MlInstance::MLPipelineStop(const picojson::value& args, picojson::object& out) {
1287 ScopeLogger("args: %s", args.serialize().c_str());
1289 CHECK_ARGS(args, kId, double, out);
1291 auto id = static_cast<int>(args.get(kId).get<double>());
1293 PlatformResult result = pipeline_manager_.Stop(id);
1296 LogAndReportError(result, &out);
// Disposes the pipeline with the given id, releasing its native resources.
1303 void MlInstance::MLPipelineDispose(const picojson::value& args, picojson::object& out) {
1304 ScopeLogger("args: %s", args.serialize().c_str());
1306 if (!args.get(kId).is<double>()) {
1307 LoggerD("id is not a number");
1308 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1312 auto id = static_cast<int>(args.get(kId).get<double>());
1313 auto ret = pipeline_manager_.DisposePipeline(id);
1316 ReportError(ret, &out);
// Registers/fetches a NodeInfo for element |kName| inside pipeline |kId|.
1323 void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::object& out) {
1324 ScopeLogger("args: %s", args.serialize().c_str());
1326 CHECK_ARGS(args, kId, double, out);
1327 CHECK_ARGS(args, kName, std::string, out);
1329 auto name = args.get(kName).get<std::string>();
1330 auto id = static_cast<int>(args.get(kId).get<double>());
1332 PlatformResult result = pipeline_manager_.GetNodeInfo(id, name);
1335 LogAndReportError(result, &out);
// Looks up the app-source element |kName| inside pipeline |kId|.
1342 void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::object& out) {
1343 ScopeLogger("args: %s", args.serialize().c_str());
1345 CHECK_ARGS(args, kId, double, out);
1346 CHECK_ARGS(args, kName, std::string, out);
1348 auto name = args.get(kName).get<std::string>();
1349 auto id = static_cast<int>(args.get(kId).get<double>());
1351 PlatformResult result = pipeline_manager_.GetSource(id, name);
1354 LogAndReportError(result, &out);
// Looks up the switch element |kName| inside pipeline |kId| and returns its
// switch type string in out["type"].
1361 void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::object& out) {
1362 ScopeLogger("args: %s", args.serialize().c_str());
// Manual validation (instead of CHECK_ARGS) mirrors MLPipelineGetState/Dispose.
1364 if (!args.get(kId).is<double>()) {
1365 LoggerD("id is not a number");
1366 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1370 if (!args.get(kName).is<std::string>()) {
1371 LoggerD("name is not a string");
1372 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid name"}, &out);
1376 auto name = args.get(kName).get<std::string>();
1377 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1380 auto ret = pipeline_manager_.GetSwitch(name, pipeline_id, &type);
1382 LogAndReportError(ret, &out);
1386 out["type"] = picojson::value{type};
// Looks up the valve element |kName| inside pipeline |kId|.
1390 void MlInstance::MLPipelineGetValve(const picojson::value& args, picojson::object& out) {
1391 ScopeLogger("args: %s", args.serialize().c_str());
1393 CHECK_ARGS(args, kId, double, out);
1394 CHECK_ARGS(args, kName, std::string, out);
1396 auto name = args.get(kName).get<std::string>();
// NOTE(review): pipeline_id stays a double here while sibling handlers cast it
// to int — presumably GetValve takes the id as given; confirm the overload.
1397 auto pipeline_id = args.get(kId).get<double>();
1399 auto ret = pipeline_manager_.GetValve(name, pipeline_id);
1401 LogAndReportError(ret, &out);
// Attaches a JS listener (by name) to sink element |kName| of pipeline |kId|.
1408 void MlInstance::MLPipelineRegisterSinkListener(const picojson::value& args,
1409 picojson::object& out) {
1410 ScopeLogger("args: %s", args.serialize().c_str());
1412 CHECK_ARGS(args, kId, double, out);
1413 CHECK_ARGS(args, kName, std::string, out);
1414 CHECK_ARGS(args, kListenerName, std::string, out);
1416 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1417 auto sink_name = args.get(kName).get<std::string>();
1418 auto listener_name = args.get(kListenerName).get<std::string>();
1420 auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id, listener_name);
1422 LogAndReportError(ret, &out);
// Detaches the listener previously registered on sink |kName| of pipeline |kId|.
1429 void MlInstance::MLPipelineUnregisterSinkListener(const picojson::value& args,
1430 picojson::object& out) {
1431 ScopeLogger("args: %s", args.serialize().c_str());
1433 CHECK_ARGS(args, kId, double, out);
1434 CHECK_ARGS(args, kName, std::string, out);
1436 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1437 auto sink_name = args.get(kName).get<std::string>();
1439 auto ret = pipeline_manager_.UnregisterSinkListener(sink_name, pipeline_id);
1441 LogAndReportError(ret, &out);
// Registers a JS-implemented custom filter: the filter is identified by name,
// invokes the JS listener for each buffer, and declares its input/output
// tensor layouts via two previously-created TensorsInfo handles.
1448 void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& args,
1449 picojson::object& out) {
1450 ScopeLogger("args: %s", args.serialize().c_str())
1452 CHECK_ARGS(args, kName, std::string, out);
1453 CHECK_ARGS(args, kListenerName, std::string, out);
1454 CHECK_ARGS(args, kInputTensorsInfoId, double, out);
1455 CHECK_ARGS(args, kOutputTensorsInfoId, double, out);
1457 const auto& custom_filter_name = args.get(kName).get<std::string>();
1458 const auto& listener_name = args.get(kListenerName).get<std::string>();
1459 auto input_tensors_info_id = static_cast<int>(args.get(kInputTensorsInfoId).get<double>());
1460 auto output_tensors_info_id = static_cast<int>(args.get(kOutputTensorsInfoId).get<double>());
// Resolve both TensorsInfo handles before registering; either missing handle
// aborts the operation.
1462 TensorsInfo* input_tensors_info_ptr =
1463 GetTensorsInfoManager().GetTensorsInfo(input_tensors_info_id);
1464 if (!input_tensors_info_ptr) {
1466 PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1467 ("Could not find TensorsInfo handle with given id: %d", input_tensors_info_id));
1471 TensorsInfo* output_tensors_info_ptr =
1472 GetTensorsInfoManager().GetTensorsInfo(output_tensors_info_id);
1473 if (!output_tensors_info_ptr) {
1475 PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1476 ("Could not find TensorsInfo handle with given id: %d", output_tensors_info_id));
1480 auto ret = pipeline_manager_.RegisterCustomFilter(
1481 custom_filter_name, listener_name, input_tensors_info_ptr, output_tensors_info_ptr);
1483 LogAndReportError(ret, &out);
// Delivers the JS custom filter's result (status code) for a pending filter
// invocation identified by |kRequestId| back to the native side.
1490 void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args,
1491 picojson::object& out) {
1492 ScopeLogger("args: %s", args.serialize().c_str());
1494 CHECK_ARGS(args, kName, std::string, out);
1495 CHECK_ARGS(args, kStatus, double, out);
1496 CHECK_ARGS(args, kRequestId, double, out);
1498 const auto& custom_filter_name = args.get(kName).get<std::string>();
1499 auto status = static_cast<int>(args.get(kStatus).get<double>());
1500 auto request_id = static_cast<int>(args.get(kRequestId).get<double>());
1502 auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status);
1504 LogAndReportError(ret, &out);
// Unregisters the custom filter previously registered under |kName|.
1511 void MlInstance::MLPipelineManagerUnregisterCustomFilter(const picojson::value& args,
1512 picojson::object& out) {
1513 ScopeLogger("args: %s", args.serialize().c_str());
1515 CHECK_ARGS(args, kName, std::string, out);
1517 const auto& custom_filter_name = args.get(kName).get<std::string>();
1519 auto ret = pipeline_manager_.UnregisterCustomFilter(custom_filter_name);
1521 LogAndReportError(ret, &out);
// Reads property |kName| (interpreted as |kType|) from node |kNodeName| of
// pipeline |kId|; the manager writes the value directly into |out|.
1528 void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, picojson::object& out) {
1529 ScopeLogger("args: %s", args.serialize().c_str());
1531 CHECK_ARGS(args, kId, double, out);
1532 CHECK_ARGS(args, kNodeName, std::string, out);
1533 CHECK_ARGS(args, kName, std::string, out);
1534 CHECK_ARGS(args, kType, std::string, out);
1536 auto id = static_cast<int>(args.get(kId).get<double>());
1537 const auto& name = args.get(kName).get<std::string>();
1538 const auto& node_name = args.get(kNodeName).get<std::string>();
1539 const auto& type = args.get(kType).get<std::string>();
1541 PlatformResult result = pipeline_manager_.getProperty(id, node_name, name, type, &out);
1544 LogAndReportError(result, &out);
// Writes property |kName| on node |kNodeName| of pipeline |kId|. The JSON type
// of kProperty must match the declared |kType| (BOOLEAN -> bool,
// STRING -> string, anything else -> number).
1551 void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, picojson::object& out) {
1552 ScopeLogger("args: %s", args.serialize().c_str());
1554 CHECK_ARGS(args, kId, double, out);
1555 CHECK_ARGS(args, kNodeName, std::string, out);
1556 CHECK_ARGS(args, kName, std::string, out);
1557 CHECK_ARGS(args, kType, std::string, out);
1559 auto id = static_cast<int>(args.get(kId).get<double>());
1560 const auto& name = args.get(kName).get<std::string>();
1561 const auto& node_name = args.get(kNodeName).get<std::string>();
1562 const auto& type = args.get(kType).get<std::string>();
// Validate the property payload against the declared type before dispatching.
1564 CHECK_EXIST(args, kProperty, out);
1565 if (kBOOLEAN == type) {
1566 CHECK_TYPE(args, kProperty, bool, out, ErrorCode::TYPE_MISMATCH_ERR);
1567 } else if (kSTRING == type) {
1568 CHECK_TYPE(args, kProperty, std::string, out, ErrorCode::TYPE_MISMATCH_ERR);
1570 CHECK_TYPE(args, kProperty, double, out, ErrorCode::TYPE_MISMATCH_ERR);
1572 const picojson::value& property = args.get(kProperty);
1574 PlatformResult result = pipeline_manager_.setProperty(id, node_name, name, type, property);
1576 LogAndReportError(result, &out);
// Obtains the input TensorsInfo of source |kName| in pipeline |kId| and hands
// back a managed TensorsInfo id via |res_id|.
1583 void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out) {
1584 ScopeLogger("args: [%s]", args.serialize().c_str());
1586 CHECK_ARGS(args, kId, double, out);
1587 CHECK_ARGS(args, kName, std::string, out);
1589 auto id = static_cast<int>(args.get(kId).get<double>());
1590 const auto& name = args.get(kName).get<std::string>();
1593 PlatformResult result = pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
1595 LogAndReportError(result, &out);
// Pushes a TensorsData buffer into app-source |kName| of pipeline |kId|.
1602 void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson::object& out) {
1603 ScopeLogger("args: [%s]", args.serialize().c_str());
1605 CHECK_ARGS(args, kId, double, out);
1606 CHECK_ARGS(args, kName, std::string, out);
1607 CHECK_ARGS(args, kTensorsDataId, double, out);
1609 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1610 auto& source_name = args.get(kName).get<std::string>();
1611 auto tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
1613 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
1615 if (nullptr == tensors_data) {
1616 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
1617 ("Could not get TensorData handle with given id: %d", tensor_data_id));
1621 auto ret = pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
1623 LogAndReportError(ret, &out);
// Returns the list of pad names of switch |kName| in pipeline |kId| as a JSON
// array.
1630 void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojson::object& out) {
1631 ScopeLogger("args: [%s]", args.serialize().c_str());
1633 CHECK_ARGS(args, kId, double, out);
1634 CHECK_ARGS(args, kName, std::string, out);
1636 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1637 auto& switch_name = args.get(kName).get<std::string>();
1639 picojson::array pad_list;
1640 auto ret = pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
1642 LogAndReportError(ret, &out);
1646 ReportSuccess(picojson::value{std::move(pad_list)}, out);
// Activates pad |kPadName| on switch |kName| of pipeline |kId|.
1649 void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::object& out) {
1650 ScopeLogger("args: [%s]", args.serialize().c_str());
1652 CHECK_ARGS(args, kId, double, out);
1653 CHECK_ARGS(args, kName, std::string, out);
1654 CHECK_ARGS(args, kPadName, std::string, out);
// NOTE(review): id kept as double here (no int cast) unlike most handlers —
// presumably SwitchSelect accepts it; confirm for consistency.
1656 auto pipeline_id = args.get(kId).get<double>();
1657 auto& switch_name = args.get(kName).get<std::string>();
1658 auto& pad_name = args.get(kPadName).get<std::string>();
1660 auto ret = pipeline_manager_.SwitchSelect(pipeline_id, switch_name, pad_name);
1662 LogAndReportError(ret, &out);
// Opens or closes valve |kName| of pipeline |kId| according to |kOpen|.
1669 void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out) {
1670 ScopeLogger("args: %s", args.serialize().c_str());
1672 CHECK_ARGS(args, kId, double, out);
1673 CHECK_ARGS(args, kName, std::string, out);
1674 CHECK_ARGS(args, kOpen, bool, out);
1676 auto name = args.get(kName).get<std::string>();
1677 auto pipeline_id = args.get(kId).get<double>();
1678 auto open = args.get(kOpen).get<bool>();
1680 auto ret = pipeline_manager_.ValveSetOpen(pipeline_id, name, open);
1682 LogAndReportError(ret, &out);
// Queries whether valve |kName| of pipeline |kId| is currently open and
// reports the boolean on success.
1689 void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out) {
1690 ScopeLogger("args: %s", args.serialize().c_str());
1692 CHECK_ARGS(args, kId, double, out);
1693 CHECK_ARGS(args, kName, std::string, out);
1695 auto name = args.get(kName).get<std::string>();
1696 auto pipeline_id = args.get(kId).get<double>();
1699 auto ret = pipeline_manager_.ValveIsOpen(pipeline_id, name, &open);
1701 LogAndReportError(ret, &out);
1705 ReportSuccess(picojson::value{open}, out);
// Sets string property |kName| = |kValue| on trainer layer |kId|.
1708 void MlInstance::MLTrainerLayerSetProperty(const picojson::value& args, picojson::object& out) {
1709 ScopeLogger("args: %s", args.serialize().c_str());
1710 CHECK_ARGS(args, kId, double, out);
1711 CHECK_ARGS(args, kName, std::string, out);
1712 CHECK_ARGS(args, kValue, std::string, out);
1714 auto id = static_cast<int>(args.get(kId).get<double>());
1715 auto name = args.get(kName).get<std::string>();
1716 auto value = args.get(kValue).get<std::string>();
1718 PlatformResult result = trainer_manager_.LayerSetProperty(id, name, value);
1720 ReportError(result, &out);
// Creates a trainer layer of the requested type and returns its manager id in
// out[kId].
1726 void MlInstance::MLTrainerLayerCreate(const picojson::value& args, picojson::object& out) {
1727 ScopeLogger("args: %s", args.serialize().c_str());
1728 CHECK_ARGS(args, kType, std::string, out);
// Map the JS type string onto the native LayerType enum.
1731 LayerType layer_type = LayerType::LAYER_UNKNOWN;
1732 PlatformResult result =
1733 types::LayerTypeEnum.getValue(args.get(kType).get<std::string>(), &layer_type);
1735 LogAndReportError(result, &out);
1739 result = trainer_manager_.CreateLayer(id, layer_type);
1741 ReportError(result, &out);
1744 out[kId] = picojson::value(static_cast<double>(id));
// Sets string property |kName| = |kValue| on trainer optimizer |kId|.
1748 void MlInstance::MLTrainerOptimizerSetProperty(const picojson::value& args, picojson::object& out) {
1749 ScopeLogger("args: %s", args.serialize().c_str());
1750 CHECK_ARGS(args, kId, double, out);
1751 CHECK_ARGS(args, kName, std::string, out);
1752 CHECK_ARGS(args, kValue, std::string, out);
1754 auto id = static_cast<int>(args.get(kId).get<double>());
1755 auto name = args.get(kName).get<std::string>();
1756 auto value = args.get(kValue).get<std::string>();
1758 PlatformResult result = trainer_manager_.OptimizerSetProperty(id, name, value);
1760 ReportError(result, &out);
// Creates a trainer optimizer of the requested type and returns its manager id
// in out[kId].
1766 void MlInstance::MLTrainerOptimizerCreate(const picojson::value& args, picojson::object& out) {
1767 ScopeLogger("args: %s", args.serialize().c_str());
1768 CHECK_ARGS(args, kType, std::string, out);
// Map the JS type string onto the native OptimizerType enum.
1771 OptimizerType optimizer_type = OptimizerType::UNKNOWN;
1772 PlatformResult result =
1773 types::OptimizerTypeEnum.getValue(args.get(kType).get<std::string>(), &optimizer_type);
1775 LogAndReportError(result, &out);
1779 result = trainer_manager_.CreateOptimizer(id, optimizer_type);
1781 ReportError(result, &out);
1784 out[kId] = picojson::value(static_cast<double>(id));
// Creates a trainer model, either from an INI config file (when kModelPath is
// present) or empty; returns the new model id in out[kId].
1788 void MlInstance::MLTrainerModelCreate(const picojson::value& args, picojson::object& out) {
1789 ScopeLogger("args: %s", args.serialize().c_str());
1791 PlatformResult result;
1792 if (args.contains(kModelPath)) {
1793 // create model with config file
1794 CHECK_ARGS(args, kModelPath, std::string, out);
1795 const auto& config_path =
1796 common::tools::ConvertUriToPath(args.get(kModelPath).get<std::string>());
1797 CHECK_STORAGE_ACCESS(config_path, &out);
1799 result = trainer_manager_.CreateModel(id, config_path);
1801 result = trainer_manager_.CreateModel(id);
1804 ReportError(result, &out);
1807 out[kId] = picojson::value(static_cast<double>(id));
// Compiles trainer model |kId| with the compile options object |kOptions|.
1811 void MlInstance::MLTrainerModelCompile(const picojson::value& args, picojson::object& out) {
1812 ScopeLogger("args: %s", args.serialize().c_str());
1813 CHECK_ARGS(args, kId, double, out);
1814 CHECK_ARGS(args, kOptions, picojson::object, out);
1816 auto options = args.get(kOptions).get<picojson::object>();
1817 auto id = static_cast<int>(args.get(kId).get<double>());
1819 PlatformResult result = trainer_manager_.ModelCompile(id, options);
1822 ReportError(result, &out);
// Attaches layer |kId|-referenced layer to a trainer model (body elided here).
1828 void MlInstance::MLTrainerModelAddLayer(const picojson::value& args, picojson::object& out) {
// Runs model training (body elided here).
1832 void MlInstance::MLTrainerModelRun(const picojson::value& args, picojson::object& out) {
// Produces a model summary (body elided here).
1836 void MlInstance::MLTrainerModelSummarize(const picojson::value& args, picojson::object& out) {
// Binds a dataset to a trainer model (body elided here).
1840 void MlInstance::MLTrainerModelSetDataset(const picojson::value& args, picojson::object& out) {
// Binds an optimizer to a trainer model (body elided here).
1844 void MlInstance::MLTrainerModelSetOptimizer(const picojson::value& args, picojson::object& out) {
// Creates a generator-backed dataset (body elided here).
1848 void MlInstance::MLTrainerDatasetCreateGenerator(const picojson::value& args,
1849 picojson::object& out) {
// Creates a file-backed trainer dataset from separate train/valid/test file
// paths and returns the dataset id in out[kId].
1853 void MlInstance::MLTrainerDatasetCreateFromFile(const picojson::value& args,
1854 picojson::object& out) {
1855 ScopeLogger("args: %s", args.serialize().c_str());
1856 CHECK_ARGS(args, kTrain, std::string, out);
1857 CHECK_ARGS(args, kValid, std::string, out);
1858 CHECK_ARGS(args, kTest, std::string, out);
1861 const std::string& train_file_path = args.get(kTrain).get<std::string>();
1862 const std::string& valid_file_path = args.get(kValid).get<std::string>();
1863 const std::string& test_file_path = args.get(kTest).get<std::string>();
1865 PlatformResult result =
1866 trainer_manager_.CreateFileDataset(id, train_file_path, valid_file_path, test_file_path);
1868 ReportError(result, &out);
1871 out[kId] = picojson::value(static_cast<double>(id));
// Sets string property |kName| = |kValue| on trainer dataset |kId|.
1875 void MlInstance::MLTrainerDatasetSetProperty(const picojson::value& args, picojson::object& out) {
1876 ScopeLogger("args: %s", args.serialize().c_str());
1877 CHECK_ARGS(args, kId, double, out);
1878 CHECK_ARGS(args, kName, std::string, out);
1879 CHECK_ARGS(args, kValue, std::string, out);
1881 auto id = static_cast<int>(args.get(kId).get<double>());
1882 auto name = args.get(kName).get<std::string>();
1883 auto value = args.get(kValue).get<std::string>();
1885 PlatformResult result = trainer_manager_.DatasetSetProperty(id, name, value);
1887 ReportError(result, &out);
1901 } // namespace extension