2 * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include "ml_instance.h"
20 #include "common/converter.h"
21 #include "common/logger.h"
22 #include "common/picojson.h"
23 #include "common/platform_result.h"
24 #include "common/tools.h"
// All dimension/location/size handling below uses fixed arrays of length
// ML_TENSOR_RANK_LIMIT and assumes exactly 4 entries; fail the build early
// if the platform headers ever change that constant.
static_assert(ML_TENSOR_RANK_LIMIT == 4,
              "This implementation requires different ML_TENSOR_RANK_LIMIT. Please fix the code.");
// Return value a JS custom filter reports on success (nnstreamer convention).
const int kCustomFilterSuccess = 0;

// Keys used in the JSON messages exchanged with the JavaScript layer.
// Kept sorted alphabetically; each constant mirrors the property name used
// by the JS side of the ml module.
const std::string kAsync = "async";
const std::string kBOOLEAN = "BOOLEAN";
const std::string kBuffer = "buffer";
const std::string kCallbackId = "callbackId";
const std::string kDataId = "dataId";
const std::string kDefinition = "definition";
const std::string kDimensions = "dimensions";
const std::string kFwType = "fwType";
const std::string kGetInputMode = "getInputMode";
const std::string kHw = "hw";
const std::string kHwType = "hwType";
const std::string kId = "id";
const std::string kIndex = "index";
const std::string kInputTensorsInfoId = "inputTensorsInfoId";
const std::string kInTensorsInfo = "inTensorsInfo";
const std::string kIsDynamicMode = "isDynamicMode";
const std::string kListenerName = "listenerName";
const std::string kLocation = "location";
const std::string kModelPath = "modelPath";
const std::string kName = "name";
const std::string kNnfw = "nnfw";
const std::string kNodeName = "nodeName";
const std::string kOpen = "open";
const std::string kOtherId = "otherId";
const std::string kOutputTensorsInfoId = "outputTensorsInfoId";
const std::string kOutTensorsInfo = "outTensorsInfo";
const std::string kPadName = "padName";
// NOTE(review): same payload key as kListenerName; kept separate on purpose,
// presumably for readability at pipeline call sites — confirm before merging.
const std::string kPipelineStateChangeListenerName = "listenerName";
const std::string kProperty = "property";
const std::string kRequestId = "requestId";
const std::string kShape = "shape";
const std::string kSize = "size";
const std::string kStatus = "status";
const std::string kSTRING = "STRING";
const std::string kTensorsDataId = "tensorsDataId";
const std::string kTensorsInfoId = "tensorsInfoId";
const std::string kTimeout = "timeout";
const std::string kType = "type";
const std::string kValue = "value";

using namespace common;
// Reports TypeMismatchError to |out| and returns from the *enclosing* function
// when |name| is missing from |args|. Restored the truncated `return;` / `}`
// so the macro actually short-circuits the caller.
#define CHECK_EXIST(args, name, out)                                            \
  if (!args.contains(name)) {                                                   \
    std::string msg = std::string(name) + " is required argument";              \
    LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), &out); \
    return;                                                                     \
  }

// CHECK_TYPE will throw AbortError by default, but it can be changed by providing
// additional parameter to the macro, i.e.:
// CHECK_TYPE(args, "name", std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
// Arity dispatch: CHECK_TYPE_X picks CHECK_TYPE_5 or CHECK_TYPE_4 depending on
// the number of arguments passed to CHECK_TYPE.
#define CHECK_TYPE(...) CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
#define CHECK_TYPE_X(_1, _2, _3, _4, _5, EXC_TYPE, ...) EXC_TYPE
#define CHECK_TYPE_5(args, name, type, out, error_type)        \
  if (!args.get(name).is<type>()) {                            \
    std::string msg = std::string(name) + " has invalid type"; \
    LogAndReportError(PlatformResult(error_type, msg), &out);  \
    return;                                                    \
  }
#define CHECK_TYPE_4(args, name, type, out) \
  CHECK_TYPE_5(args, name, type, out, ErrorCode::ABORT_ERR)

// Existence check + type check in one step; both may return from the caller.
#define CHECK_ARGS(args, name, type, out) \
  CHECK_EXIST(args, name, out)            \
  CHECK_TYPE(args, name, type, out)
103 MlInstance::MlInstance()
104 : tensors_info_manager_{&tensors_data_manager_},
105 single_manager_{&tensors_info_manager_},
106 pipeline_manager_{this, &tensors_info_manager_, &tensors_data_manager_} {
108 using namespace std::placeholders;
110 #define REGISTER_METHOD(M) RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
111 #define REGISTER_BINARY_METHOD(M) RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
112 #define REGISTER_METHOD_WITH_BINARY_ANWSER(M) \
113 RegisterSyncHandlerWithBinaryAnswer(#M, std::bind(&MlInstance::M, this, _1, _2))
115 REGISTER_METHOD(MLCheckNNFWAvailability);
116 REGISTER_METHOD(MLTensorsInfoCountGetter);
117 REGISTER_METHOD(MLTensorsInfoAddTensorInfo);
118 REGISTER_METHOD(MLTensorsInfoCreate);
119 REGISTER_METHOD(MLTensorsInfoGetDimensions);
120 REGISTER_METHOD(MLTensorsInfoSetDimensions);
121 REGISTER_METHOD(MLTensorsInfoGetTensorName);
122 REGISTER_METHOD(MLTensorsInfoSetTensorName);
123 REGISTER_METHOD(MLTensorsInfoGetTensorSize);
124 REGISTER_METHOD(MLTensorsInfoGetTensorType);
125 REGISTER_METHOD(MLTensorsInfoSetTensorType);
126 REGISTER_METHOD(MLTensorsInfoGetTensorsData);
127 REGISTER_METHOD(MLTensorsInfoClone);
128 REGISTER_METHOD(MLTensorsInfoEquals);
129 REGISTER_METHOD(MLTensorsInfoDispose);
130 REGISTER_METHOD(MLPipelineValveSetOpen);
131 REGISTER_METHOD(MLPipelineValveIsOpen);
133 REGISTER_METHOD(MLTensorsDataDispose);
134 REGISTER_METHOD(MLTensorsDataGetTensorRawData);
135 REGISTER_METHOD_WITH_BINARY_ANWSER(MLTensorsDataGetTensorRawDataBinary);
136 REGISTER_METHOD(MLTensorsDataGetTensorType);
137 REGISTER_METHOD(MLTensorsDataSetTensorRawData);
138 REGISTER_BINARY_METHOD(MLTensorsDataSetTensorRawDataBinary);
140 REGISTER_METHOD(MLSingleManagerOpenModel);
141 REGISTER_METHOD(MLSingleShotGetTensorsInfo);
142 REGISTER_METHOD(MLSingleShotSetInputInfo);
143 REGISTER_METHOD(MLSingleShotInvoke);
144 REGISTER_METHOD(MLSingleShotGetValue);
145 REGISTER_METHOD(MLSingleShotSetValue);
146 REGISTER_METHOD(MLSingleShotSetTimeout);
147 REGISTER_METHOD(MLSingleShotClose);
149 REGISTER_METHOD(MLPipelineManagerCreatePipeline);
150 REGISTER_METHOD(MLPipelineGetState);
151 REGISTER_METHOD(MLPipelineDispose);
152 REGISTER_METHOD(MLPipelineStart);
153 REGISTER_METHOD(MLPipelineStop);
154 REGISTER_METHOD(MLPipelineGetNodeInfo);
155 REGISTER_METHOD(MLPipelineGetSwitch);
156 REGISTER_METHOD(MLPipelineSwitchGetPadList);
157 REGISTER_METHOD(MLPipelineSwitchSelect);
158 REGISTER_METHOD(MLPipelineGetValve);
159 REGISTER_METHOD(MLPipelineNodeInfoGetProperty);
160 REGISTER_METHOD(MLPipelineNodeInfoSetProperty);
161 REGISTER_METHOD(MLPipelineGetSource);
162 REGISTER_METHOD(MLPipelineGetInputTensorsInfo);
163 REGISTER_METHOD(MLPipelineSourceInputData);
164 REGISTER_METHOD(MLPipelineRegisterSinkListener);
165 REGISTER_METHOD(MLPipelineUnregisterSinkListener);
166 REGISTER_METHOD(MLPipelineManagerRegisterCustomFilter);
167 REGISTER_METHOD(MLPipelineManagerCustomFilterOutput);
168 REGISTER_METHOD(MLPipelineManagerUnregisterCustomFilter);
170 #undef REGISTER_METHOD
173 MlInstance::~MlInstance() {
178 TensorsInfoManager& MlInstance::GetTensorsInfoManager() {
179 return tensors_info_manager_;
182 TensorsDataManager& MlInstance::GetTensorsDataManager() {
183 return tensors_data_manager_;
186 void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
187 ScopeLogger("args: %s", args.serialize().c_str());
188 CHECK_EXIST(args, kNnfw, out)
189 CHECK_EXIST(args, kHw, out)
191 std::string nnfw = args.get(kNnfw).get<std::string>();
192 std::string hw = args.get(kHw).get<std::string>();
193 bool availability_val = util::CheckNNFWAvailability(nnfw, hw);
195 picojson::value available = picojson::value{availability_val};
196 ReportSuccess(available, out);
199 void MlInstance::MLTensorsInfoCreate(const picojson::value& args, picojson::object& out) {
200 ScopeLogger("args: %s", args.serialize().c_str());
202 TensorsInfo* tensorsInfo = GetTensorsInfoManager().CreateTensorsInfo();
203 if (nullptr == tensorsInfo) {
204 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
205 ("Could not create new TensorsInfo handle"));
208 out[kId] = picojson::value(static_cast<double>(tensorsInfo->Id()));
212 void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out) {
213 ScopeLogger("args: %s", args.serialize().c_str());
214 CHECK_ARGS(args, kTensorsInfoId, double, out);
216 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
217 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
218 if (nullptr == tensorsInfo) {
219 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
220 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
223 unsigned int count = 0;
224 PlatformResult result = tensorsInfo->NativeGetCount(&count);
226 ReportError(result, &out);
229 picojson::value val = picojson::value{static_cast<double>(count)};
230 ReportSuccess(val, out);
233 void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
234 ScopeLogger("args: %s", args.serialize().c_str());
235 CHECK_ARGS(args, kTensorsInfoId, double, out);
236 CHECK_ARGS(args, kType, std::string, out);
238 CHECK_EXIST(args, kName, out);
239 CHECK_ARGS(args, kDimensions, picojson::array, out);
241 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
242 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
243 if (nullptr == tensorsInfo) {
244 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
245 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
249 const std::string& tensorType = args.get(kType).get<std::string>();
250 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
251 PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
253 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
255 ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
260 if (args.get(kName).is<std::string>()) {
261 name = args.get(kName).get<std::string>();
262 LoggerD("name: %s", name.c_str());
265 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
266 auto dim = args.get(kDimensions).get<picojson::array>();
267 result = util::GetDimensionsFromJsonArray(dim, dimensions);
269 LogAndReportError(result, &out);
273 result = tensorsInfo->AddTensorInfo(name, tensorTypeEnum, dimensions);
275 LogAndReportError(result, &out);
279 int count = tensorsInfo->Count() - 1;
281 picojson::value val = picojson::value{static_cast<double>(count)};
282 ReportSuccess(val, out);
285 void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args, picojson::object& out) {
286 ScopeLogger("args: %s", args.serialize().c_str());
287 CHECK_ARGS(args, kTensorsInfoId, double, out);
288 CHECK_ARGS(args, kIndex, double, out);
290 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
291 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
292 if (nullptr == tensorsInfo) {
293 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
294 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
297 int index = static_cast<int>(args.get(kIndex).get<double>());
298 unsigned int dim[ML_TENSOR_RANK_LIMIT];
299 PlatformResult result = tensorsInfo->NativeGetTensorDimensions(index, dim);
301 LogAndReportError(result, &out);
304 picojson::array array = picojson::array{};
305 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
309 array.push_back(picojson::value{static_cast<double>(dim[i])});
311 picojson::value val = picojson::value{array};
312 ReportSuccess(val, out);
315 void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojson::object& out) {
316 ScopeLogger("args: %s", args.serialize().c_str());
317 CHECK_ARGS(args, kTensorsInfoId, double, out);
318 CHECK_ARGS(args, kIndex, double, out);
319 CHECK_ARGS(args, kDimensions, picojson::array, out);
321 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
322 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
323 if (nullptr == tensorsInfo) {
324 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
325 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
329 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
330 auto dim = args.get(kDimensions).get<picojson::array>();
331 PlatformResult result = util::GetDimensionsFromJsonArray(dim, dimensions);
333 LogAndReportError(result, &out);
337 int index = static_cast<int>(args.get(kIndex).get<double>());
338 result = tensorsInfo->NativeSetTensorDimensions(index, dimensions);
340 LogAndReportError(result, &out);
346 void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args, picojson::object& out) {
347 ScopeLogger("args: %s", args.serialize().c_str());
348 CHECK_ARGS(args, kTensorsInfoId, double, out);
349 CHECK_ARGS(args, kIndex, double, out);
351 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
352 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
353 if (nullptr == tensorsInfo) {
354 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
355 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
358 int index = static_cast<int>(args.get(kIndex).get<double>());
360 PlatformResult result = tensorsInfo->NativeGetTensorName(index, &name);
362 LogAndReportError(result, &out);
365 picojson::value val = picojson::value{name};
366 ReportSuccess(val, out);
369 void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out) {
370 ScopeLogger("args: %s", args.serialize().c_str());
371 CHECK_ARGS(args, kTensorsInfoId, double, out);
372 CHECK_ARGS(args, kIndex, double, out);
373 CHECK_ARGS(args, kName, std::string, out);
375 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
376 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
377 if (nullptr == tensorsInfo) {
378 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
379 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
383 int index = static_cast<int>(args.get(kIndex).get<double>());
384 const std::string& name = args.get(kName).get<std::string>();
385 PlatformResult result = tensorsInfo->NativeSetTensorName(index, name);
387 LogAndReportError(result, &out);
393 void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out) {
394 ScopeLogger("args: %s", args.serialize().c_str());
395 CHECK_ARGS(args, kTensorsInfoId, double, out);
396 CHECK_ARGS(args, kIndex, double, out);
398 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
399 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
400 if (nullptr == tensorsInfo) {
401 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
402 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
405 int index = static_cast<int>(args.get(kIndex).get<double>());
407 PlatformResult result = tensorsInfo->NativeGetTensorSize(index, &size);
409 LogAndReportError(result, &out);
413 picojson::value val = picojson::value{static_cast<double>(size)};
414 ReportSuccess(val, out);
417 void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out) {
418 ScopeLogger("args: %s", args.serialize().c_str());
419 CHECK_ARGS(args, kTensorsInfoId, double, out);
420 CHECK_ARGS(args, kIndex, double, out);
422 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
423 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
424 if (nullptr == tensorsInfo) {
425 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
426 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
430 int index = static_cast<int>(args.get(kIndex).get<double>());
431 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
432 PlatformResult result = tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
434 LogAndReportError(result, &out);
437 std::string tensorTypeString;
438 result = types::TensorTypeEnum.getName(tensorTypeEnum, &tensorTypeString);
440 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
442 ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
446 picojson::value val = picojson::value{tensorTypeString};
447 ReportSuccess(val, out);
450 void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out) {
451 ScopeLogger("args: %s", args.serialize().c_str());
452 CHECK_ARGS(args, kTensorsInfoId, double, out);
453 CHECK_ARGS(args, kIndex, double, out);
454 CHECK_ARGS(args, kType, std::string, out);
456 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
457 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
458 if (nullptr == tensorsInfo) {
459 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
460 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
464 const std::string& tensorType = args.get(kType).get<std::string>();
465 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
466 PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
468 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
470 ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
474 int index = static_cast<int>(args.get(kIndex).get<double>());
475 result = tensorsInfo->NativeSetTensorType(index, tensorTypeEnum);
477 LogAndReportError(result, &out);
483 void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out) {
484 ScopeLogger("args: %s", args.serialize().c_str());
485 CHECK_ARGS(args, kTensorsInfoId, double, out);
487 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
488 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
489 if (nullptr == tensorsInfo) {
490 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
491 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
495 TensorsData* tensorsData = GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
497 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
498 ("Could not create TensorsData"));
502 out[kTensorsDataId] = picojson::value(static_cast<double>(tensorsData->Id()));
503 out[kTensorsInfoId] = picojson::value(static_cast<double>(tensorsData->TensorsInfoId()));
507 void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::object& out) {
508 ScopeLogger("args: %s", args.serialize().c_str());
509 CHECK_ARGS(args, kTensorsInfoId, double, out);
511 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
512 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
513 if (nullptr == tensorsInfo) {
514 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
515 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
519 TensorsInfo* cloned = GetTensorsInfoManager().CloneTensorsInfo(tensorsInfo);
520 if (nullptr == cloned) {
521 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
522 ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
526 out["id"] = picojson::value(static_cast<double>(cloned->Id()));
530 void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::object& out) {
531 ScopeLogger("args: %s", args.serialize().c_str());
532 CHECK_ARGS(args, kTensorsInfoId, double, out);
533 CHECK_ARGS(args, kOtherId, double, out);
535 int firstId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
536 int secondId = static_cast<int>(args.get(kOtherId).get<double>());
538 TensorsInfo* first = GetTensorsInfoManager().GetTensorsInfo(firstId);
539 if (nullptr == first) {
540 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
541 ("Could not find TensorsInfo handle with given id: %d", firstId));
545 TensorsInfo* second = GetTensorsInfoManager().GetTensorsInfo(secondId);
546 if (nullptr == second) {
547 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
548 ("Could not find TensorsInfo handle with given id: %d", secondId));
552 bool equals = first->Equals(second);
553 picojson::value val = picojson::value{equals};
554 ReportSuccess(val, out);
557 void MlInstance::MLTensorsInfoDispose(const picojson::value& args, picojson::object& out) {
558 ScopeLogger("args: %s", args.serialize().c_str());
559 CHECK_ARGS(args, kTensorsInfoId, double, out);
560 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
562 PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
564 LogAndReportError(result, &out);
569 void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::object& out) {
570 ScopeLogger("args: %s", args.serialize().c_str());
571 CHECK_ARGS(args, kTensorsDataId, double, out);
572 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
574 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
575 if (nullptr == tensors_data) {
576 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
577 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
581 if (!tensors_data->DisposableFromJS()) {
586 // Dispose underlying tensorsInfo
587 PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
589 LogAndReportError(result, &out);
593 result = GetTensorsDataManager().DisposeTensorsData(tensors_data_id);
595 LogAndReportError(result, &out);
601 void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
602 ScopeLogger("args: %s", args.serialize().c_str());
603 CHECK_ARGS(args, kTensorsDataId, double, out);
604 CHECK_ARGS(args, kIndex, double, out);
605 CHECK_ARGS(args, kLocation, picojson::array, out);
606 CHECK_ARGS(args, kSize, picojson::array, out);
608 int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
609 int index = static_cast<int>(args.get(kIndex).get<double>());
611 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
612 if (nullptr == tensors_data) {
613 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
614 ("Could not find TensorsData handle with given id: %d", tensor_data_id));
618 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
619 PlatformResult result =
620 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
622 LogAndReportError(result, &out);
626 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
627 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
629 LogAndReportError(result, &out);
633 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
634 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
637 LogAndReportError(result, &out);
641 TensorRawData raw_data;
642 result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
644 LogAndReportError(result, &out);
648 std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
649 out[kBuffer] = picojson::value(picojson::string_type, true);
650 common::encode_binary_in_string(out_data, out[kBuffer].get<std::string>());
652 out[kType] = picojson::value(raw_data.type_str);
653 picojson::array shape = picojson::array{};
654 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
655 shape.push_back(picojson::value{static_cast<double>(raw_data.shape[i])});
657 out[kShape] = picojson::value{shape};
662 void MlInstance::MLTensorsDataGetTensorRawDataBinary(const picojson::value& args,
663 std::vector<uint8_t>* out) {
664 ScopeLogger("args: %s", args.serialize().c_str());
665 // TODO handle errors to out
666 // CHECK_ARGS(args, kTensorsDataId, double, out);
667 // CHECK_ARGS(args, kIndex, double, out);
668 // CHECK_ARGS(args, kLocation, picojson::array, out);
669 // CHECK_ARGS(args, kSize, picojson::array, out);
671 int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
672 int index = static_cast<int>(args.get(kIndex).get<double>());
674 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
675 if (nullptr == tensors_data) {
676 LoggerE("Could not find TensorsData handle with given id: %d", tensor_data_id);
677 tools::ReportErrorToBinary(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"),
682 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
683 PlatformResult result =
684 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
686 LoggerE("Reporting error.");
687 tools::ReportErrorToBinary(result, out);
691 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
692 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
694 LoggerE("Reporting error.");
695 tools::ReportErrorToBinary(result, out);
699 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
700 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
703 LoggerE("Reporting error.");
704 tools::ReportErrorToBinary(result, out);
708 TensorRawData raw_data;
709 result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
711 LoggerE("Reporting error.");
712 tools::ReportErrorToBinary(result, out);
716 picojson::value result_json = picojson::value(picojson::object());
717 auto& out_json = result_json.get<picojson::object>();
719 out_json[kType] = picojson::value(raw_data.type_str);
720 picojson::array shape = picojson::array{};
721 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
722 shape.push_back(picojson::value{static_cast<double>(raw_data.shape[i])});
724 out_json[kShape] = picojson::value{shape};
726 std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
729 // 4 byte === JSON lenght (N)
730 // N bytest === JSON data
731 tools::ReportSuccessToBinary(result_json, out);
732 // 4 byte === buffer length (M)
733 // M bytes === buffer data
734 tools::ReportDataToBinary(out_data, out);
737 void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
738 ScopeLogger("args: %s", args.serialize().c_str());
739 CHECK_ARGS(args, kTensorsDataId, double, out);
740 CHECK_ARGS(args, kIndex, double, out);
742 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
743 int index = static_cast<int>(args.get(kIndex).get<double>());
745 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
746 if (nullptr == tensors_data) {
747 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
748 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
752 std::string tensor_type_string;
753 PlatformResult result =
754 types::TensorTypeEnum.getName(tensors_data->GetTensorType(index), &tensor_type_string);
756 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
758 ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
762 picojson::value val = picojson::value{tensor_type_string};
763 ReportSuccess(val, out);
766 void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
768 CHECK_ARGS(args, kTensorsDataId, double, out);
769 CHECK_ARGS(args, kIndex, double, out);
770 CHECK_ARGS(args, kBuffer, std::string, out);
771 CHECK_ARGS(args, kLocation, picojson::array, out);
772 CHECK_ARGS(args, kSize, picojson::array, out);
773 LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
774 LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
775 LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
776 LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
778 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
779 int index = static_cast<int>(args.get(kIndex).get<double>());
781 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
782 if (nullptr == tensors_data) {
783 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
784 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
788 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
789 PlatformResult result =
790 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
792 LogAndReportError(result, &out);
796 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
797 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
799 LogAndReportError(result, &out);
803 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
804 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
807 LogAndReportError(result, &out);
810 const std::string& str_buffer = args.get(kBuffer).get<std::string>();
811 std::vector<std::uint8_t> buffer;
812 common::decode_binary_from_string(str_buffer, buffer);
814 TensorRawData raw_data{buffer.data(), buffer.size()};
815 result = tensors_data->SetTensorRawData(index, location, size, raw_data);
817 LogAndReportError(result, &out);
// Binary-protocol variant of MLTensorsDataSetTensorRawData: writes raw bytes
// into one tensor of an existing TensorsData object. The incoming message
// bundles the serialized JSON call arguments and the raw tensor buffer in a
// single packet (layout documented below).
824 void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t data_size,
825 picojson::object& out) {
828 // METHOD_ID WAS ALREADY REMOVED during message handling
829 // other data packed with following format:
831 // 1 byte === methodIndex /// already parsed
832 // 4 byte === JSON length (N)
833 // 4 byte === buffer length (M)
834 // N bytes === JSON data
835 // M bytes === buffer data
// Decode the two big-endian uint32 header fields (JSON length, buffer length).
// NOTE(review): data[] is (possibly signed) char — a byte >= 0x80 sign-extends
// before the shift/add; presumably lengths stay small enough in practice, but
// confirm or route through unsigned char.
837 unsigned int call_args_len_begin = 0;
838 unsigned int call_args_len = static_cast<unsigned int>(
839 (data[call_args_len_begin] << 24) + (data[call_args_len_begin + 1] << 16) +
840 (data[call_args_len_begin + 2] << 8) + (data[call_args_len_begin + 3]));
842 unsigned int buffer_len_begin = call_args_len_begin + 4;
843 unsigned int buffer_len = static_cast<unsigned int>(
844 (data[buffer_len_begin] << 24) + (data[buffer_len_begin + 1] << 16) +
845 (data[buffer_len_begin + 2] << 8) + (data[buffer_len_begin + 3]));
// Re-parse the embedded JSON slice into picojson, then validate the expected
// argument fields exactly like the non-binary handlers do.
847 unsigned int call_args_begin = buffer_len_begin + 4;
848 std::string call_args(data + call_args_begin, call_args_len);
850 picojson::value args;
851 picojson::parse(args, call_args);
// The raw tensor payload follows immediately after the JSON slice.
853 unsigned int buffer_begin = call_args_begin + call_args_len;
855 CHECK_ARGS(args, kTensorsDataId, double, out);
856 CHECK_ARGS(args, kIndex, double, out);
857 CHECK_ARGS(args, kLocation, picojson::array, out);
858 CHECK_ARGS(args, kSize, picojson::array, out);
859 LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
860 LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
861 LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
862 LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
864 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
865 int index = static_cast<int>(args.get(kIndex).get<double>());
// Look up the target TensorsData handle registered by an earlier call.
867 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
868 if (nullptr == tensors_data) {
869 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
870 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
// Translate the JSON "location" array into a fixed-rank offset array.
874 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
875 PlatformResult result =
876 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
878 LogAndReportError(result, &out);
// Fetch the tensor's native dimensions to validate the requested write window.
882 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
883 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
885 LogAndReportError(result, &out);
889 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
890 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
893 LogAndReportError(result, &out);
// Non-owning view into the incoming message buffer; valid only for the
// duration of this call, so SetTensorRawData must copy the bytes it needs.
897 TensorRawData raw_data{reinterpret_cast<uint8_t*>(const_cast<char*>(data + buffer_begin)),
899 result = tensors_data->SetTensorRawData(index, location, size, raw_data);
901 LogAndReportError(result, &out);
// Opens a single-shot model. Resolves optional input/output TensorsInfo
// handles, maps the framework/hardware enum strings, then runs the open
// either synchronously or (when "async" is true) on the worker thread,
// posting the result back through the stored callback id.
908 void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out) {
909 ScopeLogger("args: %s", args.serialize().c_str());
910 CHECK_ARGS(args, kModelPath, std::string, out);
911 CHECK_ARGS(args, kInTensorsInfo, double, out);
912 CHECK_ARGS(args, kOutTensorsInfo, double, out);
913 CHECK_ARGS(args, kFwType, std::string, out);
914 CHECK_ARGS(args, kHwType, std::string, out);
915 CHECK_ARGS(args, kIsDynamicMode, bool, out);
917 const auto& model_path = common::tools::ConvertUriToPath(args.get(kModelPath).get<std::string>());
918 CHECK_STORAGE_ACCESS(model_path, &out);
// kNoId means the caller did not supply input info; otherwise the id must
// resolve to a registered TensorsInfo handle.
920 TensorsInfo* in_tensors_info = nullptr;
921 auto inTensorId = static_cast<int>(args.get(kInTensorsInfo).get<double>());
922 if (kNoId != inTensorId) {
923 in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
924 if (nullptr == in_tensors_info) {
925 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
926 ("Could not find TensorsInfo handle with given id: %d", inTensorId));
// Same optional lookup for the output tensors description.
931 TensorsInfo* out_tensors_info = nullptr;
932 auto outTensorId = static_cast<int>(args.get(kOutTensorsInfo).get<double>());
933 if (kNoId != outTensorId) {
934 out_tensors_info = GetTensorsInfoManager().GetTensorsInfo(outTensorId);
935 if (nullptr == out_tensors_info) {
936 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
937 ("Could not find TensorsInfo handle with given id: %d", outTensorId));
// Map the JS-side enum strings onto the native nnfw/hw enums.
942 ml_nnfw_type_e nnfw_e = ML_NNFW_TYPE_ANY;
943 PlatformResult result =
944 types::NNFWTypeEnum.getValue(args.get(kFwType).get<std::string>(), &nnfw_e);
946 LogAndReportError(result, &out);
950 ml_nnfw_hw_e hw_e = ML_NNFW_HW_ANY;
951 result = types::HWTypeEnum.getValue(args.get(kHwType).get<std::string>(), &hw_e);
953 LogAndReportError(result, &out);
957 auto is_dynamic_mode = args.get(kIsDynamicMode).get<bool>();
// Shared open logic for both sync and async paths. Captures by value except
// the TensorsInfo pointers, which are raw — NOTE(review): presumably their
// manager keeps them alive until the worker job runs; confirm.
959 auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
960 is_dynamic_mode](decltype(out) out) {
961 PlatformResult result = common::tools::CheckFileAvailability(model_path);
964 PlatformResult(ErrorCode::NOT_FOUND_ERR, "File does not exist or is not accessible"),
965 &out, ("File does not exist or is not accessible: %s", model_path.c_str()));
970 result = single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
971 is_dynamic_mode, &res_id);
973 ReportError(result, &out);
// On success the new single-shot handle id is returned to JS.
977 out[kId] = picojson::value(static_cast<double>(res_id));
// "async" is optional; absence (or a non-bool value) means synchronous.
982 (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
988 CHECK_ARGS(args, kCallbackId, double, out);
989 double callback_id = args.get(kCallbackId).get<double>();
// Async path: run the open on the worker and post the response message,
// tagging it with the callback id so JS can route it.
990 this->worker_.add_job([this, callback_id, logic] {
991 picojson::value response = picojson::value(picojson::object());
992 picojson::object& async_out = response.get<picojson::object>();
993 async_out[kCallbackId] = picojson::value(callback_id);
995 this->PostMessage(response.serialize().c_str());
// Returns (as a new TensorsInfo handle id) the input or output tensors
// information of an opened single-shot model.
1003 void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args, picojson::object& out) {
1004 ScopeLogger("args: %s", args.serialize().c_str());
1005 CHECK_ARGS(args, kId, double, out);
1006 CHECK_ARGS(args, kGetInputMode, bool, out);
1008 auto id = static_cast<int>(args.get(kId).get<double>());
1009 // true means gathering input data; false means gathering output data
// NOTE(review): the bool is widened to int here; GetNativeTensorsInfo is
// presumably fine with 0/1 — confirm its parameter type.
1010 auto get_input_mode = static_cast<int>(args.get(kGetInputMode).get<bool>());
1013 auto ret = single_manager_.GetNativeTensorsInfo(id, get_input_mode, &res_id);
1015 ReportError(ret, &out);
// Hand the freshly created TensorsInfo handle id back to JS.
1019 out[kId] = picojson::value(static_cast<double>(res_id));
// Applies a new input TensorsInfo to an opened single-shot model and returns
// the id of a clone of that info for the JS layer to keep.
1023 void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson::object& out) {
1024 ScopeLogger("args: %s", args.serialize().c_str());
1025 CHECK_ARGS(args, kId, double, out);
1026 CHECK_ARGS(args, kInTensorsInfo, double, out);
1028 auto id = static_cast<int>(args.get(kId).get<double>());
1029 auto inTensorId = static_cast<int>(args.get(kInTensorsInfo).get<double>());
1031 TensorsInfo* in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
1032 if (nullptr == in_tensors_info) {
1033 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1034 ("Could not find TensorsInfo handle with given id: %d", inTensorId));
// NOTE(review): a clone is registered and its id is reported below, while the
// native call receives the ORIGINAL info — looks intentional (snapshot for the
// JS side, original consumed by the native layer), but confirm.
1038 TensorsInfo* clone = GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
1040 auto ret = single_manager_.SetNativeInputInfo(id, in_tensors_info);
1042 ReportError(ret, &out);
1046 out[kId] = picojson::value(static_cast<double>(clone->Id()));
// Runs inference on a single-shot model with the given input TensorsData,
// synchronously or asynchronously. On success the output TensorsData and
// TensorsInfo handle ids are returned to JS.
1050 void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::object& out) {
1051 ScopeLogger("args: %s", args.serialize().c_str());
1052 CHECK_ARGS(args, kId, double, out);
1053 CHECK_ARGS(args, kTensorsDataId, double, out);
1055 int id = static_cast<int>(args.get(kId).get<double>());
1056 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
// Optional "async" flag; missing or non-bool means synchronous.
1058 (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
1060 TensorsData* in_tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
1061 if (async && in_tensors_data) {
1062 // in case of async flow need to prevent destroying entry data during invoke
1063 // from JS, creation of a copy
1064 in_tensors_data = GetTensorsInfoManager().CloneNativeTensorWithData(
1065 in_tensors_data->GetTensorsInfo()->Handle(), in_tensors_data->Handle());
1067 if (nullptr == in_tensors_data) {
1068 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
1069 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
// Shared invoke logic for both paths; in async mode it also releases the
// defensive copy created above once the native invoke has finished.
1073 auto logic = [this, id, tensors_data_id, in_tensors_data, async](decltype(out) out) {
1074 TensorsData* out_tensors_data = nullptr;
1075 auto ret = single_manager_.Invoke(id, in_tensors_data, &out_tensors_data);
1077 // in case of async flow, the in_tensor_data with underlying TensorsInfo
1078 // was copied, thus need to be released here
1079 GetTensorsInfoManager().DisposeTensorsInfo(in_tensors_data->GetTensorsInfo());
1080 GetTensorsDataManager().DisposeTensorsData(in_tensors_data);
1083 ReportError(ret, &out);
// Expose the output handles so JS can wrap them.
1087 out[kTensorsDataId] = picojson::value(static_cast<double>(out_tensors_data->Id()));
1088 out[kTensorsInfoId] = picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
// Async path: run on the worker and post the tagged response back to JS.
1095 CHECK_ARGS(args, kCallbackId, double, out);
1096 double callback_id = args.get(kCallbackId).get<double>();
1097 this->worker_.add_job([this, callback_id, logic] {
1098 picojson::value response = picojson::value(picojson::object());
1099 picojson::object& async_out = response.get<picojson::object>();
1100 async_out[kCallbackId] = picojson::value(callback_id);
1102 this->PostMessage(response.serialize().c_str());
// Reads a named string property of a single-shot model and reports it back.
1110 void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::object& out) {
1111 ScopeLogger("args: %s", args.serialize().c_str());
1112 CHECK_ARGS(args, kId, double, out);
1113 CHECK_ARGS(args, kName, std::string, out);
1115 auto id = static_cast<int>(args.get(kId).get<double>());
1116 const auto& name = args.get(kName).get<std::string>();
1118 auto ret = single_manager_.GetValue(id, name, value);
1120 ReportError(ret, &out);
// Wrap the retrieved value for the success response.
1124 picojson::value val = picojson::value{value};
1125 ReportSuccess(val, out);
// Sets a named string property on a single-shot model.
1128 void MlInstance::MLSingleShotSetValue(const picojson::value& args, picojson::object& out) {
1129 ScopeLogger("args: %s", args.serialize().c_str());
1130 CHECK_ARGS(args, kId, double, out);
1131 CHECK_ARGS(args, kName, std::string, out);
1132 CHECK_ARGS(args, kValue, std::string, out);
1134 auto id = static_cast<int>(args.get(kId).get<double>());
1135 const auto& name = args.get(kName).get<std::string>();
1136 const auto& value = args.get(kValue).get<std::string>();
1138 auto ret = single_manager_.SetValue(id, name, value);
1140 ReportError(ret, &out);
// Sets the invoke timeout (in milliseconds, per the JS API) of a single-shot
// model. The JS double is truncated to unsigned long for the native call.
1147 void MlInstance::MLSingleShotSetTimeout(const picojson::value& args, picojson::object& out) {
1148 ScopeLogger("args: %s", args.serialize().c_str());
1149 CHECK_ARGS(args, kId, double, out);
1150 CHECK_ARGS(args, kTimeout, double, out);
1152 auto id = static_cast<int>(args.get(kId).get<double>());
1153 auto timeout = static_cast<unsigned long>(args.get(kTimeout).get<double>());
1155 auto ret = single_manager_.SetTimeout(id, timeout);
1157 ReportError(ret, &out);
// Closes a single-shot model handle.
1164 void MlInstance::MLSingleShotClose(const picojson::value& args, picojson::object& out) {
1165 ScopeLogger("args: %s", args.serialize().c_str());
1166 CHECK_ARGS(args, kId, double, out);
1168 auto id = static_cast<int>(args.get(kId).get<double>());
1170 auto ret = single_manager_.Close(id);
1172 ReportError(ret, &out);
// Validates the argument types of a CreatePipeline call: id must be a number,
// definition a string, and the state-change listener name either a string or
// null (null means no listener). Returns true when the arguments are INVALID.
1181 bool CreatePipelineArgumentsAreInvalid(const picojson::value& args) {
1184 auto arguments_valid = args.get(kId).is<double>();
1185 arguments_valid &= args.get(kDefinition).is<std::string>();
1186 arguments_valid &= (args.get(kPipelineStateChangeListenerName).is<std::string>() ||
1187 args.get(kPipelineStateChangeListenerName).is<picojson::null>());
1188 LoggerD("CreatePipeline arguments are %s", arguments_valid ? "valid" : "invalid");
1190 return !arguments_valid;
// Creates a GStreamer-style pipeline from a textual definition, optionally
// registering a state-change listener (listener name may be null in JS).
1195 void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
1196 picojson::object& out) {
1197 ScopeLogger("args: %s", args.serialize().c_str());
1199 if (CreatePipelineArgumentsAreInvalid(args)) {
1200 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"}, &out);
1204 auto id = static_cast<int>(args.get(kId).get<double>());
1205 auto definition = args.get(kDefinition).get<std::string>();
// Null listener name falls back to the (elided) default branch.
1206 auto state_change_listener_name =
1207 args.get(kPipelineStateChangeListenerName).is<std::string>()
1208 ? args.get(kPipelineStateChangeListenerName).get<std::string>()
1211 auto ret = pipeline_manager_.CreatePipeline(id, definition, state_change_listener_name);
1214 ReportError(ret, &out);
// Reports the current state string of a pipeline.
1221 void MlInstance::MLPipelineGetState(const picojson::value& args, picojson::object& out) {
1222 ScopeLogger("args: %s", args.serialize().c_str());
// Manual validation here (not CHECK_ARGS), matching MLPipelineDispose.
1224 if (!args.get(kId).is<double>()) {
1225 LoggerD("id is not a number");
1226 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1229 auto id = static_cast<int>(args.get(kId).get<double>());
// The manager writes the state directly into the string held by state_value,
// so no copy is needed before reporting success.
1231 picojson::value state_value{std::string{}};
1232 std::string* state_ptr = &state_value.get<std::string>();
1233 auto ret = pipeline_manager_.GetPipelineState(id, state_ptr);
1235 ReportError(ret, &out);
1239 ReportSuccess(state_value, out);
// Starts (plays) a pipeline.
1242 void MlInstance::MLPipelineStart(const picojson::value& args, picojson::object& out) {
1243 ScopeLogger("args: %s", args.serialize().c_str());
1245 CHECK_ARGS(args, kId, double, out);
1247 auto id = static_cast<int>(args.get(kId).get<double>());
1249 PlatformResult result = pipeline_manager_.Start(id);
1252 ReportError(result, &out);
// Stops (pauses) a pipeline.
1259 void MlInstance::MLPipelineStop(const picojson::value& args, picojson::object& out) {
1260 ScopeLogger("args: %s", args.serialize().c_str());
1262 CHECK_ARGS(args, kId, double, out);
1264 auto id = static_cast<int>(args.get(kId).get<double>());
1266 PlatformResult result = pipeline_manager_.Stop(id);
1269 LogAndReportError(result, &out);
// Disposes a pipeline and releases its resources.
1276 void MlInstance::MLPipelineDispose(const picojson::value& args, picojson::object& out) {
1277 ScopeLogger("args: %s", args.serialize().c_str());
// Manual validation here (not CHECK_ARGS), matching MLPipelineGetState.
1279 if (!args.get(kId).is<double>()) {
1280 LoggerD("id is not a number");
1281 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1285 auto id = static_cast<int>(args.get(kId).get<double>());
1286 auto ret = pipeline_manager_.DisposePipeline(id);
1289 ReportError(ret, &out);
// Looks up a named node of a pipeline (registers a NodeInfo on the native side).
1296 void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::object& out) {
1297 ScopeLogger("args: %s", args.serialize().c_str());
1299 CHECK_ARGS(args, kId, double, out);
1300 CHECK_ARGS(args, kName, std::string, out);
1302 auto name = args.get(kName).get<std::string>();
1303 auto id = static_cast<int>(args.get(kId).get<double>());
1305 PlatformResult result = pipeline_manager_.GetNodeInfo(id, name);
1308 LogAndReportError(result, &out);
// Looks up a named source element of a pipeline.
1315 void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::object& out) {
1316 ScopeLogger("args: %s", args.serialize().c_str());
1318 CHECK_ARGS(args, kId, double, out);
1319 CHECK_ARGS(args, kName, std::string, out);
1321 auto name = args.get(kName).get<std::string>();
1322 auto id = static_cast<int>(args.get(kId).get<double>());
1324 PlatformResult result = pipeline_manager_.GetSource(id, name);
1327 LogAndReportError(result, &out);
// Looks up a named switch element of a pipeline and reports its type string.
1334 void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::object& out) {
1335 ScopeLogger("args: %s", args.serialize().c_str());
// Manual validation (not CHECK_ARGS) so specific debug messages can be logged.
1337 if (!args.get(kId).is<double>()) {
1338 LoggerD("id is not a number");
1339 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1343 if (!args.get(kName).is<std::string>()) {
1344 LoggerD("name is not a string");
1345 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid name"}, &out);
1349 auto name = args.get(kName).get<std::string>();
1350 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1353 auto ret = pipeline_manager_.GetSwitch(name, pipeline_id, &type);
1355 LogAndReportError(ret, &out);
// Literal key "type" is part of the JS protocol — do not rename.
1359 out["type"] = picojson::value{type};
// Looks up a named valve element of a pipeline.
1363 void MlInstance::MLPipelineGetValve(const picojson::value& args, picojson::object& out) {
1364 ScopeLogger("args: %s", args.serialize().c_str());
1366 CHECK_ARGS(args, kId, double, out);
1367 CHECK_ARGS(args, kName, std::string, out);
1369 auto name = args.get(kName).get<std::string>();
// NOTE(review): id is kept as double here (no static_cast<int> like sibling
// handlers); presumably GetValve takes the id by a type double converts to —
// confirm against its signature.
1370 auto pipeline_id = args.get(kId).get<double>();
1372 auto ret = pipeline_manager_.GetValve(name, pipeline_id);
1374 LogAndReportError(ret, &out);
// Registers a JS listener (by name) for data arriving at a pipeline sink.
1381 void MlInstance::MLPipelineRegisterSinkListener(const picojson::value& args,
1382 picojson::object& out) {
1383 ScopeLogger("args: %s", args.serialize().c_str());
1385 CHECK_ARGS(args, kId, double, out);
1386 CHECK_ARGS(args, kName, std::string, out);
1387 CHECK_ARGS(args, kListenerName, std::string, out);
1389 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1390 auto sink_name = args.get(kName).get<std::string>();
1391 auto listener_name = args.get(kListenerName).get<std::string>();
1393 auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id, listener_name);
1395 LogAndReportError(ret, &out);
// Unregisters the listener previously attached to a pipeline sink.
1402 void MlInstance::MLPipelineUnregisterSinkListener(const picojson::value& args,
1403 picojson::object& out) {
1404 ScopeLogger("args: %s", args.serialize().c_str());
1406 CHECK_ARGS(args, kId, double, out);
1407 CHECK_ARGS(args, kName, std::string, out);
1409 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1410 auto sink_name = args.get(kName).get<std::string>();
1412 auto ret = pipeline_manager_.UnregisterSinkListener(sink_name, pipeline_id);
1414 LogAndReportError(ret, &out);
// Registers a custom filter implemented in JS: resolves the input/output
// TensorsInfo handles describing the filter's tensors and wires the named JS
// listener up as the filter callback.
1421 void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& args,
1422 picojson::object& out) {
1423 ScopeLogger("args: %s", args.serialize().c_str());
1425 CHECK_ARGS(args, kName, std::string, out);
1426 CHECK_ARGS(args, kListenerName, std::string, out);
1427 CHECK_ARGS(args, kInputTensorsInfoId, double, out);
1428 CHECK_ARGS(args, kOutputTensorsInfoId, double, out);
1430 const auto& custom_filter_name = args.get(kName).get<std::string>();
1431 const auto& listener_name = args.get(kListenerName).get<std::string>();
1432 auto input_tensors_info_id = static_cast<int>(args.get(kInputTensorsInfoId).get<double>());
1433 auto output_tensors_info_id = static_cast<int>(args.get(kOutputTensorsInfoId).get<double>());
// Both ids must resolve to previously registered TensorsInfo handles.
1435 TensorsInfo* input_tensors_info_ptr =
1436 GetTensorsInfoManager().GetTensorsInfo(input_tensors_info_id);
1437 if (!input_tensors_info_ptr) {
1439 PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1440 ("Could not find TensorsInfo handle with given id: %d", input_tensors_info_id));
1444 TensorsInfo* output_tensors_info_ptr =
1445 GetTensorsInfoManager().GetTensorsInfo(output_tensors_info_id);
1446 if (!output_tensors_info_ptr) {
1448 PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1449 ("Could not find TensorsInfo handle with given id: %d", output_tensors_info_id));
1453 auto ret = pipeline_manager_.RegisterCustomFilter(
1454 custom_filter_name, listener_name, input_tensors_info_ptr, output_tensors_info_ptr);
1456 LogAndReportError(ret, &out);
// Delivers the JS-side result of a custom-filter invocation back to the native
// layer: the status code and the request id that identifies the pending call.
1463 void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args,
1464 picojson::object& out) {
1465 ScopeLogger("args: %s", args.serialize().c_str());
1467 CHECK_ARGS(args, kName, std::string, out);
1468 CHECK_ARGS(args, kStatus, double, out);
1469 CHECK_ARGS(args, kRequestId, double, out);
1471 const auto& custom_filter_name = args.get(kName).get<std::string>();
1472 auto status = static_cast<int>(args.get(kStatus).get<double>());
1473 auto request_id = static_cast<int>(args.get(kRequestId).get<double>());
1475 auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status);
1477 LogAndReportError(ret, &out);
// Removes a previously registered custom filter by name.
1484 void MlInstance::MLPipelineManagerUnregisterCustomFilter(const picojson::value& args,
1485 picojson::object& out) {
1486 ScopeLogger("args: %s", args.serialize().c_str());
1488 CHECK_ARGS(args, kName, std::string, out);
1490 const auto& custom_filter_name = args.get(kName).get<std::string>();
1492 auto ret = pipeline_manager_.UnregisterCustomFilter(custom_filter_name);
1494 LogAndReportError(ret, &out);
// Reads a property of a pipeline node. "type" tells the manager how to decode
// the GObject property value; the manager writes the result into `out`.
1501 void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, picojson::object& out) {
1502 ScopeLogger("args: %s", args.serialize().c_str());
1504 CHECK_ARGS(args, kId, double, out);
1505 CHECK_ARGS(args, kNodeName, std::string, out);
1506 CHECK_ARGS(args, kName, std::string, out);
1507 CHECK_ARGS(args, kType, std::string, out);
1509 auto id = static_cast<int>(args.get(kId).get<double>());
1510 const auto& name = args.get(kName).get<std::string>();
1511 const auto& node_name = args.get(kNodeName).get<std::string>();
1512 const auto& type = args.get(kType).get<std::string>();
1514 PlatformResult result = pipeline_manager_.getProperty(id, node_name, name, type, &out);
1517 LogAndReportError(result, &out);
// Sets a property of a pipeline node. The declared "type" string selects which
// JSON type the "property" argument must carry before it is forwarded.
1524 void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, picojson::object& out) {
1525 ScopeLogger("args: %s", args.serialize().c_str());
1527 CHECK_ARGS(args, kId, double, out);
1528 CHECK_ARGS(args, kNodeName, std::string, out);
1529 CHECK_ARGS(args, kName, std::string, out);
1530 CHECK_ARGS(args, kType, std::string, out);
1532 auto id = static_cast<int>(args.get(kId).get<double>());
1533 const auto& name = args.get(kName).get<std::string>();
1534 const auto& node_name = args.get(kNodeName).get<std::string>();
1535 const auto& type = args.get(kType).get<std::string>();
// Type-check the property payload against the declared type: BOOLEAN -> bool,
// STRING -> std::string, anything else is expected to be numeric.
1537 CHECK_EXIST(args, kProperty, out);
1538 if (kBOOLEAN == type) {
1539 CHECK_TYPE(args, kProperty, bool, out, ErrorCode::TYPE_MISMATCH_ERR);
1540 } else if (kSTRING == type) {
1541 CHECK_TYPE(args, kProperty, std::string, out, ErrorCode::TYPE_MISMATCH_ERR);
1543 CHECK_TYPE(args, kProperty, double, out, ErrorCode::TYPE_MISMATCH_ERR);
1545 const picojson::value& property = args.get(kProperty);
1547 PlatformResult result = pipeline_manager_.setProperty(id, node_name, name, type, property);
1549 LogAndReportError(result, &out);
// Retrieves the input TensorsInfo of a named pipeline source and registers it,
// returning the new handle id through res_id.
1556 void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out) {
1557 ScopeLogger("args: [%s]", args.serialize().c_str());
1559 CHECK_ARGS(args, kId, double, out);
1560 CHECK_ARGS(args, kName, std::string, out);
1562 auto id = static_cast<int>(args.get(kId).get<double>());
1563 const auto& name = args.get(kName).get<std::string>();
1566 PlatformResult result = pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
1568 LogAndReportError(result, &out);
// Pushes a TensorsData buffer into a named pipeline source element.
1575 void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson::object& out) {
1576 ScopeLogger("args: [%s]", args.serialize().c_str());
1578 CHECK_ARGS(args, kId, double, out);
1579 CHECK_ARGS(args, kName, std::string, out);
1580 CHECK_ARGS(args, kTensorsDataId, double, out);
1582 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1583 auto& source_name = args.get(kName).get<std::string>();
1584 auto tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
// The data to feed must already be registered with the TensorsData manager.
1586 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
1588 if (nullptr == tensors_data) {
1589 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
1590 ("Could not get TensorData handle with given id: %d", tensor_data_id));
1594 auto ret = pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
1596 LogAndReportError(ret, &out);
// Reports the pad names of a named switch element as a JSON array.
1603 void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojson::object& out) {
1604 ScopeLogger("args: [%s]", args.serialize().c_str());
1606 CHECK_ARGS(args, kId, double, out);
1607 CHECK_ARGS(args, kName, std::string, out);
1609 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1610 auto& switch_name = args.get(kName).get<std::string>();
1612 picojson::array pad_list;
1613 auto ret = pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
1615 LogAndReportError(ret, &out);
// Move the collected pads into the success response (avoids copying the array).
1619 ReportSuccess(picojson::value{std::move(pad_list)}, out);
// Selects the active pad of a named switch element.
1622 void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::object& out) {
1623 ScopeLogger("args: [%s]", args.serialize().c_str());
1625 CHECK_ARGS(args, kId, double, out);
1626 CHECK_ARGS(args, kName, std::string, out);
1627 CHECK_ARGS(args, kPadName, std::string, out);
// NOTE(review): id stays a double here (no static_cast<int> like most sibling
// handlers) — presumably SwitchSelect's parameter type absorbs it; confirm.
1629 auto pipeline_id = args.get(kId).get<double>();
1630 auto& switch_name = args.get(kName).get<std::string>();
1631 auto& pad_name = args.get(kPadName).get<std::string>();
1633 auto ret = pipeline_manager_.SwitchSelect(pipeline_id, switch_name, pad_name);
1635 LogAndReportError(ret, &out);
// Opens or closes a named valve element according to the "open" flag.
1642 void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out) {
1643 ScopeLogger("args: %s", args.serialize().c_str());
1645 CHECK_ARGS(args, kId, double, out);
1646 CHECK_ARGS(args, kName, std::string, out);
1647 CHECK_ARGS(args, kOpen, bool, out);
1649 auto name = args.get(kName).get<std::string>();
// NOTE(review): id kept as double (no int cast) — same pattern as
// MLPipelineGetValve/SwitchSelect; confirm against ValveSetOpen's signature.
1650 auto pipeline_id = args.get(kId).get<double>();
1651 auto open = args.get(kOpen).get<bool>();
1653 auto ret = pipeline_manager_.ValveSetOpen(pipeline_id, name, open);
1655 LogAndReportError(ret, &out);
// Queries whether a named valve element is currently open and reports the
// boolean result on success.
1662 void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out) {
1663 ScopeLogger("args: %s", args.serialize().c_str());
1665 CHECK_ARGS(args, kId, double, out);
1666 CHECK_ARGS(args, kName, std::string, out);
1668 auto name = args.get(kName).get<std::string>();
// NOTE(review): id kept as double here too — see MLPipelineValveSetOpen.
1669 auto pipeline_id = args.get(kId).get<double>();
1672 auto ret = pipeline_manager_.ValveIsOpen(pipeline_id, name, &open);
1674 LogAndReportError(ret, &out);
1678 ReportSuccess(picojson::value{open}, out);
1689 } // namespace extension