2 * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
17 #include "ml_instance.h"
20 #include "common/converter.h"
21 #include "common/logger.h"
22 #include "common/picojson.h"
23 #include "common/platform_result.h"
24 #include "common/tools.h"
// Compile-time guard: the handlers below use fixed-size 4-element dimension
// arrays, so the build must fail if the platform rank limit ever changes.
26 static_assert(ML_TENSOR_RANK_LIMIT == 4,
27 "This implementation requires different ML_TENSOR_RANK_LIMIT. Please fix the code.");
// Return code reported by JS custom-filter callbacks on success.
33 const int kCustomFilterSuccess = 0;
// JSON keys exchanged with the JavaScript side of the extension.
35 const std::string kAsync = "async";
36 const std::string kBOOLEAN = "BOOLEAN";
37 const std::string kBuffer = "buffer";
38 const std::string kCallbackId = "callbackId";
39 const std::string kDataId = "dataId";
40 const std::string kDefinition = "definition";
41 const std::string kDimensions = "dimensions";
42 const std::string kFwType = "fwType";
43 const std::string kGetInputMode = "getInputMode";
44 const std::string kHw = "hw";
45 const std::string kHwType = "hwType";
46 const std::string kId = "id";
47 const std::string kIndex = "index";
48 const std::string kInputTensorsInfoId = "inputTensorsInfoId";
49 const std::string kInTensorsInfo = "inTensorsInfo";
50 const std::string kIsDynamicMode = "isDynamicMode";
51 const std::string kListenerName = "listenerName";
52 const std::string kLocation = "location";
53 const std::string kModelPath = "modelPath";
54 const std::string kName = "name";
55 const std::string kNnfw = "nnfw";
56 const std::string kCustomRequirement = "customRequirement";
57 const std::string kNodeName = "nodeName";
58 const std::string kOpen = "open";
59 const std::string kOtherId = "otherId";
60 const std::string kOutputTensorsInfoId = "outputTensorsInfoId";
61 const std::string kOutTensorsInfo = "outTensorsInfo";
62 const std::string kPadName = "padName";
// NOTE(review): duplicates the value of kListenerName above — kept for
// backward compatibility with existing call sites; consider unifying.
63 const std::string kPipelineStateChangeListenerName = "listenerName";
64 const std::string kProperty = "property";
65 const std::string kRequestId = "requestId";
66 const std::string kShape = "shape";
67 const std::string kSize = "size";
68 const std::string kStatus = "status";
69 const std::string kSTRING = "STRING";
70 const std::string kTensorsDataId = "tensorsDataId";
71 const std::string kTensorsInfoId = "tensorsInfoId";
72 const std::string kTimeout = "timeout";
73 const std::string kType = "type";
74 const std::string kValue = "value";
// Pulls in LogAndReportError, PlatformResult, ErrorCode, optional, etc.
77 using namespace common;
// Argument-validation helpers used at the top of every message handler.
// CHECK_EXIST reports TypeMismatchError (and exits the handler) when a
// required key is absent from the JSON args object.
79 #define CHECK_EXIST(args, name, out) \
80 if (!args.contains(name)) { \
81 std::string msg = std::string(name) + " is required argument"; \
82 LogAndReportError(PlatformResult(ErrorCode::TYPE_MISMATCH_ERR, msg), &out); \
86 // CHECK_TYPE will throw AbortError by default, but it can be changed by providing
87 // additional parameter to the macro, i.e.:
88 // CHECK_TYPE(args, "name", std::string, out, ErrorCode::TYPE_MISMATCH_ERR)
// Arity-dispatch trick: CHECK_TYPE_X picks CHECK_TYPE_5 or CHECK_TYPE_4
// depending on how many arguments the caller passed.
89 #define CHECK_TYPE(...) CHECK_TYPE_X(__VA_ARGS__, CHECK_TYPE_5, CHECK_TYPE_4)(__VA_ARGS__)
90 #define CHECK_TYPE_X(_1, _2, _3, _4, _5, EXC_TYPE, ...) EXC_TYPE
91 #define CHECK_TYPE_5(args, name, type, out, error_type) \
92 if (!args.get(name).is<type>()) { \
93 std::string msg = std::string(name) + " has invalid type"; \
94 LogAndReportError(PlatformResult(error_type, msg), &out); \
97 #define CHECK_TYPE_4(args, name, type, out) \
98 CHECK_TYPE_5(args, name, type, out, ErrorCode::ABORT_ERR)
// Combined presence + type check; the common form used by handlers below.
100 #define CHECK_ARGS(args, name, type, out) \
101 CHECK_EXIST(args, name, out) \
102 CHECK_TYPE(args, name, type, out)
// Wires up the three managers (info/data/pipeline share state via pointers)
// and registers every sync/binary handler exposed to the JS layer.
104 MlInstance::MlInstance()
105 : tensors_info_manager_{&tensors_data_manager_},
106 single_manager_{&tensors_info_manager_},
107 pipeline_manager_{this, &tensors_info_manager_, &tensors_data_manager_} {
109 using namespace std::placeholders;
// Shorthand: handler name doubles as the registered message name.
111 #define REGISTER_METHOD(M) RegisterSyncHandler(#M, std::bind(&MlInstance::M, this, _1, _2))
112 #define REGISTER_BINARY_METHOD(M) RegisterBinaryHandler(std::bind(&MlInstance::M, this, _1, _2, _3))
// Common / TensorsInfo handlers.
114 REGISTER_METHOD(MLCheckNNFWAvailability);
115 REGISTER_METHOD(MLTensorsInfoCountGetter);
116 REGISTER_METHOD(MLTensorsInfoAddTensorInfo);
117 REGISTER_METHOD(MLTensorsInfoCreate);
118 REGISTER_METHOD(MLTensorsInfoGetDimensions);
119 REGISTER_METHOD(MLTensorsInfoSetDimensions);
120 REGISTER_METHOD(MLTensorsInfoGetTensorName);
121 REGISTER_METHOD(MLTensorsInfoSetTensorName);
122 REGISTER_METHOD(MLTensorsInfoGetTensorSize);
123 REGISTER_METHOD(MLTensorsInfoGetTensorType);
124 REGISTER_METHOD(MLTensorsInfoSetTensorType);
125 REGISTER_METHOD(MLTensorsInfoGetTensorsData);
126 REGISTER_METHOD(MLTensorsInfoClone);
127 REGISTER_METHOD(MLTensorsInfoEquals);
128 REGISTER_METHOD(MLTensorsInfoDispose);
129 REGISTER_METHOD(MLPipelineValveSetOpen);
130 REGISTER_METHOD(MLPipelineValveIsOpen);
// TensorsData handlers (raw-data setter also has a binary fast path).
132 REGISTER_METHOD(MLTensorsDataDispose);
133 REGISTER_METHOD(MLTensorsDataGetTensorRawData);
134 REGISTER_METHOD(MLTensorsDataGetTensorType);
135 REGISTER_METHOD(MLTensorsDataSetTensorRawData);
136 REGISTER_BINARY_METHOD(MLTensorsDataSetTensorRawDataBinary);
// SingleShot API handlers.
138 REGISTER_METHOD(MLSingleManagerOpenModel);
139 REGISTER_METHOD(MLSingleShotGetTensorsInfo);
140 REGISTER_METHOD(MLSingleShotSetInputInfo);
141 REGISTER_METHOD(MLSingleShotInvoke);
142 REGISTER_METHOD(MLSingleShotGetValue);
143 REGISTER_METHOD(MLSingleShotSetValue);
144 REGISTER_METHOD(MLSingleShotSetTimeout);
145 REGISTER_METHOD(MLSingleShotClose);
// Pipeline API handlers.
147 REGISTER_METHOD(MLPipelineManagerCreatePipeline);
148 REGISTER_METHOD(MLPipelineGetState);
149 REGISTER_METHOD(MLPipelineDispose);
150 REGISTER_METHOD(MLPipelineStart);
151 REGISTER_METHOD(MLPipelineStop);
152 REGISTER_METHOD(MLPipelineGetNodeInfo);
153 REGISTER_METHOD(MLPipelineGetSwitch);
154 REGISTER_METHOD(MLPipelineSwitchGetPadList);
155 REGISTER_METHOD(MLPipelineSwitchSelect);
156 REGISTER_METHOD(MLPipelineGetValve);
157 REGISTER_METHOD(MLPipelineNodeInfoGetProperty);
158 REGISTER_METHOD(MLPipelineNodeInfoSetProperty);
159 REGISTER_METHOD(MLPipelineGetSource);
160 REGISTER_METHOD(MLPipelineGetInputTensorsInfo);
161 REGISTER_METHOD(MLPipelineSourceInputData);
162 REGISTER_METHOD(MLPipelineRegisterSinkListener);
163 REGISTER_METHOD(MLPipelineUnregisterSinkListener);
164 REGISTER_METHOD(MLPipelineManagerRegisterCustomFilter);
165 REGISTER_METHOD(MLPipelineManagerCustomFilterOutput);
166 REGISTER_METHOD(MLPipelineManagerUnregisterCustomFilter);
168 #undef REGISTER_METHOD
171 MlInstance::~MlInstance() {
// Accessor for the per-instance TensorsInfo handle registry.
176 TensorsInfoManager& MlInstance::GetTensorsInfoManager() {
177 return tensors_info_manager_;
// Accessor for the per-instance TensorsData handle registry.
180 TensorsDataManager& MlInstance::GetTensorsDataManager() {
181 return tensors_data_manager_;
// Checks whether the requested NN framework / hardware combination is
// available on this device; reports a boolean to the JS caller.
184 void MlInstance::MLCheckNNFWAvailability(const picojson::value& args, picojson::object& out) {
185 ScopeLogger("args: %s", args.serialize().c_str());
186 CHECK_EXIST(args, kNnfw, out)
187 CHECK_EXIST(args, kHw, out)
188 CHECK_EXIST(args, kCustomRequirement, out)
190 std::string nnfw = args.get(kNnfw).get<std::string>();
191 std::string hw = args.get(kHw).get<std::string>();
// customRequirement is nullable on the JS side, hence optional<> here.
192 optional<std::string> customRequirement;
193 if (args.get(kCustomRequirement).is<std::string>()) {
194 customRequirement = args.get(kCustomRequirement).get<std::string>();
196 bool availability_val = util::CheckNNFWAvailability(nnfw, hw, customRequirement);
198 picojson::value available = picojson::value{availability_val};
199 ReportSuccess(available, out);
// Creates an empty TensorsInfo handle and returns its numeric id under kId.
202 void MlInstance::MLTensorsInfoCreate(const picojson::value& args, picojson::object& out) {
203 ScopeLogger("args: %s", args.serialize().c_str());
205 TensorsInfo* tensorsInfo = GetTensorsInfoManager().CreateTensorsInfo();
206 if (nullptr == tensorsInfo) {
207 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
208 ("Could not create new TensorsInfo handle"));
// Ids are ints but transported as JSON doubles (picojson has no int type).
211 out[kId] = picojson::value(static_cast<double>(tensorsInfo->Id()));
// Reports the number of tensors held by the TensorsInfo with the given id.
215 void MlInstance::MLTensorsInfoCountGetter(const picojson::value& args, picojson::object& out) {
216 ScopeLogger("args: %s", args.serialize().c_str());
217 CHECK_ARGS(args, kTensorsInfoId, double, out);
219 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
220 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
221 if (nullptr == tensorsInfo) {
222 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
223 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
// Count is fetched from the native ml_tensors_info handle.
226 unsigned int count = 0;
227 PlatformResult result = tensorsInfo->NativeGetCount(&count);
229 ReportError(result, &out);
232 picojson::value val = picojson::value{static_cast<double>(count)};
233 ReportSuccess(val, out);
// Appends one tensor (name, type, dimensions) to an existing TensorsInfo
// and reports the index of the newly added tensor.
236 void MlInstance::MLTensorsInfoAddTensorInfo(const picojson::value& args, picojson::object& out) {
237 ScopeLogger("args: %s", args.serialize().c_str());
238 CHECK_ARGS(args, kTensorsInfoId, double, out);
239 CHECK_ARGS(args, kType, std::string, out);
// kName is only checked for presence — it may be null (handled below).
241 CHECK_EXIST(args, kName, out);
242 CHECK_ARGS(args, kDimensions, picojson::array, out);
244 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
245 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
246 if (nullptr == tensorsInfo) {
247 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
248 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
// Map the JS enum string (e.g. "UINT8") to ml_tensor_type_e.
252 const std::string& tensorType = args.get(kType).get<std::string>();
253 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
254 PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
256 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
258 ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
263 if (args.get(kName).is<std::string>()) {
264 name = args.get(kName).get<std::string>();
265 LoggerD("name: %s", name.c_str());
// Convert the JSON dimensions array into a fixed rank-4 C array.
268 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
269 auto dim = args.get(kDimensions).get<picojson::array>();
270 result = util::GetDimensionsFromJsonArray(dim, dimensions);
272 LogAndReportError(result, &out);
276 result = tensorsInfo->AddTensorInfo(name, tensorTypeEnum, dimensions);
278 LogAndReportError(result, &out);
// The new tensor sits at the last index after a successful add.
282 int count = tensorsInfo->Count() - 1;
284 picojson::value val = picojson::value{static_cast<double>(count)};
285 ReportSuccess(val, out);
// Reports the dimensions of tensor `index` within a TensorsInfo as a
// JSON array of ML_TENSOR_RANK_LIMIT numbers.
288 void MlInstance::MLTensorsInfoGetDimensions(const picojson::value& args, picojson::object& out) {
289 ScopeLogger("args: %s", args.serialize().c_str());
290 CHECK_ARGS(args, kTensorsInfoId, double, out);
291 CHECK_ARGS(args, kIndex, double, out);
293 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
294 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
295 if (nullptr == tensorsInfo) {
296 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
297 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
300 int index = static_cast<int>(args.get(kIndex).get<double>());
301 unsigned int dim[ML_TENSOR_RANK_LIMIT];
302 PlatformResult result = tensorsInfo->NativeGetTensorDimensions(index, dim);
304 LogAndReportError(result, &out);
307 picojson::array array = picojson::array{};
308 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
312 array.push_back(picojson::value{static_cast<double>(dim[i])});
314 picojson::value val = picojson::value{array};
315 ReportSuccess(val, out);
// Sets the dimensions of tensor `index` from the JSON dimensions array.
318 void MlInstance::MLTensorsInfoSetDimensions(const picojson::value& args, picojson::object& out) {
319 ScopeLogger("args: %s", args.serialize().c_str());
320 CHECK_ARGS(args, kTensorsInfoId, double, out);
321 CHECK_ARGS(args, kIndex, double, out);
322 CHECK_ARGS(args, kDimensions, picojson::array, out);
324 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
325 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
326 if (nullptr == tensorsInfo) {
327 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
328 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
// Validate/convert the JSON array before touching the native handle.
332 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
333 auto dim = args.get(kDimensions).get<picojson::array>();
334 PlatformResult result = util::GetDimensionsFromJsonArray(dim, dimensions);
336 LogAndReportError(result, &out);
340 int index = static_cast<int>(args.get(kIndex).get<double>());
341 result = tensorsInfo->NativeSetTensorDimensions(index, dimensions);
343 LogAndReportError(result, &out);
// Reports the name of tensor `index` within a TensorsInfo.
349 void MlInstance::MLTensorsInfoGetTensorName(const picojson::value& args, picojson::object& out) {
350 ScopeLogger("args: %s", args.serialize().c_str());
351 CHECK_ARGS(args, kTensorsInfoId, double, out);
352 CHECK_ARGS(args, kIndex, double, out);
354 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
355 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
356 if (nullptr == tensorsInfo) {
357 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
358 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
361 int index = static_cast<int>(args.get(kIndex).get<double>());
363 PlatformResult result = tensorsInfo->NativeGetTensorName(index, &name);
365 LogAndReportError(result, &out);
368 picojson::value val = picojson::value{name};
369 ReportSuccess(val, out);
// Sets the name of tensor `index` within a TensorsInfo.
372 void MlInstance::MLTensorsInfoSetTensorName(const picojson::value& args, picojson::object& out) {
373 ScopeLogger("args: %s", args.serialize().c_str());
374 CHECK_ARGS(args, kTensorsInfoId, double, out);
375 CHECK_ARGS(args, kIndex, double, out);
376 CHECK_ARGS(args, kName, std::string, out);
378 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
379 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
380 if (nullptr == tensorsInfo) {
381 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
382 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
386 int index = static_cast<int>(args.get(kIndex).get<double>());
387 const std::string& name = args.get(kName).get<std::string>();
388 PlatformResult result = tensorsInfo->NativeSetTensorName(index, name);
390 LogAndReportError(result, &out);
// Reports the byte size of tensor `index` within a TensorsInfo.
396 void MlInstance::MLTensorsInfoGetTensorSize(const picojson::value& args, picojson::object& out) {
397 ScopeLogger("args: %s", args.serialize().c_str());
398 CHECK_ARGS(args, kTensorsInfoId, double, out);
399 CHECK_ARGS(args, kIndex, double, out);
401 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
402 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
403 if (nullptr == tensorsInfo) {
404 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
405 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
408 int index = static_cast<int>(args.get(kIndex).get<double>());
410 PlatformResult result = tensorsInfo->NativeGetTensorSize(index, &size);
412 LogAndReportError(result, &out);
416 picojson::value val = picojson::value{static_cast<double>(size)};
417 ReportSuccess(val, out);
// Reports the type of tensor `index` as its JS enum string (e.g. "UINT8").
420 void MlInstance::MLTensorsInfoGetTensorType(const picojson::value& args, picojson::object& out) {
421 ScopeLogger("args: %s", args.serialize().c_str());
422 CHECK_ARGS(args, kTensorsInfoId, double, out);
423 CHECK_ARGS(args, kIndex, double, out);
425 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
426 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
427 if (nullptr == tensorsInfo) {
428 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
429 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
433 int index = static_cast<int>(args.get(kIndex).get<double>());
434 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
435 PlatformResult result = tensorsInfo->NativeGetTensorType(index, &tensorTypeEnum);
437 LogAndReportError(result, &out);
// Map the native enum back to the string expected by the JS layer.
440 std::string tensorTypeString;
441 result = types::TensorTypeEnum.getName(tensorTypeEnum, &tensorTypeString);
443 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
445 ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
449 picojson::value val = picojson::value{tensorTypeString};
450 ReportSuccess(val, out);
// Sets the type of tensor `index` from its JS enum string.
453 void MlInstance::MLTensorsInfoSetTensorType(const picojson::value& args, picojson::object& out) {
454 ScopeLogger("args: %s", args.serialize().c_str());
455 CHECK_ARGS(args, kTensorsInfoId, double, out);
456 CHECK_ARGS(args, kIndex, double, out);
457 CHECK_ARGS(args, kType, std::string, out);
459 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
460 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
461 if (nullptr == tensorsInfo) {
462 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
463 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
467 const std::string& tensorType = args.get(kType).get<std::string>();
468 ml_tensor_type_e tensorTypeEnum = ML_TENSOR_TYPE_UNKNOWN;
469 PlatformResult result = types::TensorTypeEnum.getValue(tensorType, &tensorTypeEnum);
471 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting value of TensorType"),
473 ("TensorTypeEnum.getValue() failed, error: %s", result.message().c_str()));
477 int index = static_cast<int>(args.get(kIndex).get<double>());
478 result = tensorsInfo->NativeSetTensorType(index, tensorTypeEnum);
480 LogAndReportError(result, &out);
// Allocates a TensorsData buffer described by this TensorsInfo and returns
// both the new data id and the (possibly cloned) info id backing it.
486 void MlInstance::MLTensorsInfoGetTensorsData(const picojson::value& args, picojson::object& out) {
487 ScopeLogger("args: %s", args.serialize().c_str());
488 CHECK_ARGS(args, kTensorsInfoId, double, out);
490 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
491 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
492 if (nullptr == tensorsInfo) {
493 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
494 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
498 TensorsData* tensorsData = GetTensorsInfoManager().CreateTensorsData(tensorsInfo);
500 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
501 ("Could not create TensorsData"));
505 out[kTensorsDataId] = picojson::value(static_cast<double>(tensorsData->Id()));
506 out[kTensorsInfoId] = picojson::value(static_cast<double>(tensorsData->TensorsInfoId()));
510 void MlInstance::MLTensorsInfoClone(const picojson::value& args, picojson::object& out) {
511 ScopeLogger("args: %s", args.serialize().c_str());
512 CHECK_ARGS(args, kTensorsInfoId, double, out);
514 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
515 TensorsInfo* tensorsInfo = GetTensorsInfoManager().GetTensorsInfo(tensorsInfoId);
516 if (nullptr == tensorsInfo) {
517 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
518 ("Could not find TensorsInfo handle with given id: %d", tensorsInfoId));
522 TensorsInfo* cloned = GetTensorsInfoManager().CloneTensorsInfo(tensorsInfo);
523 if (nullptr == cloned) {
524 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
525 ("Could not clone TensorsInfo with given id: %d", tensorsInfoId));
529 out["id"] = picojson::value(static_cast<double>(cloned->Id()));
// Compares two TensorsInfo handles for equality and reports a boolean.
533 void MlInstance::MLTensorsInfoEquals(const picojson::value& args, picojson::object& out) {
534 ScopeLogger("args: %s", args.serialize().c_str());
535 CHECK_ARGS(args, kTensorsInfoId, double, out);
536 CHECK_ARGS(args, kOtherId, double, out);
538 int firstId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
539 int secondId = static_cast<int>(args.get(kOtherId).get<double>());
541 TensorsInfo* first = GetTensorsInfoManager().GetTensorsInfo(firstId);
542 if (nullptr == first) {
543 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
544 ("Could not find TensorsInfo handle with given id: %d", firstId));
548 TensorsInfo* second = GetTensorsInfoManager().GetTensorsInfo(secondId);
549 if (nullptr == second) {
550 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
551 ("Could not find TensorsInfo handle with given id: %d", secondId));
555 bool equals = first->Equals(second);
556 picojson::value val = picojson::value{equals};
557 ReportSuccess(val, out);
// Releases the TensorsInfo handle with the given id.
560 void MlInstance::MLTensorsInfoDispose(const picojson::value& args, picojson::object& out) {
561 ScopeLogger("args: %s", args.serialize().c_str());
562 CHECK_ARGS(args, kTensorsInfoId, double, out);
563 int tensorsInfoId = static_cast<int>(args.get(kTensorsInfoId).get<double>());
565 PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensorsInfoId);
567 LogAndReportError(result, &out);
// Releases a TensorsData handle (and its underlying TensorsInfo), unless the
// data is owned natively and must not be freed from JS.
572 void MlInstance::MLTensorsDataDispose(const picojson::value& args, picojson::object& out) {
573 ScopeLogger("args: %s", args.serialize().c_str());
574 CHECK_ARGS(args, kTensorsDataId, double, out);
575 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
577 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
578 if (nullptr == tensors_data) {
579 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
580 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
// Guard: some TensorsData objects may not be disposed from the JS side.
584 if (!tensors_data->DisposableFromJS()) {
589 // Dispose underlying tensorsInfo
590 PlatformResult result = GetTensorsInfoManager().DisposeTensorsInfo(tensors_data->TensorsInfoId());
592 LogAndReportError(result, &out);
596 result = GetTensorsDataManager().DisposeTensorsData(tensors_data_id);
598 LogAndReportError(result, &out);
// Extracts a (sub)region of one tensor's raw bytes and returns it to JS as a
// binary-encoded string, together with the value type and resulting shape.
603 void MlInstance::MLTensorsDataGetTensorRawData(const picojson::value& args, picojson::object& out) {
604 ScopeLogger("args: %s", args.serialize().c_str());
605 CHECK_ARGS(args, kTensorsDataId, double, out);
606 CHECK_ARGS(args, kIndex, double, out);
607 CHECK_ARGS(args, kLocation, picojson::array, out);
608 CHECK_ARGS(args, kSize, picojson::array, out);
610 int tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
611 int index = static_cast<int>(args.get(kIndex).get<double>());
613 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
614 if (nullptr == tensors_data) {
615 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
616 ("Could not find TensorsData handle with given id: %d", tensor_data_id));
// Region start (per-dimension offsets) requested by the caller.
620 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
621 PlatformResult result =
622 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
624 LogAndReportError(result, &out);
// Tensor dimensions are needed to clamp/validate the requested size.
628 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
629 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
631 LogAndReportError(result, &out);
635 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
636 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
639 LogAndReportError(result, &out);
643 TensorRawData raw_data;
644 result = tensors_data->GetTensorRawData(index, location, size, &raw_data);
646 LogAndReportError(result, &out);
// Binary payload is transported inside a JSON string (base-like encoding).
650 std::vector<std::uint8_t> out_data{raw_data.data, raw_data.data + raw_data.size_in_bytes};
651 out[kBuffer] = picojson::value(picojson::string_type, true);
652 common::encode_binary_in_string(out_data, out[kBuffer].get<std::string>());
654 out[kType] = picojson::value(raw_data.type_str);
655 picojson::array shape = picojson::array{};
656 for (int i = 0; i < ML_TENSOR_RANK_LIMIT; i++) {
657 shape.push_back(picojson::value{static_cast<double>(raw_data.shape[i])});
659 out[kShape] = picojson::value{shape};
// Reports the type of tensor `index` in a TensorsData as its enum string.
664 void MlInstance::MLTensorsDataGetTensorType(const picojson::value& args, picojson::object& out) {
665 ScopeLogger("args: %s", args.serialize().c_str());
666 CHECK_ARGS(args, kTensorsDataId, double, out);
667 CHECK_ARGS(args, kIndex, double, out);
669 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
670 int index = static_cast<int>(args.get(kIndex).get<double>());
672 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
673 if (nullptr == tensors_data) {
674 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
675 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
679 std::string tensor_type_string;
680 PlatformResult result =
681 types::TensorTypeEnum.getName(tensors_data->GetTensorType(index), &tensor_type_string);
683 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Error getting name of TensorType"),
685 ("TensorTypeEnum.getName() failed, error: %s", result.message().c_str()));
689 picojson::value val = picojson::value{tensor_type_string};
690 ReportSuccess(val, out);
// Writes raw bytes (binary-encoded in a JSON string) into a region of one
// tensor inside a TensorsData. JSON-transport counterpart of the binary
// handler MLTensorsDataSetTensorRawDataBinary below.
693 void MlInstance::MLTensorsDataSetTensorRawData(const picojson::value& args, picojson::object& out) {
695 CHECK_ARGS(args, kTensorsDataId, double, out);
696 CHECK_ARGS(args, kIndex, double, out);
697 CHECK_ARGS(args, kBuffer, std::string, out);
698 CHECK_ARGS(args, kLocation, picojson::array, out);
699 CHECK_ARGS(args, kSize, picojson::array, out);
// The buffer itself is deliberately not logged (can be large/binary).
700 LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
701 LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
702 LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
703 LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
705 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
706 int index = static_cast<int>(args.get(kIndex).get<double>());
708 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
709 if (nullptr == tensors_data) {
710 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
711 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
715 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
716 PlatformResult result =
717 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
719 LogAndReportError(result, &out);
723 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
724 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
726 LogAndReportError(result, &out);
730 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
731 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
734 LogAndReportError(result, &out);
// Decode the string-packed payload back into raw bytes before writing.
737 const std::string& str_buffer = args.get(kBuffer).get<std::string>();
738 std::vector<std::uint8_t> buffer;
739 common::decode_binary_from_string(str_buffer, buffer);
741 TensorRawData raw_data{buffer.data(), buffer.size()};
742 result = tensors_data->SetTensorRawData(index, location, size, raw_data);
744 LogAndReportError(result, &out);
// Binary fast path for SetTensorRawData: the payload arrives as one raw
// message instead of a JSON-encoded string, avoiding the encode/decode cost.
751 void MlInstance::MLTensorsDataSetTensorRawDataBinary(const char* data, size_t data_size,
752 picojson::object& out) {
755 METHOD_ID WAS ALREADY REMOVED during message handling
756 other data packed with following format:
758 // 1 byte === methodIndex /// already parsed
759 // 4 byte === JSON length (N)
760 // 4 byte === buffer length (M)
761 // N bytes === JSON data
762 // M bytes === buffer data
// Decode the big-endian 4-byte JSON length.
// NOTE(review): `data` is const char*; on targets where char is signed,
// bytes >= 0x80 sign-extend before the shifts and corrupt the decoded
// length — consider casting each byte to unsigned char first. TODO confirm.
764 unsigned int call_args_len_begin = 0;
765 unsigned int call_args_len = static_cast<unsigned int>(
766 (data[call_args_len_begin] << 24) + (data[call_args_len_begin + 1] << 16) +
767 (data[call_args_len_begin + 2] << 8) + (data[call_args_len_begin + 3]));
// Decode the big-endian 4-byte raw-buffer length.
769 unsigned int buffer_len_begin = call_args_len_begin + 4;
770 unsigned int buffer_len = static_cast<unsigned int>(
771 (data[buffer_len_begin] << 24) + (data[buffer_len_begin + 1] << 16) +
772 (data[buffer_len_begin + 2] << 8) + (data[buffer_len_begin + 3]));
// Parse the embedded JSON arguments; the binary buffer follows them.
774 unsigned int call_args_begin = buffer_len_begin + 4;
775 std::string call_args(data + call_args_begin, call_args_len);
777 picojson::value args;
778 picojson::parse(args, call_args);
780 unsigned int buffer_begin = call_args_begin + call_args_len;
// From here on the flow mirrors MLTensorsDataSetTensorRawData.
782 CHECK_ARGS(args, kTensorsDataId, double, out);
783 CHECK_ARGS(args, kIndex, double, out);
784 CHECK_ARGS(args, kLocation, picojson::array, out);
785 CHECK_ARGS(args, kSize, picojson::array, out);
786 LoggerD("%s, %s", kTensorsDataId.c_str(), args.get(kTensorsDataId).serialize().c_str());
787 LoggerD("%s, %s", kIndex.c_str(), args.get(kIndex).serialize().c_str());
788 LoggerD("%s, %s", kLocation.c_str(), args.get(kLocation).serialize().c_str());
789 LoggerD("%s, %s", kSize.c_str(), args.get(kSize).serialize().c_str());
791 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
792 int index = static_cast<int>(args.get(kIndex).get<double>());
794 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
795 if (nullptr == tensors_data) {
796 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
797 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
801 unsigned int location[ML_TENSOR_RANK_LIMIT] = {};
802 PlatformResult result =
803 util::GetLocationFromJsonArray(args.get(kLocation).get<picojson::array>(), location);
805 LogAndReportError(result, &out);
809 unsigned int dimensions[ML_TENSOR_RANK_LIMIT] = {};
810 result = tensors_data->GetTensorsInfo()->NativeGetTensorDimensions(index, dimensions);
812 LogAndReportError(result, &out);
816 unsigned int size[ML_TENSOR_RANK_LIMIT] = {};
817 result = util::GetSizeFromJsonArray(args.get(kSize).get<picojson::array>(), location, dimensions,
820 LogAndReportError(result, &out);
// Points straight into the incoming message — no copy of the payload.
824 TensorRawData raw_data{reinterpret_cast<uint8_t*>(const_cast<char*>(data + buffer_begin)),
826 result = tensors_data->SetTensorRawData(index, location, size, raw_data);
828 LogAndReportError(result, &out);
// Opens a SingleShot model. Resolves optional in/out TensorsInfo handles,
// maps the framework/hardware enum strings, then runs the open either
// synchronously or on the worker thread (async flag from JS).
835 void MlInstance::MLSingleManagerOpenModel(const picojson::value& args, picojson::object& out) {
836 ScopeLogger("args: %s", args.serialize().c_str());
837 CHECK_ARGS(args, kModelPath, std::string, out);
838 CHECK_ARGS(args, kInTensorsInfo, double, out);
839 CHECK_ARGS(args, kOutTensorsInfo, double, out);
840 CHECK_ARGS(args, kFwType, std::string, out);
841 CHECK_ARGS(args, kHwType, std::string, out);
842 CHECK_ARGS(args, kIsDynamicMode, bool, out);
844 const auto& model_path = common::tools::ConvertUriToPath(args.get(kModelPath).get<std::string>());
845 CHECK_STORAGE_ACCESS(model_path, &out);
// Input/output TensorsInfo are optional; kNoId means "let the model decide".
847 TensorsInfo* in_tensors_info = nullptr;
848 auto inTensorId = static_cast<int>(args.get(kInTensorsInfo).get<double>());
849 if (kNoId != inTensorId) {
850 in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
851 if (nullptr == in_tensors_info) {
852 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
853 ("Could not find TensorsInfo handle with given id: %d", inTensorId));
858 TensorsInfo* out_tensors_info = nullptr;
859 auto outTensorId = static_cast<int>(args.get(kOutTensorsInfo).get<double>());
860 if (kNoId != outTensorId) {
861 out_tensors_info = GetTensorsInfoManager().GetTensorsInfo(outTensorId);
862 if (nullptr == out_tensors_info) {
863 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
864 ("Could not find TensorsInfo handle with given id: %d", outTensorId));
869 ml_nnfw_type_e nnfw_e = ML_NNFW_TYPE_ANY;
870 PlatformResult result =
871 types::NNFWTypeEnum.getValue(args.get(kFwType).get<std::string>(), &nnfw_e);
873 LogAndReportError(result, &out);
877 ml_nnfw_hw_e hw_e = ML_NNFW_HW_ANY;
878 result = types::HWTypeEnum.getValue(args.get(kHwType).get<std::string>(), &hw_e);
880 LogAndReportError(result, &out);
884 auto is_dynamic_mode = args.get(kIsDynamicMode).get<bool>();
// Shared open routine, usable both sync and from the worker thread.
886 auto logic = [this, model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
887 is_dynamic_mode](decltype(out) out) {
888 PlatformResult result = common::tools::CheckFileAvailability(model_path);
891 PlatformResult(ErrorCode::NOT_FOUND_ERR, "File does not exist or is not accessible"),
892 &out, ("File does not exist or is not accessible: %s", model_path.c_str()));
897 result = single_manager_.OpenModel(model_path, in_tensors_info, out_tensors_info, nnfw_e, hw_e,
898 is_dynamic_mode, &res_id);
900 ReportError(result, &out);
904 out[kId] = picojson::value(static_cast<double>(res_id));
// Missing/invalid kAsync is treated as synchronous.
909 (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
// Async path: run on the worker and post the result with the callback id.
915 CHECK_ARGS(args, kCallbackId, double, out);
916 double callback_id = args.get(kCallbackId).get<double>();
917 this->worker_.add_job([this, callback_id, logic] {
918 picojson::value response = picojson::value(picojson::object());
919 picojson::object& async_out = response.get<picojson::object>();
920 async_out[kCallbackId] = picojson::value(callback_id);
922 this->PostMessage(response.serialize().c_str());
// Returns (via out[kId]) the id of a native TensorsInfo describing either the
// input (getInputMode == true) or output tensors of the single-shot model
// identified by args.id.
930 void MlInstance::MLSingleShotGetTensorsInfo(const picojson::value& args, picojson::object& out) {
931 ScopeLogger("args: %s", args.serialize().c_str());
932 CHECK_ARGS(args, kId, double, out);
933 CHECK_ARGS(args, kGetInputMode, bool, out);
935 auto id = static_cast<int>(args.get(kId).get<double>());
936 // true means gathering input data; false means gathering output data
937 auto get_input_mode = static_cast<int>(args.get(kGetInputMode).get<bool>());
940 auto ret = single_manager_.GetNativeTensorsInfo(id, get_input_mode, &res_id);
942 ReportError(ret, &out);
946 out[kId] = picojson::value(static_cast<double>(res_id));
// Applies a new input TensorsInfo to the single-shot model identified by
// args.id. A clone of the supplied TensorsInfo is created and its id is
// reported back to JS so the caller gets an independent handle.
950 void MlInstance::MLSingleShotSetInputInfo(const picojson::value& args, picojson::object& out) {
951 ScopeLogger("args: %s", args.serialize().c_str());
952 CHECK_ARGS(args, kId, double, out);
953 CHECK_ARGS(args, kInTensorsInfo, double, out);
955 auto id = static_cast<int>(args.get(kId).get<double>());
956 auto inTensorId = static_cast<int>(args.get(kInTensorsInfo).get<double>());
958 TensorsInfo* in_tensors_info = GetTensorsInfoManager().GetTensorsInfo(inTensorId);
959 if (nullptr == in_tensors_info) {
960 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
961 ("Could not find TensorsInfo handle with given id: %d", inTensorId));
// NOTE(review): the native call below uses the original handle while the
// clone's id is what gets reported to JS — confirm this asymmetry is intended.
965 TensorsInfo* clone = GetTensorsInfoManager().CloneTensorsInfo(in_tensors_info);
967 auto ret = single_manager_.SetNativeInputInfo(id, in_tensors_info);
969 ReportError(ret, &out);
973 out[kId] = picojson::value(static_cast<double>(clone->Id()));
// Runs inference on the single-shot model identified by args.id using the
// TensorsData identified by args.tensorsDataId. When args.async is true the
// input data (and its TensorsInfo) is cloned up front so JS cannot destroy it
// mid-invoke; the clone is disposed after the native call. Results (ids of the
// output TensorsData/TensorsInfo) are returned directly or posted via
// callbackId on the worker thread.
977 void MlInstance::MLSingleShotInvoke(const picojson::value& args, picojson::object& out) {
978 ScopeLogger("args: %s", args.serialize().c_str());
979 CHECK_ARGS(args, kId, double, out);
980 CHECK_ARGS(args, kTensorsDataId, double, out);
982 int id = static_cast<int>(args.get(kId).get<double>());
983 int tensors_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
985 (args.contains(kAsync) && args.get(kAsync).is<bool>()) ? args.get(kAsync).get<bool>() : false;
987 TensorsData* in_tensors_data = GetTensorsDataManager().GetTensorsData(tensors_data_id);
988 if (async && in_tensors_data) {
989 // in case of async flow need to prevent destroying entry data during invoke
990 // from JS, creation of a copy
991 in_tensors_data = GetTensorsInfoManager().CloneNativeTensorWithData(
992 in_tensors_data->GetTensorsInfo()->Handle(), in_tensors_data->Handle());
994 if (nullptr == in_tensors_data) {
995 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsData error"), &out,
996 ("Could not find TensorsData handle with given id: %d", tensors_data_id));
// Shared sync/async invoke body; `out` is passed by the caller so the same
// code can fill either the direct response or the async message object.
1000 auto logic = [this, id, tensors_data_id, in_tensors_data, async](decltype(out) out) {
1001 TensorsData* out_tensors_data = nullptr;
1002 auto ret = single_manager_.Invoke(id, in_tensors_data, &out_tensors_data);
1004 // in case of async flow, the in_tensor_data with underlying TensorsInfo
1005 // was copied, thus need to be released here
1006 GetTensorsInfoManager().DisposeTensorsInfo(in_tensors_data->GetTensorsInfo());
1007 GetTensorsDataManager().DisposeTensorsData(in_tensors_data);
1010 ReportError(ret, &out);
1014 out[kTensorsDataId] = picojson::value(static_cast<double>(out_tensors_data->Id()));
1015 out[kTensorsInfoId] = picojson::value(static_cast<double>(out_tensors_data->TensorsInfoId()));
// Async path: run `logic` on the worker and post the result with callbackId.
1022 CHECK_ARGS(args, kCallbackId, double, out);
1023 double callback_id = args.get(kCallbackId).get<double>();
1024 this->worker_.add_job([this, callback_id, logic] {
1025 picojson::value response = picojson::value(picojson::object());
1026 picojson::object& async_out = response.get<picojson::object>();
1027 async_out[kCallbackId] = picojson::value(callback_id);
1029 this->PostMessage(response.serialize().c_str());
// Reads a string property `name` from the single-shot model identified by
// args.id and reports the value back to JS.
1037 void MlInstance::MLSingleShotGetValue(const picojson::value& args, picojson::object& out) {
1038 ScopeLogger("args: %s", args.serialize().c_str());
1039 CHECK_ARGS(args, kId, double, out);
1040 CHECK_ARGS(args, kName, std::string, out);
1042 auto id = static_cast<int>(args.get(kId).get<double>());
1043 const auto& name = args.get(kName).get<std::string>();
1045 auto ret = single_manager_.GetValue(id, name, value);
1047 ReportError(ret, &out);
1051 picojson::value val = picojson::value{value};
1052 ReportSuccess(val, out);
// Writes the string property `name` = `value` on the single-shot model
// identified by args.id.
1055 void MlInstance::MLSingleShotSetValue(const picojson::value& args, picojson::object& out) {
1056 ScopeLogger("args: %s", args.serialize().c_str());
1057 CHECK_ARGS(args, kId, double, out);
1058 CHECK_ARGS(args, kName, std::string, out);
1059 CHECK_ARGS(args, kValue, std::string, out);
1061 auto id = static_cast<int>(args.get(kId).get<double>());
1062 const auto& name = args.get(kName).get<std::string>();
1063 const auto& value = args.get(kValue).get<std::string>();
1065 auto ret = single_manager_.SetValue(id, name, value);
1067 ReportError(ret, &out);
// Sets the invoke timeout for the single-shot model identified by args.id.
// The JS double is narrowed to unsigned long; units are whatever
// SingleManager::SetTimeout expects (presumably milliseconds — TODO confirm).
1074 void MlInstance::MLSingleShotSetTimeout(const picojson::value& args, picojson::object& out) {
1075 ScopeLogger("args: %s", args.serialize().c_str());
1076 CHECK_ARGS(args, kId, double, out);
1077 CHECK_ARGS(args, kTimeout, double, out);
1079 auto id = static_cast<int>(args.get(kId).get<double>());
1080 auto timeout = static_cast<unsigned long>(args.get(kTimeout).get<double>());
1082 auto ret = single_manager_.SetTimeout(id, timeout);
1084 ReportError(ret, &out);
// Closes (releases) the single-shot model identified by args.id.
1091 void MlInstance::MLSingleShotClose(const picojson::value& args, picojson::object& out) {
1092 ScopeLogger("args: %s", args.serialize().c_str());
1093 CHECK_ARGS(args, kId, double, out);
1095 auto id = static_cast<int>(args.get(kId).get<double>());
1097 auto ret = single_manager_.Close(id);
1099 ReportError(ret, &out);
// Validates the arguments of MLPipelineManagerCreatePipeline: id must be a
// number, definition a string, and the state-change listener name either a
// string or null (listener is optional). Returns true when anything is off.
1108 bool CreatePipelineArgumentsAreInvalid(const picojson::value& args) {
1111 auto arguments_valid = args.get(kId).is<double>();
1112 arguments_valid &= args.get(kDefinition).is<std::string>();
1113 arguments_valid &= (args.get(kPipelineStateChangeListenerName).is<std::string>() ||
1114 args.get(kPipelineStateChangeListenerName).is<picojson::null>());
1115 LoggerD("CreatePipeline arguments are %s", arguments_valid ? "valid" : "invalid");
1117 return !arguments_valid;
// Creates a pipeline from a GStreamer-style definition string under the given
// id, optionally wiring a JS state-change listener (null listener name means
// "no listener").
1122 void MlInstance::MLPipelineManagerCreatePipeline(const picojson::value& args,
1123 picojson::object& out) {
1124 ScopeLogger("args: %s", args.serialize().c_str());
1126 if (CreatePipelineArgumentsAreInvalid(args)) {
1127 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Could not create pipeline"}, &out);
1131 auto id = static_cast<int>(args.get(kId).get<double>());
1132 auto definition = args.get(kDefinition).get<std::string>();
// Listener name is optional: fall back when JS passed null instead of a string.
1133 auto state_change_listener_name =
1134 args.get(kPipelineStateChangeListenerName).is<std::string>()
1135 ? args.get(kPipelineStateChangeListenerName).get<std::string>()
1138 auto ret = pipeline_manager_.CreatePipeline(id, definition, state_change_listener_name);
1141 ReportError(ret, &out);
// Reports the current state string of the pipeline identified by args.id.
// The state is written directly into a picojson string value so no extra
// copy is needed when reporting success.
1148 void MlInstance::MLPipelineGetState(const picojson::value& args, picojson::object& out) {
1149 ScopeLogger("args: %s", args.serialize().c_str());
1151 if (!args.get(kId).is<double>()) {
1152 LoggerD("id is not a number");
1153 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1156 auto id = static_cast<int>(args.get(kId).get<double>());
// Let GetPipelineState fill the picojson value's internal string in place.
1158 picojson::value state_value{std::string{}};
1159 std::string* state_ptr = &state_value.get<std::string>();
1160 auto ret = pipeline_manager_.GetPipelineState(id, state_ptr);
1162 ReportError(ret, &out);
1166 ReportSuccess(state_value, out);
// Starts (plays) the pipeline identified by args.id.
1169 void MlInstance::MLPipelineStart(const picojson::value& args, picojson::object& out) {
1170 ScopeLogger("args: %s", args.serialize().c_str());
1172 CHECK_ARGS(args, kId, double, out);
1174 auto id = static_cast<int>(args.get(kId).get<double>());
1176 PlatformResult result = pipeline_manager_.Start(id);
1179 ReportError(result, &out);
// Stops (pauses) the pipeline identified by args.id.
1186 void MlInstance::MLPipelineStop(const picojson::value& args, picojson::object& out) {
1187 ScopeLogger("args: %s", args.serialize().c_str());
1189 CHECK_ARGS(args, kId, double, out);
1191 auto id = static_cast<int>(args.get(kId).get<double>());
1193 PlatformResult result = pipeline_manager_.Stop(id);
1196 LogAndReportError(result, &out);
// Disposes the pipeline identified by args.id, releasing its native resources.
1203 void MlInstance::MLPipelineDispose(const picojson::value& args, picojson::object& out) {
1204 ScopeLogger("args: %s", args.serialize().c_str());
1206 if (!args.get(kId).is<double>()) {
1207 LoggerD("id is not a number");
1208 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1212 auto id = static_cast<int>(args.get(kId).get<double>());
1213 auto ret = pipeline_manager_.DisposePipeline(id);
1216 ReportError(ret, &out);
// Registers/fetches node info for element `name` in the pipeline args.id,
// so JS can later get/set its properties.
1223 void MlInstance::MLPipelineGetNodeInfo(const picojson::value& args, picojson::object& out) {
1224 ScopeLogger("args: %s", args.serialize().c_str());
1226 CHECK_ARGS(args, kId, double, out);
1227 CHECK_ARGS(args, kName, std::string, out);
1229 auto name = args.get(kName).get<std::string>();
1230 auto id = static_cast<int>(args.get(kId).get<double>());
1232 PlatformResult result = pipeline_manager_.GetNodeInfo(id, name);
1235 LogAndReportError(result, &out);
// Obtains the source element `name` of the pipeline args.id so JS can later
// push input data into it.
1242 void MlInstance::MLPipelineGetSource(const picojson::value& args, picojson::object& out) {
1243 ScopeLogger("args: %s", args.serialize().c_str());
1245 CHECK_ARGS(args, kId, double, out);
1246 CHECK_ARGS(args, kName, std::string, out);
1248 auto name = args.get(kName).get<std::string>();
1249 auto id = static_cast<int>(args.get(kId).get<double>());
1251 PlatformResult result = pipeline_manager_.GetSource(id, name);
1254 LogAndReportError(result, &out);
// Looks up switch element `name` in the pipeline args.id and reports its
// switch type string back to JS under "type".
1261 void MlInstance::MLPipelineGetSwitch(const picojson::value& args, picojson::object& out) {
1262 ScopeLogger("args: %s", args.serialize().c_str());
1264 if (!args.get(kId).is<double>()) {
1265 LoggerD("id is not a number");
1266 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid pipeline"}, &out);
1270 if (!args.get(kName).is<std::string>()) {
1271 LoggerD("name is not a string");
1272 ReportError(PlatformResult{ErrorCode::ABORT_ERR, "Invalid name"}, &out);
1276 auto name = args.get(kName).get<std::string>();
1277 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1280 auto ret = pipeline_manager_.GetSwitch(name, pipeline_id, &type);
1282 LogAndReportError(ret, &out);
1286 out["type"] = picojson::value{type};
// Looks up valve element `name` in the pipeline args.id.
// NOTE(review): unlike sibling handlers, the id is passed as double without a
// static_cast<int> — confirm GetValve's parameter type makes this intentional.
1290 void MlInstance::MLPipelineGetValve(const picojson::value& args, picojson::object& out) {
1291 ScopeLogger("args: %s", args.serialize().c_str());
1293 CHECK_ARGS(args, kId, double, out);
1294 CHECK_ARGS(args, kName, std::string, out);
1296 auto name = args.get(kName).get<std::string>();
1297 auto pipeline_id = args.get(kId).get<double>();
1299 auto ret = pipeline_manager_.GetValve(name, pipeline_id);
1301 LogAndReportError(ret, &out);
// Attaches a JS listener (identified by listenerName) to sink element `name`
// of the pipeline args.id, so sink data events can be forwarded to JS.
1308 void MlInstance::MLPipelineRegisterSinkListener(const picojson::value& args,
1309 picojson::object& out) {
1310 ScopeLogger("args: %s", args.serialize().c_str());
1312 CHECK_ARGS(args, kId, double, out);
1313 CHECK_ARGS(args, kName, std::string, out);
1314 CHECK_ARGS(args, kListenerName, std::string, out);
1316 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1317 auto sink_name = args.get(kName).get<std::string>();
1318 auto listener_name = args.get(kListenerName).get<std::string>();
1320 auto ret = pipeline_manager_.RegisterSinkListener(sink_name, pipeline_id, listener_name);
1322 LogAndReportError(ret, &out);
// Detaches the previously registered listener from sink element `name` of the
// pipeline args.id.
1329 void MlInstance::MLPipelineUnregisterSinkListener(const picojson::value& args,
1330 picojson::object& out) {
1331 ScopeLogger("args: %s", args.serialize().c_str());
1333 CHECK_ARGS(args, kId, double, out);
1334 CHECK_ARGS(args, kName, std::string, out);
1336 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1337 auto sink_name = args.get(kName).get<std::string>();
1339 auto ret = pipeline_manager_.UnregisterSinkListener(sink_name, pipeline_id);
1341 LogAndReportError(ret, &out);
// Registers a JS-implemented custom filter under `name`. The filter's input
// and output tensor layouts are described by two TensorsInfo handles resolved
// by id; `listenerName` identifies the JS callback that will process data.
1348 void MlInstance::MLPipelineManagerRegisterCustomFilter(const picojson::value& args,
1349 picojson::object& out) {
1350 ScopeLogger("args: %s", args.serialize().c_str());
1352 CHECK_ARGS(args, kName, std::string, out);
1353 CHECK_ARGS(args, kListenerName, std::string, out);
1354 CHECK_ARGS(args, kInputTensorsInfoId, double, out);
1355 CHECK_ARGS(args, kOutputTensorsInfoId, double, out);
1357 const auto& custom_filter_name = args.get(kName).get<std::string>();
1358 const auto& listener_name = args.get(kListenerName).get<std::string>();
1359 auto input_tensors_info_id = static_cast<int>(args.get(kInputTensorsInfoId).get<double>());
1360 auto output_tensors_info_id = static_cast<int>(args.get(kOutputTensorsInfoId).get<double>());
// Resolve the input tensors description; abort when the id is stale/unknown.
1362 TensorsInfo* input_tensors_info_ptr =
1363 GetTensorsInfoManager().GetTensorsInfo(input_tensors_info_id);
1364 if (!input_tensors_info_ptr) {
1366 PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1367 ("Could not find TensorsInfo handle with given id: %d", input_tensors_info_id));
// Resolve the output tensors description the same way.
1371 TensorsInfo* output_tensors_info_ptr =
1372 GetTensorsInfoManager().GetTensorsInfo(output_tensors_info_id);
1373 if (!output_tensors_info_ptr) {
1375 PlatformResult(ErrorCode::ABORT_ERR, "Internal TensorsInfo error"), &out,
1376 ("Could not find TensorsInfo handle with given id: %d", output_tensors_info_id));
1380 auto ret = pipeline_manager_.RegisterCustomFilter(
1381 custom_filter_name, listener_name, input_tensors_info_ptr, output_tensors_info_ptr);
1383 LogAndReportError(ret, &out);
// Delivers the JS custom filter's result for a pending request: `status` is
// the filter's return code (kCustomFilterSuccess == 0 on success) and
// `requestId` matches the invocation being answered.
1390 void MlInstance::MLPipelineManagerCustomFilterOutput(const picojson::value& args,
1391 picojson::object& out) {
1392 ScopeLogger("args: %s", args.serialize().c_str());
1394 CHECK_ARGS(args, kName, std::string, out);
1395 CHECK_ARGS(args, kStatus, double, out);
1396 CHECK_ARGS(args, kRequestId, double, out);
1398 const auto& custom_filter_name = args.get(kName).get<std::string>();
1399 auto status = static_cast<int>(args.get(kStatus).get<double>());
1400 auto request_id = static_cast<int>(args.get(kRequestId).get<double>());
1402 auto ret = pipeline_manager_.CustomFilterOutput(custom_filter_name, request_id, status);
1404 LogAndReportError(ret, &out);
// Removes the custom filter registered under args.name.
1411 void MlInstance::MLPipelineManagerUnregisterCustomFilter(const picojson::value& args,
1412 picojson::object& out) {
1413 ScopeLogger("args: %s", args.serialize().c_str());
1415 CHECK_ARGS(args, kName, std::string, out);
1417 const auto& custom_filter_name = args.get(kName).get<std::string>();
1419 auto ret = pipeline_manager_.UnregisterCustomFilter(custom_filter_name);
1421 LogAndReportError(ret, &out);
// Reads property `name` (interpreted as JS type `type`) from node `nodeName`
// of the pipeline args.id; getProperty writes the value into `out` itself.
1428 void MlInstance::MLPipelineNodeInfoGetProperty(const picojson::value& args, picojson::object& out) {
1429 ScopeLogger("args: %s", args.serialize().c_str());
1431 CHECK_ARGS(args, kId, double, out);
1432 CHECK_ARGS(args, kNodeName, std::string, out);
1433 CHECK_ARGS(args, kName, std::string, out);
1434 CHECK_ARGS(args, kType, std::string, out);
1436 auto id = static_cast<int>(args.get(kId).get<double>());
1437 const auto& name = args.get(kName).get<std::string>();
1438 const auto& node_name = args.get(kNodeName).get<std::string>();
1439 const auto& type = args.get(kType).get<std::string>();
1441 PlatformResult result = pipeline_manager_.getProperty(id, node_name, name, type, &out);
1444 LogAndReportError(result, &out);
// Writes property `name` on node `nodeName` of the pipeline args.id. The
// declared `type` string selects how the incoming kProperty value is
// type-checked before being forwarded: bool for BOOLEAN, string for STRING,
// and number (double) for everything else.
1451 void MlInstance::MLPipelineNodeInfoSetProperty(const picojson::value& args, picojson::object& out) {
1452 ScopeLogger("args: %s", args.serialize().c_str());
1454 CHECK_ARGS(args, kId, double, out);
1455 CHECK_ARGS(args, kNodeName, std::string, out);
1456 CHECK_ARGS(args, kName, std::string, out);
1457 CHECK_ARGS(args, kType, std::string, out);
1459 auto id = static_cast<int>(args.get(kId).get<double>());
1460 const auto& name = args.get(kName).get<std::string>();
1461 const auto& node_name = args.get(kNodeName).get<std::string>();
1462 const auto& type = args.get(kType).get<std::string>();
// Validate the property payload against the declared type before use.
1464 CHECK_EXIST(args, kProperty, out);
1465 if (kBOOLEAN == type) {
1466 CHECK_TYPE(args, kProperty, bool, out, ErrorCode::TYPE_MISMATCH_ERR);
1467 } else if (kSTRING == type) {
1468 CHECK_TYPE(args, kProperty, std::string, out, ErrorCode::TYPE_MISMATCH_ERR);
1470 CHECK_TYPE(args, kProperty, double, out, ErrorCode::TYPE_MISMATCH_ERR);
1472 const picojson::value& property = args.get(kProperty);
1474 PlatformResult result = pipeline_manager_.setProperty(id, node_name, name, type, property);
1476 LogAndReportError(result, &out);
// Fetches the input TensorsInfo of source element `name` in the pipeline
// args.id; on success `res_id` identifies the resulting TensorsInfo handle.
1483 void MlInstance::MLPipelineGetInputTensorsInfo(const picojson::value& args, picojson::object& out) {
1484 ScopeLogger("args: [%s]", args.serialize().c_str());
1486 CHECK_ARGS(args, kId, double, out);
1487 CHECK_ARGS(args, kName, std::string, out);
1489 auto id = static_cast<int>(args.get(kId).get<double>());
1490 const auto& name = args.get(kName).get<std::string>();
1493 PlatformResult result = pipeline_manager_.getInputTensorsInfo(id, name, &res_id);
1495 LogAndReportError(result, &out);
// Pushes the TensorsData identified by args.tensorsDataId into source element
// `name` of the pipeline args.id.
1502 void MlInstance::MLPipelineSourceInputData(const picojson::value& args, picojson::object& out) {
1503 ScopeLogger("args: [%s]", args.serialize().c_str());
1505 CHECK_ARGS(args, kId, double, out);
1506 CHECK_ARGS(args, kName, std::string, out);
1507 CHECK_ARGS(args, kTensorsDataId, double, out);
1509 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1510 auto& source_name = args.get(kName).get<std::string>();
1511 auto tensor_data_id = static_cast<int>(args.get(kTensorsDataId).get<double>());
1513 TensorsData* tensors_data = GetTensorsDataManager().GetTensorsData(tensor_data_id);
1515 if (nullptr == tensors_data) {
1516 LogAndReportError(PlatformResult(ErrorCode::ABORT_ERR, "Internal Source error"), &out,
1517 ("Could not get TensorData handle with given id: %d", tensor_data_id));
1521 auto ret = pipeline_manager_.SourceInputData(pipeline_id, source_name, tensors_data);
1523 LogAndReportError(ret, &out);
// Reports the pad name list of switch element `name` in the pipeline args.id
// as a JSON array.
1530 void MlInstance::MLPipelineSwitchGetPadList(const picojson::value& args, picojson::object& out) {
1531 ScopeLogger("args: [%s]", args.serialize().c_str());
1533 CHECK_ARGS(args, kId, double, out);
1534 CHECK_ARGS(args, kName, std::string, out);
1536 auto pipeline_id = static_cast<int>(args.get(kId).get<double>());
1537 auto& switch_name = args.get(kName).get<std::string>();
1539 picojson::array pad_list;
1540 auto ret = pipeline_manager_.SwitchGetPadList(pipeline_id, switch_name, &pad_list);
1542 LogAndReportError(ret, &out);
1546 ReportSuccess(picojson::value{std::move(pad_list)}, out);
// Selects pad `padName` on switch element `name` of the pipeline args.id.
// NOTE(review): pipeline_id stays a double here (no static_cast<int> as in
// sibling handlers) — confirm SwitchSelect's parameter type.
1549 void MlInstance::MLPipelineSwitchSelect(const picojson::value& args, picojson::object& out) {
1550 ScopeLogger("args: [%s]", args.serialize().c_str());
1552 CHECK_ARGS(args, kId, double, out);
1553 CHECK_ARGS(args, kName, std::string, out);
1554 CHECK_ARGS(args, kPadName, std::string, out);
1556 auto pipeline_id = args.get(kId).get<double>();
1557 auto& switch_name = args.get(kName).get<std::string>();
1558 auto& pad_name = args.get(kPadName).get<std::string>();
1560 auto ret = pipeline_manager_.SwitchSelect(pipeline_id, switch_name, pad_name);
1562 LogAndReportError(ret, &out);
// Opens or closes valve element `name` of the pipeline args.id according to
// the boolean args.open.
1569 void MlInstance::MLPipelineValveSetOpen(const picojson::value& args, picojson::object& out) {
1570 ScopeLogger("args: %s", args.serialize().c_str());
1572 CHECK_ARGS(args, kId, double, out);
1573 CHECK_ARGS(args, kName, std::string, out);
1574 CHECK_ARGS(args, kOpen, bool, out);
1576 auto name = args.get(kName).get<std::string>();
1577 auto pipeline_id = args.get(kId).get<double>();
1578 auto open = args.get(kOpen).get<bool>();
1580 auto ret = pipeline_manager_.ValveSetOpen(pipeline_id, name, open);
1582 LogAndReportError(ret, &out);
// Reports whether valve element `name` of the pipeline args.id is currently
// open (a boolean result on success).
1589 void MlInstance::MLPipelineValveIsOpen(const picojson::value& args, picojson::object& out) {
1590 ScopeLogger("args: %s", args.serialize().c_str());
1592 CHECK_ARGS(args, kId, double, out);
1593 CHECK_ARGS(args, kName, std::string, out);
1595 auto name = args.get(kName).get<std::string>();
1596 auto pipeline_id = args.get(kId).get<double>();
1599 auto ret = pipeline_manager_.ValveIsOpen(pipeline_id, name, &open);
1601 LogAndReportError(ret, &out);
1605 ReportSuccess(picojson::value{open}, out);
1616 } // namespace extension