fix not-found backend library issue
[platform/core/multimedia/inference-engine-interface.git] / src / inference_engine_common_impl.cpp
/**
 * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "inference_engine_error.h"
#include "inference_engine_common_impl.h"
#include "inference_engine_private_type.h"
#include "inference_engine_ini.h"
#include <fstream>
#include <iostream>
#include <unistd.h>
#include <time.h>
#include <dlfcn.h>
#include <experimental/filesystem>

extern "C"
{
#include <dlog.h>

#ifdef LOG_TAG
#undef LOG_TAG
#endif

#define LOG_TAG "INFERENCE_ENGINE_COMMON"
}

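// Guard used by the public APIs below: if no backend has been bound yet
// (i.e., the handle is null), log an error and bail out with
// INFERENCE_ENGINE_ERROR_INVALID_OPERATION.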
#define CHECK_ENGINE_INSTANCE(object)                    \
	if (object == nullptr) {                             \
		LOGE("Inference engine handle is null.");        \
		return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; \
	}

namespace fs = std::experimental::filesystem;
namespace InferenceEngineInterface
{
namespace Common
{
	const char *BACKEND_PATH_INI_FILENAME =
				SYSCONFDIR"/inference/inference_engine_backend_path.ini";
	std::map<std::string, inference_backend_type_e> sApiFw =
	{
		{ "MLAPI", INFERENCE_BACKEND_MLAPI }
	};

	std::map<std::string, inference_backend_type_e> sBackend =
	{
		{ "TFLITE", INFERENCE_BACKEND_TFLITE },
		{ "ARMNN", INFERENCE_BACKEND_ARMNN },
		{ "ONE", INFERENCE_BACKEND_ONE },
		{ "OPENCV", INFERENCE_BACKEND_OPENCV }
	};

	std::map<std::string, inference_backend_npu_type_e> sNpuBackend =
	{
		{ "VIVANTE", INFERENCE_BACKEND_NPU_VIVANTE },
		{ "TRIV2", INFERENCE_BACKEND_NPU_TRIV2 }
	};

	int sApiFwForTFLITE = -1, sApiFwForARMNN = -1, sApiFwForOPENCV = -1;
	int sBackendForNpu = -1;

	InferenceEngineCommon::InferenceEngineCommon() :
			mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
			mUseProfiler(false),
			mProfilerDumpType(IE_PROFILER_DUMP_MIN),
			mBackendModule(),
			mBackendHandle()
	{
		LOGI("ENTER");

		LOGI("LEAVE");
	}

	InferenceEngineCommon::~InferenceEngineCommon()
	{
		LOGW("ENTER");

		if (mUseProfiler == true) {
			mProfiler.Dump(mProfilerDumpType);
		}

		LOGW("LEAVE");
	}

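	// Decide whether a request should be routed to the MLAPI-based backend
	// library instead of an internal backend: this is the case for the MLAPI,
	// ONE, NNTRAINER and SNPE backend types, for NPU devices requested as
	// INFERENCE_TARGET_CUSTOM, and for TFLITE/ARMNN when the ini file selects
	// MLAPI as their API framework.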
	int InferenceEngineCommon::UseMLAPI(const int backend_type, const int device_type)
	{
		if (backend_type == INFERENCE_BACKEND_MLAPI ||
				device_type == INFERENCE_TARGET_CUSTOM ||
				backend_type == INFERENCE_BACKEND_ONE ||
				backend_type == INFERENCE_BACKEND_NNTRAINER ||
				backend_type == INFERENCE_BACKEND_SNPE ||
				(backend_type == INFERENCE_BACKEND_TFLITE && sApiFwForTFLITE == INFERENCE_BACKEND_MLAPI) ||
				(backend_type == INFERENCE_BACKEND_ARMNN && sApiFwForARMNN == INFERENCE_BACKEND_MLAPI))
			return 1;

		return 0;
	}

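	// Read the NPU backend type (for example VIVANTE or TRIV2) from the given
	// ini dictionary and map it to an inference_backend_npu_type_e value.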
	int InferenceEngineCommon::GetNpuBackendType(dictionary *dict, const char *section_name)
	{
		// Parse the backend type for the NPU device.
		const char *parsed_str = iniparser_getstring(dict, section_name, NULL);
		if (parsed_str == NULL) {
			LOGI("No type parsed for %s section name.", section_name);
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		std::map<std::string, inference_backend_npu_type_e>::iterator it = sNpuBackend.find(parsed_str);
		if (it == sNpuBackend.end()) {
			LOGE("Invalid NPU backend name.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		return it->second;
	}

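	// Read which API framework a given backend section of the ini file requests
	// (currently only "MLAPI" is recognized) and map it to an
	// inference_backend_type_e value.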
	int InferenceEngineCommon::GetApiFrameworkType(dictionary *dict, const char *section_name)
	{
		// Parse the API framework type for the given backend.
		const char *parsed_str = iniparser_getstring(dict, section_name, NULL);
		if (parsed_str == NULL) {
			LOGI("No type parsed for %s section name.", section_name);
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		std::map<std::string, inference_backend_type_e>::iterator it = sApiFw.find(parsed_str);
		if (it == sApiFw.end()) {
			LOGE("Invalid API framework name.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		return it->second;
	}

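	// Load the backend configuration from the ini file (BACKEND_PATH_INI_FILENAME
	// unless a path is given) and cache the parsed values in sBackendForNpu,
	// sApiFwForTFLITE and sApiFwForARMNN. Based on the keys read below, the ini
	// file is expected to look roughly like the following sketch (illustrative
	// values only):
	//
	//   [NPU backend]
	//   type = TRIV2
	//
	//   [TFLITE]
	//   API framework = MLAPI
	//
	//   [ARMNN]
	//   API framework = MLAPI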
	int InferenceEngineCommon::LoadConfigFile(std::string ini_file_path)
	{
		int ret = INFERENCE_ENGINE_ERROR_NONE;
		std::string strNpuBackend = "", strApiFwName = "";

		if (ini_file_path.empty())
			ini_file_path = BACKEND_PATH_INI_FILENAME;

		LOGI("%s configuration file will be used.\n", ini_file_path.c_str());

		dictionary *dict = iniparser_load(ini_file_path.c_str());
		if (dict == NULL) {
			LOGW("Failed to load %s file.\n", ini_file_path.c_str());
			LOGW("So the default backend path will not be used.\n");
			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
		}

		sBackendForNpu = GetNpuBackendType(dict, "NPU backend:type");
		if (sBackendForNpu < 0) {
			LOGI("No NPU backend type from ini file.");
			LOGI("This platform cannot use NPU acceleration for inference.");
		}

		LOGI("API FW = %s, NPU = %d", sBackendForNpu > 0 ? "MLAPI" : "Internal", sBackendForNpu);

		sApiFwForTFLITE = GetApiFrameworkType(dict, "TFLITE:API framework");
		if (sApiFwForTFLITE < 0) {
			LOGI("No API framework type from ini file.");
			LOGI("So by default, the internal API will be used for TFLITE.");
		}

		LOGI("API FW = %s for TFLITE.", sApiFwForTFLITE > 0 ? "MLAPI" : "Internal");

		sApiFwForARMNN = GetApiFrameworkType(dict, "ARMNN:API framework");
		if (sApiFwForARMNN < 0) {
			LOGI("No API framework type from ini file.");
			LOGI("So by default, the internal API will be used for ARMNN.");
		}

		LOGI("API FW = %s for ARMNN.", sApiFwForARMNN > 0 ? "MLAPI" : "Internal");

		iniparser_freedict(dict);

		return ret;
	}

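	// Validate a set of tensor buffers coming from, or going to, a backend:
	// every buffer must have a non-null pointer, a non-zero size and a data
	// type inside the valid range.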
	int InferenceEngineCommon::CheckTensorBuffers(IETensorBuffer &buffers)
	{
		if (buffers.empty()) {
			LOGE("tensor buffer vector is empty.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		for (auto& buffer : buffers) {
			const inference_engine_tensor_buffer& tensor_buffer = buffer.second;
			if (tensor_buffer.buffer == nullptr || tensor_buffer.size == 0) {
				LOGE("tensor buffer pointer is null or tensor buffer size is 0.");
				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
			}

			if (tensor_buffer.data_type <= INFERENCE_TENSOR_DATA_TYPE_NONE ||
				tensor_buffer.data_type >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
				LOGE("tensor data type is invalid.");
				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
			}
		}

		return INFERENCE_ENGINE_ERROR_NONE;
	}

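	// Validate layer properties: every layer needs a non-empty name, a
	// non-empty shape, a non-zero size and a data type inside the valid range.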
	int InferenceEngineCommon::CheckLayerProperty(
			inference_engine_layer_property &property)
	{
		// Verify tensor info values.
		for (auto& layer : property.layers) {
			const std::string& name = layer.first;

			if (name.empty()) {
				LOGE("layer name is invalid.");
				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
			}

			const inference_engine_tensor_info& tensor_info = layer.second;
			if (tensor_info.shape.empty() || tensor_info.size == 0) {
				LOGE("tensor info has an empty shape or a size of 0.");
				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
			}

			if (tensor_info.data_type <= INFERENCE_TENSOR_DATA_TYPE_NONE ||
				tensor_info.data_type >= INFERENCE_TENSOR_DATA_TYPE_MAX) {
				LOGE("tensor data type is invalid.");
				return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
			}

			// TODO. we may need to check shape type also.
		}

		return INFERENCE_ENGINE_ERROR_NONE;
	}

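	// Profiler control: once enabled, profile data is dumped to a file by
	// default; DumpProfileToConsole()/DumpProfileToFile() switch the dump
	// target, and the actual dump happens in the destructor.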
	int InferenceEngineCommon::EnableProfiler(bool enable)
	{
		mUseProfiler = enable;

		if (mUseProfiler == true) {
			// By default, profile data will be stored to a given file.
			mProfilerDumpType = IE_PROFILER_DUMP_FILE;
		}

		return INFERENCE_ENGINE_ERROR_NONE;
	}

	int InferenceEngineCommon::DumpProfileToConsole(void)
	{
		if (mUseProfiler == false) {
			std::cout << "Profiler is disabled. Call EnableProfiler() first."
					  << "\n";
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		mProfilerDumpType = IE_PROFILER_DUMP_CONSOLE;
		return INFERENCE_ENGINE_ERROR_NONE;
	}

	int InferenceEngineCommon::DumpProfileToFile(const std::string filename)
	{
		if (mUseProfiler == false) {
			std::cout << "Profiler is disabled. Call EnableProfiler() first."
					  << "\n";
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		mProfilerDumpType = IE_PROFILER_DUMP_FILE;
		mProfiler.SetDumpFilename(filename);

		return INFERENCE_ENGINE_ERROR_NONE;
	}

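	// Load the backend shared library with dlopen(), resolve its
	// EngineCommonInit entry point, create the backend handle and hand over
	// the resolved backend/device types via SetPrivateData()/SetTargetDevices().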
	int
	InferenceEngineCommon::InitBackendEngine(const std::string &backend_path,
											 int backend_type, int device_type)
	{
		LOGI("lib: %s", backend_path.c_str());
		mBackendModule = dlopen(backend_path.c_str(), RTLD_NOW);
		LOGI("HANDLE : [%p]", mBackendModule);

		if (!mBackendModule) {
			LOGE("Fail to dlopen %s", backend_path.c_str());
			LOGE("Error: %s\n", dlerror());
			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
		}

		init_t *EngineInit =
				(init_t *) dlsym(mBackendModule, "EngineCommonInit");
		char *error = NULL;
		if ((error = dlerror()) != NULL) {
			LOGE("Error: %s\n", error);
			dlclose(mBackendModule);
			mBackendModule = nullptr;
			return INFERENCE_ENGINE_ERROR_INTERNAL;
		}

		mBackendHandle = EngineInit();
		if (mBackendHandle == NULL) {
			LOGE("Fail to EngineInit");
			dlclose(mBackendModule);
			mBackendModule = nullptr;
			return INFERENCE_ENGINE_ERROR_INTERNAL;
		}

		if (backend_type == INFERENCE_BACKEND_MLAPI)
			LOGW("DEPRECATION WARNING: INFERENCE_BACKEND_MLAPI has been deprecated and will be removed from the next release.");

		LOGI("backend_type = %d, device_type = %d", backend_type, device_type);

		// The MLAPI backend type is allowed only for NPU devices, so reject
		// requests that combine it with a CPU or GPU device.
		if (backend_type == INFERENCE_BACKEND_MLAPI &&
				(device_type & INFERENCE_TARGET_CPU || device_type & INFERENCE_TARGET_GPU)) {
			LOGE("MLAPI type is used only for NPU device.\n");
			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
		}

		// If the user requested an inference with the ONE backend on a CPU or GPU
		// device then the ONE backend type is passed through to the MLAPI backend.
		if (backend_type == INFERENCE_BACKEND_ONE &&
				(device_type & INFERENCE_TARGET_CPU || device_type & INFERENCE_TARGET_GPU))
			backend_type = INFERENCE_BACKEND_ONE;

		// If an NPU type is declared in the ini file then pass that type to
		// the given inference engine backend.
		if (backend_type == INFERENCE_BACKEND_MLAPI &&
				device_type == INFERENCE_TARGET_CUSTOM && sBackendForNpu > 0)
			backend_type = sBackendForNpu;

		int ret = mBackendHandle->SetPrivateData(&backend_type);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Failed to set a tensor filter plugin type for MLAPI.");
			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
		}

		ret = mBackendHandle->SetTargetDevices(device_type);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Failed to set target device.");
			return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
		}

		return INFERENCE_ENGINE_ERROR_NONE;
	}

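	// Resolve the backend library name from the given configuration and bind the
	// backend. A minimal usage sketch of this class, assuming a TFLITE model run
	// on the CPU (the model-format enum value below comes from the public headers
	// and is illustrative only):
	//
	//   InferenceEngineCommon engine;
	//   inference_engine_config config = {};
	//   config.backend_name = "tflite";
	//   config.backend_type = INFERENCE_BACKEND_TFLITE;
	//   config.target_devices = INFERENCE_TARGET_CPU;
	//   engine.BindBackend(&config);
	//   engine.Load({ "/path/to/model.tflite" }, INFERENCE_MODEL_TFLITE);
	//   ... GetInputTensorBuffers() / GetOutputTensorBuffers() / Run() ...
	//   engine.UnbindBackend();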
	int InferenceEngineCommon::BindBackend(inference_engine_config *config)
	{
		LOGI("ENTER");

		if (mBackendHandle) {
			LOGE("Backend engine has already been initialized.");
			return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
		}

		if (config == nullptr) {
			LOGE("config object is invalid.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		if (mUseProfiler == true) {
			// Memory usage will be measured between BindBackend ~ UnbindBackend callbacks.
			mProfiler.Start(IE_PROFILER_MEMORY);
		}

		if (!config->backend_name.empty()) {
			// If the backend name is "mlapi" then the backend path is decided by the given backend_type.
			// Otherwise, backend_name has a higher priority than backend_type,
			// so when a backend name other than "mlapi" is given, set backend_type to -1
			// so that the backend path is decided by the given backend_name.
			// TODO. drop backend_type later because only one path should exist for one purpose.
			if (config->backend_name.compare("mlapi") != 0)
				config->backend_type = -1;
		}

		// If backend_type of config is -1 then update it according to backend_name.
		if (config->backend_type == -1) {
			std::map<std::string, int> BackendTable;

			BackendTable.insert(std::make_pair("tflite", INFERENCE_BACKEND_TFLITE));
			BackendTable.insert(std::make_pair("armnn", INFERENCE_BACKEND_ARMNN));
			BackendTable.insert(std::make_pair("opencv", INFERENCE_BACKEND_OPENCV));
			BackendTable.insert(std::make_pair("mlapi", INFERENCE_BACKEND_MLAPI));
			BackendTable.insert(std::make_pair("one", INFERENCE_BACKEND_ONE));
			BackendTable.insert(std::make_pair("nntrainer", INFERENCE_BACKEND_NNTRAINER));
			BackendTable.insert(std::make_pair("snpe", INFERENCE_BACKEND_SNPE));

			config->backend_type = BackendTable[config->backend_name];
		} else {
			std::map<int, std::string> BackendTable;

			BackendTable.insert(std::make_pair(INFERENCE_BACKEND_TFLITE, "tflite"));
			BackendTable.insert(std::make_pair(INFERENCE_BACKEND_ARMNN, "armnn"));
			BackendTable.insert(std::make_pair(INFERENCE_BACKEND_OPENCV, "opencv"));
			BackendTable.insert(std::make_pair(INFERENCE_BACKEND_MLAPI, "mlapi"));
			BackendTable.insert(std::make_pair(INFERENCE_BACKEND_ONE, "one"));
			BackendTable.insert(std::make_pair(INFERENCE_BACKEND_NNTRAINER, "nntrainer"));
			BackendTable.insert(std::make_pair(INFERENCE_BACKEND_SNPE, "snpe"));

			config->backend_name = BackendTable[config->backend_type];
		}

		std::string backendLibName;

		// For the two backend types MLAPI and ONE, MLAPI is used as the API framework by default.
		// The same applies to all NPU devices requested with the INFERENCE_TARGET_CUSTOM type.
		if (UseMLAPI(config->backend_type, config->target_devices))
			backendLibName = "libinference-engine-mlapi.so";
		else
			backendLibName = "libinference-engine-" + config->backend_name + ".so";

		int ret = InitBackendEngine(backendLibName, config->backend_type, config->target_devices);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			return ret;
		}

		if (mUseProfiler == true) {
			if (config->backend_type == INFERENCE_BACKEND_ONE) {
				std::string backend_name = "one";
				mProfiler.AddBackendName(backend_name);
			} else {
				mProfiler.AddBackendName(config->backend_name);
			}
		}

		LOGI("LEAVE");

		return INFERENCE_ENGINE_ERROR_NONE;
	}

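	// Tear the backend down: resolve and call the EngineCommonDestroy entry
	// point, close the shared library and reset both handles.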
	void InferenceEngineCommon::UnbindBackend(void)
	{
		LOGW("ENTER");

		if (mUseProfiler == true) {
			// Memory usage will be measured between BindBackend ~ UnbindBackend callbacks.
			mProfiler.Stop(IE_PROFILER_MEMORY);
		}

		if (mBackendModule) {
			destroy_t *engineDestroy =
					(destroy_t *) dlsym(mBackendModule, "EngineCommonDestroy");
			if (engineDestroy != NULL)
				engineDestroy(mBackendHandle);
			dlclose(mBackendModule);
			mBackendHandle = nullptr;
			mBackendModule = nullptr;
		}

		LOGW("LEAVE");
	}

	int InferenceEngineCommon::SetTargetDevices(int types)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		if (types <= INFERENCE_TARGET_NONE || types >= INFERENCE_TARGET_MAX) {
			LOGE("Given target device types(%d) are invalid.", types);
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		LOGI("target device type is %d", types);

		int ret = mBackendHandle->SetTargetDevices(types);
		if (ret != INFERENCE_ENGINE_ERROR_NONE)
			LOGE("Fail to SetTargetDevices");

		if (mUseProfiler == true) {
			mProfiler.AddTargetDevices(types);
		}

		return ret;
	}

	int InferenceEngineCommon::SetCLTuner(const inference_engine_cltuner *cltuner)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		if (cltuner == nullptr) {
			LOGE("Given inference_engine_cltuner object is invalid.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		if (cltuner->active && (cltuner->tuning_mode <= INFERENCE_ENGINE_CLTUNER_MIN ||
								cltuner->tuning_mode >= INFERENCE_ENGINE_CLTUNER_MAX)) {
			LOGE("Invalid tuning mode of CLTuner.(%d)", cltuner->tuning_mode);
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		int ret = mBackendHandle->SetCLTuner(cltuner);
		if (ret != INFERENCE_ENGINE_ERROR_NONE)
			LOGE("Fail to SetCLTuner");

		return ret;
	}

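	// Load the model files with the bound backend. When the profiler is
	// enabled, the load latency is recorded and, if loading fails, the profile
	// data is marked invalid so that the visualizer sees zeroed entries.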
	int InferenceEngineCommon::Load(std::vector<std::string> model_paths,
									inference_model_format_e model_format)
	{
		LOGI("ENTER");

		CHECK_ENGINE_INSTANCE(mBackendHandle);

		if (model_paths.empty()) {
			LOGE("Given model paths are empty.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		if (mUseProfiler == true) {
			mProfiler.AddModelName(model_paths[0]);
			mProfiler.PushEnv();
			mProfiler.Start(IE_PROFILER_LATENCY);
		}

		int ret = mBackendHandle->Load(model_paths, model_format);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Fail to load the model. All profile data will be set to 0.");
			mProfiler.SetInvalid();
		}

		if (mUseProfiler == true) {
			mProfiler.Stop(IE_PROFILER_LATENCY, "Load");

			// Set the profile data to 0 for Run if the profiling failed, which will be needed by the visualizer.
			if (mProfiler.IsInvalid() == true) {
				mProfiler.Start(IE_PROFILER_LATENCY);
				mProfiler.Stop(IE_PROFILER_LATENCY, "Run");
			}
		}

		LOGI("LEAVE");

		return ret;
	}

	int InferenceEngineCommon::GetInputTensorBuffers(IETensorBuffer &buffers)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		int ret = mBackendHandle->GetInputTensorBuffers(buffers);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Failed to get input tensor buffers.");
			return ret;
		}

		// If the backend engine doesn't provide tensor buffers then just return.
		// In this case, the InferenceEngineCommon framework will allocate the tensor buffers.
		if (buffers.empty()) {
			return ret;
		}

		return CheckTensorBuffers(buffers);
	}

	int InferenceEngineCommon::GetOutputTensorBuffers(IETensorBuffer &buffers)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		int ret = mBackendHandle->GetOutputTensorBuffers(buffers);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Failed to get output tensor buffers.");
			return ret;
		}

		// If the backend engine doesn't provide tensor buffers then just return.
		// In this case, the InferenceEngineCommon framework will allocate the tensor buffers.
		if (buffers.empty()) {
			return ret;
		}

		return CheckTensorBuffers(buffers);
	}

	int InferenceEngineCommon::GetInputLayerProperty(
			inference_engine_layer_property &property)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		int ret = mBackendHandle->GetInputLayerProperty(property);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Failed to get input layer property.");
			return ret;
		}

		// If the backend engine doesn't provide input layer property information
		// then return an error so that the user provides the information manually.
		if (property.layers.empty()) {
			LOGI("backend doesn't provide input layer property.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		return CheckLayerProperty(property);
	}

	int InferenceEngineCommon::GetOutputLayerProperty(
			inference_engine_layer_property &property)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		int ret = mBackendHandle->GetOutputLayerProperty(property);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Failed to get output layer property.");
			return ret;
		}

		// If the backend engine doesn't provide output layer property information
		// then return an error so that the user provides the information manually.
		if (property.layers.empty()) {
			LOGI("backend doesn't provide output layer property.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		return CheckLayerProperty(property);
	}

	int InferenceEngineCommon::SetInputLayerProperty(
			inference_engine_layer_property &property)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		if (property.layers.empty()) {
			LOGE("property is empty.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		int ret = CheckLayerProperty(property);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Given input layer property is invalid.");
			return ret;
		}

		return mBackendHandle->SetInputLayerProperty(property);
	}

	int InferenceEngineCommon::SetOutputLayerProperty(
			inference_engine_layer_property &property)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		if (property.layers.empty()) {
			LOGE("property is empty.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		int ret = CheckLayerProperty(property);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Given output layer property is invalid.");
			return ret;
		}

		return mBackendHandle->SetOutputLayerProperty(property);
	}

	int InferenceEngineCommon::GetBackendCapacity(
			inference_engine_capacity *capacity)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		if (capacity == nullptr) {
			LOGE("Given inference_engine_capacity object is invalid.");
			return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER;
		}

		return mBackendHandle->GetBackendCapacity(capacity);
	}

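	// Run a single inference with the given input/output tensor buffers,
	// measuring its latency when the profiler is enabled.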
	int InferenceEngineCommon::Run(IETensorBuffer &input_buffers,
			IETensorBuffer &output_buffers)
	{
		CHECK_ENGINE_INSTANCE(mBackendHandle);

		if (mUseProfiler == true) {
			mProfiler.Start(IE_PROFILER_LATENCY);
		}

		int ret = mBackendHandle->Run(input_buffers, output_buffers);
		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
			LOGE("Failed to run inference. All profile data will be set to 0.");
			mProfiler.SetInvalid();
		}

		if (mUseProfiler == true) {
			mProfiler.Stop(IE_PROFILER_LATENCY, "Run");
		}

		return ret;
	}

} /* Common */
} /* InferenceEngineInterface */