From e5343720d0f958224fa9191b5704edaaee1b7a6b Mon Sep 17 00:00:00 2001
From: Tae-Young Chung
Date: Thu, 4 Jul 2019 19:26:17 +0900
Subject: [PATCH 02/16] Initial code

1. Provides inference-engine-interface-common for general-purpose inference
2. Provides inference-engine-interface-vision for vision-specific inference

Signed-off-by: Tae-Young Chung
---
 CMakeLists.txt                             | 141 ++++++++++++++++
 LICENSE.APLv2                              | 206 ++++++++++++++++++++++++
 README.md                                  |   2 +
 common/inference_engine_common_impl.cpp    | 173 ++++++++++++++++++++
 include/inference_engine_common.h          |  64 ++++++++
 include/inference_engine_common_impl.h     |  76 +++++++++
 include/inference_engine_error.h           |  65 ++++++++
 include/inference_engine_type.h            | 104 ++++++++++++
 include/inference_engine_vision.h          |  88 ++++++++++
 include/inference_engine_vision_impl.h     |  90 +++++++++++
 inference-engine-interface-common.manifest |   5 +
 inference-engine-interface-common.pc.in    |  14 ++
 inference-engine-interface-vision.manifest |   5 +
 inference-engine-interface-vision.pc.in    |  14 ++
 packaging/inference-engine-interface.spec  | 101 ++++++++++++
 vision/inference_engine_vision_impl.cpp    | 250 +++++++++++++++++++++++++++++
 16 files changed, 1398 insertions(+)
 create mode 100644 CMakeLists.txt
 create mode 100644 LICENSE.APLv2
 create mode 100644 README.md
 create mode 100644 common/inference_engine_common_impl.cpp
 create mode 100644 include/inference_engine_common.h
 create mode 100644 include/inference_engine_common_impl.h
 create mode 100644 include/inference_engine_error.h
 create mode 100644 include/inference_engine_type.h
 create mode 100644 include/inference_engine_vision.h
 create mode 100644 include/inference_engine_vision_impl.h
 create mode 100644 inference-engine-interface-common.manifest
 create mode 100644 inference-engine-interface-common.pc.in
 create mode 100644 inference-engine-interface-vision.manifest
 create mode 100644 inference-engine-interface-vision.pc.in
 create mode 100644 packaging/inference-engine-interface.spec
 create mode 100644 vision/inference_engine_vision_impl.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..7ca6ea7
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,141 @@
+
+CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
+SET(fw_name "inference-engine-interface")
+SET(fw_name_vision ${fw_name}-vision)
+SET(fw_name_common ${fw_name}-common)
+
+PROJECT(${fw_name_vision})
+
+SET(CMAKE_INSTALL_PREFIX /usr)
+SET(PREFIX ${CMAKE_INSTALL_PREFIX})
+
+SET(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+
+SET(dependents "dlog")
+SET(pc_dependents "capi-base-common")
+INCLUDE(FindPkgConfig)
+
+pkg_check_modules(${fw_name_vision} REQUIRED ${dependents})
+FOREACH(flag ${${fw_name_vision}_CFLAGS})
+    SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
+    SET(EXTRA_CXXFLAGS "${EXTRA_CXXFLAGS} ${flag}")
+ENDFOREACH(flag)
+
+#OpenCV
+FIND_PACKAGE(OpenCV REQUIRED core)
+if(NOT OpenCV_FOUND)
+    MESSAGE(SEND_ERROR "OpenCV NOT FOUND")
+    RETURN()
+else()
+    INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS})
+endif()
+
+SET(CMAKE_C_FLAGS "-I./include -I./include/headers ${CMAKE_C_FLAGS} ${EXTRA_CFLAGS} -fPIC -Wall -w")
+SET(CMAKE_C_FLAGS_DEBUG "-O0 -g")
+
+SET(CMAKE_CXX_FLAGS "-I./include -I./include/headers ${CMAKE_CXX_FLAGS} ${EXTRA_CXXFLAGS} -fPIC")
+SET(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -w")
+
+ADD_DEFINITIONS("-DPREFIX=\"${CMAKE_INSTALL_PREFIX}\"")
+ADD_DEFINITIONS("-DTIZEN_DEBUG")
+
+SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=${LIB_INSTALL_DIR}")
+
+#common
+aux_source_directory(common SOURCES)
+ADD_LIBRARY(${fw_name_common} SHARED ${SOURCES})
+TARGET_LINK_LIBRARIES(${fw_name_common} dlog)
+
+
+SET_TARGET_PROPERTIES(${fw_name_common}
+    PROPERTIES
+    VERSION ${FULLVER}
+    SOVERSION ${MAJORVER}
+    CLEAN_DIRECT_OUTPUT 1
+)
+
+INSTALL(TARGETS ${fw_name_common} DESTINATION ${LIB_INSTALL_DIR})
+INSTALL(
+    DIRECTORY ${INC_DIR}/ DESTINATION include/media
+    FILES_MATCHING
+    PATTERN "*_private.h" EXCLUDE
+    PATTERN "*.h"
+    )
+
+SET(PC_NAME ${fw_name_common})
+SET(PC_REQUIRED ${pc_dependents})
+SET(PC_LDFLAGS -l${fw_name_common})
+SET(PC_CFLAGS -I\${includedir}/media)
+
+CONFIGURE_FILE(
+    ${fw_name_common}.pc.in
+    ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name_common}.pc
+    @ONLY
+)
+INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name_common}.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig)
+
+#vision
+aux_source_directory(vision SOURCES)
+ADD_LIBRARY(${fw_name_vision} SHARED ${SOURCES})
+
+TARGET_LINK_LIBRARIES(${fw_name_vision} ${OpenCV_LIBS} dlog)
+
+
+SET_TARGET_PROPERTIES(${fw_name_vision}
+    PROPERTIES
+    VERSION ${FULLVER}
+    SOVERSION ${MAJORVER}
+    CLEAN_DIRECT_OUTPUT 1
+)
+
+INSTALL(TARGETS ${fw_name_vision} DESTINATION ${LIB_INSTALL_DIR})
+INSTALL(
+    DIRECTORY ${INC_DIR}/ DESTINATION include/media
+    FILES_MATCHING
+    PATTERN "*_private.h" EXCLUDE
+    PATTERN "*.h"
+    )
+
+SET(PC_NAME ${fw_name_vision})
+SET(PC_REQUIRED ${pc_dependents})
+SET(PC_LDFLAGS -l${fw_name_vision})
+SET(PC_CFLAGS -I\${includedir}/media)
+
+CONFIGURE_FILE(
+    ${fw_name_vision}.pc.in
+    ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name_vision}.pc
+    @ONLY
+)
+INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name_vision}.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig)
+
+IF(UNIX)
+
+ADD_CUSTOM_TARGET (distclean @echo cleaning for source distribution)
+ADD_CUSTOM_COMMAND(
+    DEPENDS clean
+    COMMENT "distribution clean"
+    COMMAND find
+    ARGS    .
+    -not -name config.cmake -and \(
+    -name tester.c -or
+    -name Testing -or
+    -name CMakeFiles -or
+    -name cmake.depends -or
+    -name cmake.check_depends -or
+    -name CMakeCache.txt -or
+    -name cmake.check_cache -or
+    -name *.cmake -or
+    -name Makefile -or
+    -name core -or
+    -name core.* -or
+    -name gmon.out -or
+    -name install_manifest.txt -or
+    -name *.pc -or
+    -name *~ \)
+    | grep -v TC | xargs rm -rf
+    TARGET  distclean
+    VERBATIM
+)
+
+ENDIF(UNIX)
\ No newline at end of file
diff --git a/LICENSE.APLv2 b/LICENSE.APLv2
new file mode 100644
index 0000000..bbe9d02
--- /dev/null
+++ b/LICENSE.APLv2
@@ -0,0 +1,206 @@
+Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. All rights reserved.
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..3eecd82
--- /dev/null
+++ b/README.md
@@ -0,0 +1,2 @@
+# Inference Engine Interface
+This is an interface to various inference engines such as TensorFlow, Caffe, and OpenCV.
\ No newline at end of file diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp new file mode 100644 index 0000000..24afee2 --- /dev/null +++ b/common/inference_engine_common_impl.cpp @@ -0,0 +1,173 @@ +/** + * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +#include +#include +#include +#include +#include + +extern "C" { + +#include + +#ifdef LOG_TAG +#undef LOG_TAG +#endif + +#define LOG_TAG "INFERENCE_ENGINE_COMMON" +} + +namespace InferenceEngineInterface { +namespace Common { + +const char* engineLibs[] = { + "libinference-engine-caffe.so", + "libinference-engine-tf.so", + "libinference-engine-tflite.so"}; + +InferenceEngineCommon::InferenceEngineCommon(inference_backend_type_e backend) : + mBackend(backend) +{ + LOGE("ENTER"); + LOGE("LEAVE"); +} + +InferenceEngineCommon::~InferenceEngineCommon() +{ + LOGW("ENTER"); + destroy_t *engineDestroy = (destroy_t*)dlsym(handle, "EngineCommonDestroy"); + // NULL CHECK? + engineDestroy(engine); + dlclose(handle); + + LOGW("LEAVE"); +} + +int InferenceEngineCommon::Init(std::string configFile, + std::string weightFile, std::string userFile) +{ + LOGW("ENTER"); + char *error = NULL; + handle = dlopen(engineLibs[mBackend], RTLD_LAZY); + if (!handle) { + LOGE("Fail to dlopen %s", engineLibs[mBackend]); + LOGE("Error: %s\n", dlerror()); + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; + } + + init_t* EngineInit = (init_t *)dlsym(handle, "EngineCommonInit"); + if ((error = dlerror()) != NULL) { + LOGE("Error: %s\n", error); + dlclose(handle); + return INFERENCE_ENGINE_ERROR_INTERNAL; + } + + engine = EngineInit(configFile, weightFile, userFile); + if (engine == NULL) { + LOGE("Fail to EngineInit"); + dlclose(handle); + return INFERENCE_ENGINE_ERROR_INTERNAL; + } + + LOGW("LEAVE"); + return INFERENCE_ENGINE_ERROR_NONE; +} + +void InferenceEngineCommon::Deinit() +{ + ; +} + +int InferenceEngineCommon::SetInputTensorParam() +{ + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; +} + +int InferenceEngineCommon::SetInputTensorParamNode(std::string node) +{ + int ret = engine->SetInputTensorParamNode(node); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetInputTensorParamNode"); + + return ret; +} + +int InferenceEngineCommon::SetOutputTensorParam() +{ + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; +} + +int InferenceEngineCommon::SetOutputTensorParamNode(std::string node) +{ + int ret = engine->SetOutPutTensorParamNodes(node); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetOutputTensorParamNodes"); + + return ret; +} + +int InferenceEngineCommon::SetTargetDevice(inference_target_type_e type) +{ + int ret = engine->SetTargetDevice(type); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetTargetDevice"); + + return ret; +} + +int InferenceEngineCommon::Load() +{ + int ret = engine->Load(); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to load InferenceEngineVision"); + + 
ret = engine->CreateInputLayerPassage(); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to load CreateInputLayerPassage"); + + + ret = engine->PrepareInputLayerPassage(); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to load PrepareInputLayerPassage"); + + + return ret; +} + +int InferenceEngineCommon::Run(std::vector tensor) +{ + int ret = engine->Run(tensor); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to run InferenceEngineVision"); + + return ret; +} + +int InferenceEngineCommon::GetInferenceResult(std::vector>& dimInfo, std::vector& results) +{ + int ret = engine->GetInferenceResult(dimInfo, results); + + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to GetInferenceResult"); + + return ret; +} +} /* Common */ +} /* InferenceEngineInterface */ diff --git a/include/inference_engine_common.h b/include/inference_engine_common.h new file mode 100644 index 0000000..6dc02ff --- /dev/null +++ b/include/inference_engine_common.h @@ -0,0 +1,64 @@ +/** + * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __INFERENCE_ENGINE_COMMON_H__ +#define __INFERENCE_ENGINE_COMMON_H__ + +#include +#include + +#include "inference_engine_type.h" + +namespace InferenceEngineInterface { +namespace Common { + +class IInferenceEngineCommon { +public: + + virtual ~IInferenceEngineCommon() {}; + + // InputTensor + virtual int SetInputTensorParam() = 0; + + virtual int SetInputTensorParamNode(std::string node) = 0; + + + // OutputTensor + virtual int SetOutputTensorParam() = 0; + + virtual int SetOutPutTensorParamNodes(std::string node) = 0; + + virtual int SetTargetDevice(inference_target_type_e type) = 0; + + // Load and Run + virtual int Load() = 0; + + virtual int CreateInputLayerPassage() = 0; + + virtual int PrepareInputLayerPassage() = 0; + + + virtual int Run(std::vector tensor) = 0; + + virtual int GetInferenceResult(std::vector>& dimInfo, std::vector& results) = 0; +}; + +typedef void destroy_t(IInferenceEngineCommon*); +typedef IInferenceEngineCommon* init_t(std::string configFile, std::string weightFile, std::string userFile); +} /* Common */ +} /* InferenceEngineInterface */ + +#endif /* __INFERENCE_ENGINE_COMMON_H__ */ diff --git a/include/inference_engine_common_impl.h b/include/inference_engine_common_impl.h new file mode 100644 index 0000000..f797103 --- /dev/null +++ b/include/inference_engine_common_impl.h @@ -0,0 +1,76 @@ +/** + * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __INFERENCE_ENGINE_COMMON_IMPL_H__ +#define __INFERENCE_ENGINE_COMMON_IMPL_H__ + +#include +#include + +#include +#include + +namespace InferenceEngineInterface { +namespace Common { + +class InferenceEngineCommon { +public: + InferenceEngineCommon(inference_backend_type_e backend); + + ~InferenceEngineCommon(); + + int Init(std::string configFile, + std::string weightFile, std::string UserFile); + + void Deinit(); + + // InputTensor + int SetInputTensorParam(); + + int SetInputTensorParamNode(std::string node); + + + // OutputTensor + int SetOutputTensorParam(); + + int SetOutputTensorParamNode(std::string node); + + int SetTargetDevice(inference_target_type_e type); + + // Load and Run + int Load(); + + int CreateInputLayerPassage(); + + int PrepareInputLayerPassage(); + + + int Run(std::vector tensor); + + int GetInferenceResult(std::vector>& dimInfo, std::vector& results); + +private: + void *handle; + IInferenceEngineCommon *engine; + inference_backend_type_e mBackend; + std::vector mUserListName; + +}; + +} /* Common */ +} /* InferenceEngineInterface */ + +#endif /* __INFERENCE_ENGINE_COMMON_IMPL_H__ */ \ No newline at end of file diff --git a/include/inference_engine_error.h b/include/inference_engine_error.h new file mode 100644 index 0000000..e9dbc44 --- /dev/null +++ b/include/inference_engine_error.h @@ -0,0 +1,65 @@ +/** + * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __INFERENCE_ENGINE_ERROR_H__ +#define __INFERENCE_ENGINE_ERROR_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @file inference_engine_error.h + * @brief This file contains error type required by + * inference engine +*/ + +typedef enum { + INFERENCE_ENGINE_ERROR_NONE + = TIZEN_ERROR_NONE, /**< Successful */ + INFERENCE_ENGINE_ERROR_NOT_SUPPORTED + = TIZEN_ERROR_NOT_SUPPORTED, /**< Not supported */ + INFERENCE_ENGINE_ERROR_MSG_TOO_LONG + = TIZEN_ERROR_MSG_TOO_LONG, /**< Message too long */ + INFERENCE_ENGINE_ERROR_NO_DATA + = TIZEN_ERROR_NO_DATA, /**< No data */ + INFERENCE_ENGINE_ERROR_KEY_NOT_AVAILABLE + = TIZEN_ERROR_KEY_NOT_AVAILABLE, /**< Key not available */ + INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY + = TIZEN_ERROR_OUT_OF_MEMORY, /**< Out of memory */ + INFERENCE_ENGINE_ERROR_INVALID_PARAMETER + = TIZEN_ERROR_INVALID_PARAMETER, /**< Invalid parameter */ + INFERENCE_ENGINE_ERROR_INVALID_OPERATION + = TIZEN_ERROR_INVALID_OPERATION, /**< Invalid operation */ + INFERENCE_ENGINE_ERROR_PERMISSION_DENIED + = TIZEN_ERROR_NOT_PERMITTED, /**< Not permitted */ + INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT + = TIZEN_ERROR_MEDIA_VISION | 0x01, /**< Not supported format */ + INFERENCE_ENGINE_ERROR_INTERNAL + = TIZEN_ERROR_MEDIA_VISION | 0x02, /**< Internal error */ + INFERENCE_ENGINE_ERROR_INVALID_DATA + = TIZEN_ERROR_MEDIA_VISION | 0x03, /**< Invalid data */ + INFERENCE_ENGINE_ERROR_INVALID_PATH + = TIZEN_ERROR_MEDIA_VISION | 0x04, /**< Invalid path*/ +} inference_engine_error_e; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __INFERENCE_ENGINE_ERROR_H__ */ \ No newline at end of file diff --git a/include/inference_engine_type.h b/include/inference_engine_type.h new file mode 100644 index 0000000..10ba9e7 --- /dev/null +++ b/include/inference_engine_type.h @@ -0,0 +1,104 @@ +/** + * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __INFERENCE_ENGINE_TYPE_H__ +#define __INFERENCE_ENGINE_TYPE_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @file inference_engine_type.h + * @brief This file contains enumerations and handles definition required by + * inference engine API. + */ + +/** + * @brief Enumeration for inference backend. + * + * @since_tizen 5.5 + * + * @see mv_inference_prepare() + */ +typedef enum { + INFERENCE_BACKEND_NONE = -1, + INFERENCE_BACKEND_OPENCV, /**< OpenCV */ + INFERENCE_BACKEND_CAFFE, /**< Caffe */ + INFERENCE_BACKEND_TF, /**< TensorFlow */ + INFERENCE_BACKEND_TFLite, /**< TensorFlow-Lite */ + INFERENCE_BACKEND_MAX +} inference_backend_type_e; + +/** + * @brief Enumeration for inference target. 
+ * + * @since_tizen 5.5 + * + */ +typedef enum { + INFERENCE_TARGET_NONE = -1, + INFERENCE_TARGET_CPU, /**< CPU */ + INFERENCE_TARGET_GPU, /**< GPU*/ + INFERENCE_TARGET_MAX +} inference_target_type_e; + +typedef enum { + INFERENCE_INPUT_GENERAL = 0, + INFERENCE_INPUT_IMAGE, + INFERENCE_INPUT_MAX +} inference_input_type_e; + +typedef struct _ImageClassficationResults { + int number_of_classes; + std::vector indices; + std::vector names; + std::vector confidences; +} ImageClassificationResults; /**< structure ImageClassificationResults */ + +typedef struct _ObjectDetectionResults { + int number_of_objects; + std::vector indices; + std::vector names; + std::vector confidences; + std::vector locations; +} ObjectDetectionResults; /**< structure ObjectDetectionResults */ + +typedef struct _FaceDetectionResults { + int number_of_faces; + std::vector confidences; + std::vector locations; +} FaceDetectionResults; /**< structure ObjectDetectionResults */ + +typedef struct _FacialLandMarkDetectionResults { + int number_of_landmarks; + std::vector locations; +} FacialLandMarkDetectionResults; /**< structure ObjectDetectionResults */ + +typedef struct _InferenceResults{ + int dimInfoSize; + std::vector> dimInfo; + std::vector data; +} InferenceResults; /**< structure InferenceResults */ + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* __INFERENCE_ENGINE_TYPE_H__ */ \ No newline at end of file diff --git a/include/inference_engine_vision.h b/include/inference_engine_vision.h new file mode 100644 index 0000000..a4cb8c3 --- /dev/null +++ b/include/inference_engine_vision.h @@ -0,0 +1,88 @@ +/** + * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __INFERENCE_ENGINE_VISION_H__ +#define __INFERENCE_ENGINE_VISION_H__ + +#include +#include + +#include "inference_engine_type.h" +#include + +namespace InferenceEngineInterface { +namespace Vision { + +class IInferenceEngineVision { +public: + + virtual ~IInferenceEngineVision() {}; + + // InputTensor + virtual int SetInputTensorParam() = 0; + + virtual int SetInputTensorParamInput(int width, int height, int dim, int ch) = 0; + + virtual int SetInputTensorParamNorm(double deviation, double mean) = 0; + + virtual int SetInputTensorParamNode(std::string node) = 0; + + + // OutputTensor + virtual int SetOutputTensorParam() = 0; + + virtual int SetOutputTensorParamThresHold(double threshold) = 0; + + virtual int SetOutputTensorParamNumbers(int number) = 0; + + virtual int SetOutputTensorParamType(int type) = 0; + + virtual int SetOutPutTensorParamNodes(std::string node) = 0; + + virtual int SetTargetDevice(inference_target_type_e type) = 0; + + // Load and Run + virtual int Load() = 0; + + virtual int CreateInputLayerPassage() = 0; + + virtual int PrepareInputLayerPassage(inference_input_type_e type) = 0; +Vision + virtual int Run(cv::Mat tensor) = 0; + + virtual int Run(std::vector tensor) = 0; + + virtual int GetInferenceResult(ImageClassificationResults& results) = 0; + + virtual int GetInferenceResult(ObjectDetectionResults& results) = 0; + + virtual int GetInferenceResult(FaceDetectionResults& results) = 0; + + virtual int GetInferenceResult(FacialLandMarkDetectionResults& results) = 0; + + virtual int GetInferenceResult(std::vector>& dimInfo, std::vector& results) = 0; + + virtual int GetNumberOfOutputs() = 0; + + virtual void SetUserListName(std::string userlist) = 0; +}; + +typedef void destroy_t(IInferenceEngineVision*); +typedef IInferenceEngineVision* init_t(std::string configFile, std::string weightFile, std::string userFile); +} /* Vision */ +} /* InferenceEngineInterface */ + +#endif /* __INFERENCE_ENGINE_VISION_H__ */ diff --git a/include/inference_engine_vision_impl.h b/include/inference_engine_vision_impl.h new file mode 100644 index 0000000..b50fbfb --- /dev/null +++ b/include/inference_engine_vision_impl.h @@ -0,0 +1,90 @@ +/** + * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef __INFERENCE_ENGINE_VISION_IMPL_H__ +#define __INFERENCE_ENGINE_VISION_IMPL_H__ + +#include +#include + +#include +#include +#include + + +namespace InferenceEngineInterface { +namespace Vision { + +class InferenceEngineVision { +public: + InferenceEngineVision(inference_backend_type_e backend); + + ~InferenceEngineVision(); + + int Init(std::string configFile, + std::string weightFile, std::string UserFile); + + void Deinit(); + + // Input Tensor parameters + int SetInputTensorParamInput(int width, int height, int dim, int ch); + + int SetInputTensorParamNorm(double deviation, double mean); + + int SetInputTensorParamNode(std::string node); + + // Output Tensor parameters + int SetOutputTensorParamThresHold(double threshold); + + int SetOutputTensorParamNumbers(int number); + + int SetOutputTensorParamType(int type); + + int SetOutPutTensorParamNodes(std::string node); + + // Set target device + int SetTargetDevice(inference_target_type_e device); + + int Load(); + + int Run(cv::Mat tensor); + + int GetInferenceResult(ImageClassificationResults& results); + + int GetInferenceResult(ObjectDetectionResults& results); + + int GetInferenceResult(FaceDetectionResults& results); + + int GetInferenceResult(FacialLandMarkDetectionResults& results); + + int GetInferenceResult(std::vector>& dimInfo, std::vector& results); + + int GetNumberOfOutputs(); + + void SetUserListName(std::string userlist); + +private: + void *handle; + IInferenceEngineVision *engine; + inference_backend_type_e mBackend; + std::vector mUserListName; + +}; + +} /* Vision */ +} /* InferenceEngineInterface */ + +#endif /* __INFERENCE_ENGINE_VISION_IMPL_H__ */ \ No newline at end of file diff --git a/inference-engine-interface-common.manifest b/inference-engine-interface-common.manifest new file mode 100644 index 0000000..86dbb26 --- /dev/null +++ b/inference-engine-interface-common.manifest @@ -0,0 +1,5 @@ + + + + + diff --git a/inference-engine-interface-common.pc.in b/inference-engine-interface-common.pc.in new file mode 100644 index 0000000..e7cd18f --- /dev/null +++ b/inference-engine-interface-common.pc.in @@ -0,0 +1,14 @@ + +# Package Information for pkg-config + +prefix=@PREFIX@ +exec_prefix=/usr +libdir=@LIB_INSTALL_DIR@ +includedir=/usr/include/media + +Name: @PC_NAME@ +Description: @PACKAGE_DESCRIPTION@ +Version: @VERSION@ +Requires: @PC_REQUIRED@ +Libs: -L${libdir} @PC_LDFLAGS@ +Cflags: -I${includedir} -I/usr/include diff --git a/inference-engine-interface-vision.manifest b/inference-engine-interface-vision.manifest new file mode 100644 index 0000000..86dbb26 --- /dev/null +++ b/inference-engine-interface-vision.manifest @@ -0,0 +1,5 @@ + + + + + diff --git a/inference-engine-interface-vision.pc.in b/inference-engine-interface-vision.pc.in new file mode 100644 index 0000000..e7cd18f --- /dev/null +++ b/inference-engine-interface-vision.pc.in @@ -0,0 +1,14 @@ + +# Package Information for pkg-config + +prefix=@PREFIX@ +exec_prefix=/usr +libdir=@LIB_INSTALL_DIR@ +includedir=/usr/include/media + +Name: @PC_NAME@ +Description: @PACKAGE_DESCRIPTION@ +Version: @VERSION@ +Requires: @PC_REQUIRED@ +Libs: -L${libdir} @PC_LDFLAGS@ +Cflags: -I${includedir} -I/usr/include diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec new file mode 100644 index 0000000..b3d6ac3 --- /dev/null +++ b/packaging/inference-engine-interface.spec @@ -0,0 +1,101 @@ +Name: inference-engine-interface +Summary: Interface of inference engines +Version: 0.0.1 +Release: 1 +Group: 
Multimedia/Framework
+License:     Apache-2.0
+Source0:     %{name}-%{version}.tar.gz
+BuildRequires: cmake
+BuildRequires: pkgconfig(dlog)
+BuildRequires: pkgconfig(libtzplatform-config)
+BuildRequires: pkgconfig(capi-base-common)
+BuildRequires: pkgconfig(opencv) >= 3.4.1
+BuildRequires: pkgconfig(python)
+
+%description
+Interface of inference engines
+
+%package devel
+Summary:     Interface of inference engines
+Group:       Multimedia/Framework
+Requires:    %{name} = %{version}-%{release}
+
+%description devel
+Interface of inference engines (Dev)
+
+
+%package common
+Summary:     Common interface of inference engines
+Group:       Multimedia/Framework
+
+%description common
+Common interface of inference engines
+
+%package common-devel
+Summary:     Common interface of inference engines
+Group:       Multimedia/Framework
+Requires:    inference-engine-interface-common
+
+%description common-devel
+Common interface of inference engines (Dev)
+
+%package vision
+Summary:     Vision interface of inference engines
+Group:       Multimedia/Framework
+
+%description vision
+Vision interface of inference engines
+
+%package vision-devel
+Summary:     Vision interface of inference engines
+Group:       Multimedia/Framework
+Requires:    inference-engine-interface-vision
+
+%description vision-devel
+Vision interface of inference engines (Dev)
+
+%prep
+%setup -q
+
+%build
+%if 0%{?sec_build_binary_debug_enable}
+export CFLAGS="$CFLAGS -DTIZEN_DEBUG_ENABLE"
+export CXXFLAGS="$CXXFLAGS -DTIZEN_DEBUG_ENABLE"
+export FFLAGS="$FFLAGS -DTIZEN_DEBUG_ENABLE"
+%endif
+
+export CFLAGS+=" -DPATH_LIBDIR=\\\"%{_libdir}\\\""
+export CXXFLAGS+=" -DPATH_LIBDIR=\\\"%{_libdir}\\\""
+
+MAJORVER=`echo %{version} | awk 'BEGIN {FS="."}{print $1}'`
+%cmake . -DFULLVER=%{version} -DMAJORVER=${MAJORVER} -DTZ_SYS_BIN=%TZ_SYS_BIN \
+
+make %{?jobs:-j%jobs}
+
+%install
+rm -rf %{buildroot}
+
+%make_install
+
+%post -p /sbin/ldconfig
+%postun -p /sbin/ldconfig
+
+%files common
+%manifest inference-engine-interface-common.manifest
+%license LICENSE.APLv2
+%{_libdir}/libinference-engine-interface-common.so.*
+
+%files common-devel
+%{_includedir}/media/*.h
+%{_libdir}/pkgconfig/*common.pc
+%{_libdir}/lib*-common.so
+
+%files vision
+%manifest inference-engine-interface-vision.manifest
+%license LICENSE.APLv2
+%{_libdir}/libinference-engine-interface-vision.so.*
+
+%files vision-devel
+%{_includedir}/media/*.h
+%{_libdir}/pkgconfig/*vision.pc
+%{_libdir}/lib*-vision.so
\ No newline at end of file
diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp
new file mode 100644
index 0000000..9487e7a
--- /dev/null
+++ b/vision/inference_engine_vision_impl.cpp
@@ -0,0 +1,250 @@
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include + + +#include +#include +#include +#include +#include + +extern "C" { + +#include + +#ifdef LOG_TAG +#undef LOG_TAG +#endif + +#define LOG_TAG "INFERENCE_ENGINE_VISION" +} + +namespace InferenceEngineInterface { +namespace Vision { + +const char* engineLibs[] = { + "libinference-engine-opencv.so", + "libinference-engine-caffe.so", + "libinference-engine-tf.so", + "libinference-engine-tflite.so"}; + +InferenceEngineVision::InferenceEngineVision(inference_backend_type_e backend) : + mBackend(backend) +{ + LOGE("ENTER"); + LOGE("LEAVE"); +} + +InferenceEngineVision::~InferenceEngineVision() +{ + LOGW("ENTER"); + destroy_t *engineDestroy = (destroy_t*)dlsym(handle, "EngineVisionDestroy"); + // NULL CHECK? + engineDestroy(engine); + dlclose(handle); + + LOGW("LEAVE"); +} + +int InferenceEngineVision::Init(std::string configFile, + std::string weightFile, std::string userFile) +{ + LOGW("ENTER"); + char *error = NULL; + handle = dlopen(engineLibs[mBackend], RTLD_LAZY); + if (!handle) { + LOGE("Fail to dlopen %s", engineLibs[mBackend]); + LOGE("Error: %s\n", dlerror()); + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; + } + + init_t* EngineInit = (init_t *)dlsym(handle, "EngineVisionInit"); + if ((error = dlerror()) != NULL) { + LOGE("Error: %s\n", error); + dlclose(handle); + return INFERENCE_ENGINE_ERROR_INTERNAL; + } + + engine = EngineInit(configFile, weightFile, userFile); + if (engine == NULL) { + LOGE("Fail to EngineInit"); + dlclose(handle); + return INFERENCE_ENGINE_ERROR_INTERNAL; + } + + LOGW("LEAVE"); + return INFERENCE_ENGINE_ERROR_NONE; +} + +void InferenceEngineVision::Deinit() +{ + ; +} + +int InferenceEngineVision::SetInputTensorParamInput(int width, int height, int dim, int ch) +{ + int ret = engine->SetInputTensorParamInput(width, height, dim, ch); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetInputTensorParamInput"); + + return ret; +} + +int InferenceEngineVision::SetInputTensorParamNorm(double deviation, double mean) +{ + int ret = engine->SetInputTensorParamNorm(deviation, mean); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetInputTensorParamNorm"); + + return ret; +} + +int InferenceEngineVision::SetInputTensorParamNode(std::string node) +{ + int ret = engine->SetInputTensorParamNode(node); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetInputTensorParamNode"); + + return ret; +} + +int InferenceEngineVision::SetOutputTensorParamThresHold(double threshold) +{ + int ret = engine->SetOutputTensorParamThresHold(threshold); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetOutputTensorParamThresHold"); + + return ret; +} + +int InferenceEngineVision::SetOutputTensorParamNumbers(int numbers) +{ + int ret = engine->SetOutputTensorParamNumbers(numbers); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetOuputTensorParamNumbers"); + + return ret; +} + +int InferenceEngineVision::SetOutputTensorParamType(int type) +{ + int ret = engine->SetOutputTensorParamType(type); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetOutputTensorParamType"); + + return ret; +} + +int InferenceEngineVision::SetTargetDevice(inference_target_type_e type) +{ + int ret = engine->SetTargetDevice(type); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetTargetDevice"); + + return ret; +} + +int InferenceEngineVision::Load() +{ + int ret = engine->Load(); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to load InferenceEngineVision"); + + ret = engine->CreateInputLayerPassage(); + if (ret != 
INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to load CreateInputLayerPassage"); + + ret = engine->PrepareInputLayerPassage(INFERENCE_INPUT_IMAGE); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to load PrepareInputLayerPassage"); + + return ret; +} + +int InferenceEngineVision::Run(cv::Mat tensor) +{ + int ret = engine->Run(tensor); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to run InferenceEngineVision"); + + return ret; +} + +int InferenceEngineVision::GetInferenceResult(ImageClassificationResults& results) +{ + int ret = engine->GetInferenceResult(results); + + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to GetClassficationResults"); + // NULL CHECK? + return ret; +} + +int InferenceEngineVision::GetInferenceResult(ObjectDetectionResults& results) +{ + int ret = engine->GetInferenceResult(results); + + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to GetObjectDetectionResults"); + + return ret; +} + +int InferenceEngineVision::GetInferenceResult(FaceDetectionResults& results) +{ + int ret = engine->GetInferenceResult(results); + + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to GetFaceDetectionResults"); + + return ret; +} + +int InferenceEngineVision::GetInferenceResult(FacialLandMarkDetectionResults& results) +{ + int ret = engine->GetInferenceResult(results); + + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to GetFacialLandMarkDetectionResults"); + + return ret; +} + +int InferenceEngineVision::GetInferenceResult(std::vector>& dimInfo, std::vector& results) +{ + int ret = engine->GetInferenceResult(dimInfo, results); + + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to GetInferenceResult"); + + return ret; +} + +int InferenceEngineVision::GetNumberOfOutputs() +{ + return engine->GetNumberOfOutputs(); +} + +void InferenceEngineVision::SetUserListName(std::string userlist) +{ + ; +} + +} /* Vision */ +} /* InferenceEngineInterface */ -- 2.7.4 From bdab999fd1d26f9e6116e2208cdd52409822dfa2 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Thu, 4 Jul 2019 21:20:48 +0900 Subject: [PATCH 03/16] Fix build break Signed-off-by: Tae-Young Chung --- include/inference_engine_vision.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/inference_engine_vision.h b/include/inference_engine_vision.h index a4cb8c3..ab6bd62 100644 --- a/include/inference_engine_vision.h +++ b/include/inference_engine_vision.h @@ -60,7 +60,7 @@ public: virtual int CreateInputLayerPassage() = 0; virtual int PrepareInputLayerPassage(inference_input_type_e type) = 0; -Vision + virtual int Run(cv::Mat tensor) = 0; virtual int Run(std::vector tensor) = 0; -- 2.7.4 From 5a919956cec0e933875a39f185b187ca6c5f5347 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Thu, 11 Jul 2019 10:19:06 +0900 Subject: [PATCH 04/16] Add INFERENCE_TARGET_CUSTOM and change constructor to get parameter of string type Change-Id: Ib2f1f159911e0dc0d0f1abbbf5807b354b24576b Signed-off-by: Tae-Young Chung --- common/inference_engine_common_impl.cpp | 13 ++++--------- include/inference_engine_common_impl.h | 5 +++-- include/inference_engine_type.h | 25 +++++-------------------- include/inference_engine_vision_impl.h | 8 ++++---- packaging/inference-engine-interface.spec | 4 ++-- vision/inference_engine_vision_impl.cpp | 19 +++++++------------ 6 files changed, 25 insertions(+), 49 deletions(-) diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp index 24afee2..758bd5a 100644 --- a/common/inference_engine_common_impl.cpp +++ 
b/common/inference_engine_common_impl.cpp @@ -37,15 +37,10 @@ extern "C" { namespace InferenceEngineInterface { namespace Common { -const char* engineLibs[] = { - "libinference-engine-caffe.so", - "libinference-engine-tf.so", - "libinference-engine-tflite.so"}; - -InferenceEngineCommon::InferenceEngineCommon(inference_backend_type_e backend) : - mBackend(backend) +InferenceEngineCommon::InferenceEngineCommon(std::string backend) { LOGE("ENTER"); + mBackendLibName = "libinference-engine-" + backend + ".so"; LOGE("LEAVE"); } @@ -65,9 +60,9 @@ int InferenceEngineCommon::Init(std::string configFile, { LOGW("ENTER"); char *error = NULL; - handle = dlopen(engineLibs[mBackend], RTLD_LAZY); + handle = dlopen(mBackendLibName.c_str(), RTLD_LAZY); if (!handle) { - LOGE("Fail to dlopen %s", engineLibs[mBackend]); + LOGE("Fail to dlopen %s", mBackendLibName.c_str()); LOGE("Error: %s\n", dlerror()); return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; } diff --git a/include/inference_engine_common_impl.h b/include/inference_engine_common_impl.h index f797103..f94596e 100644 --- a/include/inference_engine_common_impl.h +++ b/include/inference_engine_common_impl.h @@ -28,7 +28,8 @@ namespace Common { class InferenceEngineCommon { public: - InferenceEngineCommon(inference_backend_type_e backend); + + InferenceEngineCommon(std::string backend); ~InferenceEngineCommon(); @@ -65,7 +66,7 @@ public: private: void *handle; IInferenceEngineCommon *engine; - inference_backend_type_e mBackend; + std::string mBackendLibName; std::vector mUserListName; }; diff --git a/include/inference_engine_type.h b/include/inference_engine_type.h index 10ba9e7..5860582 100644 --- a/include/inference_engine_type.h +++ b/include/inference_engine_type.h @@ -30,32 +30,17 @@ extern "C" { */ /** - * @brief Enumeration for inference backend. - * - * @since_tizen 5.5 - * - * @see mv_inference_prepare() - */ -typedef enum { - INFERENCE_BACKEND_NONE = -1, - INFERENCE_BACKEND_OPENCV, /**< OpenCV */ - INFERENCE_BACKEND_CAFFE, /**< Caffe */ - INFERENCE_BACKEND_TF, /**< TensorFlow */ - INFERENCE_BACKEND_TFLite, /**< TensorFlow-Lite */ - INFERENCE_BACKEND_MAX -} inference_backend_type_e; - -/** * @brief Enumeration for inference target. 
* * @since_tizen 5.5 * */ typedef enum { - INFERENCE_TARGET_NONE = -1, + INFERENCE_TARGET_NONE = -1, INFERENCE_TARGET_CPU, /**< CPU */ - INFERENCE_TARGET_GPU, /**< GPU*/ - INFERENCE_TARGET_MAX + INFERENCE_TARGET_GPU, /**< GPU */ + INFERENCE_TARGET_CUSTOM, /**< NPU */ + INFERENCE_TARGET_MAX } inference_target_type_e; typedef enum { @@ -101,4 +86,4 @@ typedef struct _InferenceResults{ } #endif /* __cplusplus */ -#endif /* __INFERENCE_ENGINE_TYPE_H__ */ \ No newline at end of file +#endif /* __INFERENCE_ENGINE_TYPE_H__ */ diff --git a/include/inference_engine_vision_impl.h b/include/inference_engine_vision_impl.h index b50fbfb..0ba444f 100644 --- a/include/inference_engine_vision_impl.h +++ b/include/inference_engine_vision_impl.h @@ -30,7 +30,7 @@ namespace Vision { class InferenceEngineVision { public: - InferenceEngineVision(inference_backend_type_e backend); + InferenceEngineVision(std::string backend); ~InferenceEngineVision(); @@ -73,13 +73,13 @@ public: int GetInferenceResult(std::vector>& dimInfo, std::vector& results); int GetNumberOfOutputs(); - + void SetUserListName(std::string userlist); private: void *handle; IInferenceEngineVision *engine; - inference_backend_type_e mBackend; + std::string mBackendLibName; std::vector mUserListName; }; @@ -87,4 +87,4 @@ private: } /* Vision */ } /* InferenceEngineInterface */ -#endif /* __INFERENCE_ENGINE_VISION_IMPL_H__ */ \ No newline at end of file +#endif /* __INFERENCE_ENGINE_VISION_IMPL_H__ */ diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec index b3d6ac3..9a0da02 100644 --- a/packaging/inference-engine-interface.spec +++ b/packaging/inference-engine-interface.spec @@ -1,7 +1,7 @@ Name: inference-engine-interface Summary: Interface of inference engines Version: 0.0.1 -Release: 1 +Release: 2 Group: Multimedia/Framework License: Apache-2.0 Source0: %{name}-%{version}.tar.gz @@ -98,4 +98,4 @@ rm -rf %{buildroot} %files vision-devel %{_includedir}/media/*.h %{_libdir}/pkgconfig/*vision.pc -%{_libdir}/lib*-vision.so \ No newline at end of file +%{_libdir}/lib*-vision.so diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp index 9487e7a..47c980a 100644 --- a/vision/inference_engine_vision_impl.cpp +++ b/vision/inference_engine_vision_impl.cpp @@ -38,16 +38,10 @@ extern "C" { namespace InferenceEngineInterface { namespace Vision { -const char* engineLibs[] = { - "libinference-engine-opencv.so", - "libinference-engine-caffe.so", - "libinference-engine-tf.so", - "libinference-engine-tflite.so"}; - -InferenceEngineVision::InferenceEngineVision(inference_backend_type_e backend) : - mBackend(backend) +InferenceEngineVision::InferenceEngineVision(std::string backend) { LOGE("ENTER"); + mBackendLibName = "libinference-engine-" + backend + ".so"; LOGE("LEAVE"); } @@ -67,20 +61,21 @@ int InferenceEngineVision::Init(std::string configFile, { LOGW("ENTER"); char *error = NULL; - handle = dlopen(engineLibs[mBackend], RTLD_LAZY); + handle = dlopen(mBackendLibName.c_str(), RTLD_LAZY); + LOGE("dlopen %s", mBackendLibName.c_str()); if (!handle) { - LOGE("Fail to dlopen %s", engineLibs[mBackend]); + LOGE("Fail to dlopen %s", mBackendLibName.c_str()); LOGE("Error: %s\n", dlerror()); return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; } - + init_t* EngineInit = (init_t *)dlsym(handle, "EngineVisionInit"); if ((error = dlerror()) != NULL) { LOGE("Error: %s\n", error); dlclose(handle); return INFERENCE_ENGINE_ERROR_INTERNAL; } - + engine = EngineInit(configFile, weightFile, 
userFile);
 	if (engine == NULL) {
 		LOGE("Fail to EngineInit");
 		dlclose(handle);
-- 
2.7.4


From 2367acca9c3d6ba1c10111678908165c0f96cc79 Mon Sep 17 00:00:00 2001
From: Tae-Young Chung
Date: Wed, 24 Jul 2019 16:55:31 +0900
Subject: [PATCH 05/16] Change the parameter of SetOutputTensorParamNodes()
 from std::string to std::vector<std::string>

Now, multiple output tensors are supported. You can set the output
tensors' names to get multiple output tensors. Note that a batch is
not supported in the current inference, so an input tensor should be
one string.

Change-Id: I01a3a8fa7f15ce329fbab187f6d72f583f116c2a
Signed-off-by: Tae-Young Chung
---
 common/inference_engine_common_impl.cpp   | 4 ++--
 include/inference_engine_common.h         | 2 +-
 include/inference_engine_common_impl.h    | 4 ++--
 include/inference_engine_vision.h         | 2 +-
 include/inference_engine_vision_impl.h    | 2 +-
 packaging/inference-engine-interface.spec | 2 +-
 vision/inference_engine_vision_impl.cpp   | 9 +++++++++
 7 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp
index 758bd5a..ccd4335 100644
--- a/common/inference_engine_common_impl.cpp
+++ b/common/inference_engine_common_impl.cpp
@@ -109,9 +109,9 @@ int InferenceEngineCommon::SetOutputTensorParam()
 	return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
 }
 
-int InferenceEngineCommon::SetOutputTensorParamNode(std::string node)
+int InferenceEngineCommon::SetOutputTensorParamNodes(std::vector<std::string> nodes)
 {
-	int ret = engine->SetOutPutTensorParamNodes(node);
+	int ret = engine->SetOutputTensorParamNodes(nodes);
 	if (ret != INFERENCE_ENGINE_ERROR_NONE)
 		LOGE("Fail to SetOutputTensorParamNodes");
 
diff --git a/include/inference_engine_common.h b/include/inference_engine_common.h
index 6dc02ff..985e8d9 100644
--- a/include/inference_engine_common.h
+++ b/include/inference_engine_common.h
@@ -39,7 +39,7 @@ public:
 	// OutputTensor
 	virtual int SetOutputTensorParam() = 0;
 
-	virtual int SetOutPutTensorParamNodes(std::string node) = 0;
+	virtual int SetOutputTensorParamNodes(std::vector<std::string> nodes) = 0;
 
 	virtual int SetTargetDevice(inference_target_type_e type) = 0;
 
diff --git a/include/inference_engine_common_impl.h b/include/inference_engine_common_impl.h
index f94596e..6b37b15 100644
--- a/include/inference_engine_common_impl.h
+++ b/include/inference_engine_common_impl.h
@@ -47,7 +47,7 @@ public:
 	// OutputTensor
 	int SetOutputTensorParam();
 
-	int SetOutputTensorParamNode(std::string node);
+	int SetOutputTensorParamNodes(std::vector<std::string> nodes);
 
 	int SetTargetDevice(inference_target_type_e type);
 
@@ -74,4 +74,4 @@ private:
 } /* Common */
 } /* InferenceEngineInterface */
 
-#endif /* __INFERENCE_ENGINE_COMMON_IMPL_H__ */
\ No newline at end of file
+#endif /* __INFERENCE_ENGINE_COMMON_IMPL_H__ */
diff --git a/include/inference_engine_vision.h b/include/inference_engine_vision.h
index ab6bd62..6b143ba 100644
--- a/include/inference_engine_vision.h
+++ b/include/inference_engine_vision.h
@@ -50,7 +50,7 @@ public:
 
 	virtual int SetOutputTensorParamType(int type) = 0;
 
-	virtual int SetOutPutTensorParamNodes(std::string node) = 0;
+	virtual int SetOutputTensorParamNodes(std::vector<std::string> nodes) = 0;
 
 	virtual int SetTargetDevice(inference_target_type_e type) = 0;
 
diff --git a/include/inference_engine_vision_impl.h b/include/inference_engine_vision_impl.h
index 0ba444f..12304f9 100644
--- a/include/inference_engine_vision_impl.h
+++ b/include/inference_engine_vision_impl.h
@@ -53,7 +53,7 @@ public:
 
 	int SetOutputTensorParamType(int type);
 
-	int SetOutPutTensorParamNodes(std::string node);
+	int
SetOutputTensorParamNodes(std::vector<std::string> nodes); // Set target device int SetTargetDevice(inference_target_type_e device); diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec index 9a0da02..d4ce9f3 100644 --- a/packaging/inference-engine-interface.spec +++ b/packaging/inference-engine-interface.spec @@ -1,7 +1,7 @@ Name: inference-engine-interface Summary: Interface of inference engines Version: 0.0.1 -Release: 2 +Release: 3 Group: Multimedia/Framework License: Apache-2.0 Source0: %{name}-%{version}.tar.gz diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp index 47c980a..cb29c60 100644 --- a/vision/inference_engine_vision_impl.cpp +++ b/vision/inference_engine_vision_impl.cpp @@ -119,6 +119,15 @@ int InferenceEngineVision::SetInputTensorParamNode(std::string node) return ret; } +int InferenceEngineVision::SetOutputTensorParamNodes(std::vector<std::string> nodes) +{ + int ret = engine->SetOutputTensorParamNodes(nodes); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetOutputTensorParamNodes"); + + return ret; +} + int InferenceEngineVision::SetOutputTensorParamThresHold(double threshold) { int ret = engine->SetOutputTensorParamThresHold(threshold); -- 2.7.4 From 1a6c4b16930f78718d7cead99ee89f3dfd3601b2 Mon Sep 17 00:00:00 2001 From: Hyunsoo Park Date: Thu, 22 Aug 2019 20:21:30 +0900 Subject: [PATCH 06/16] Implementation of inheritance relationship. The IInferenceEngineCommon class should be the parent of the IInferenceEngineVision class, so I implemented an inheritance relationship between the two classes. Change-Id: I9a4e572dbba9d4e8fd118ec73305f46a912015fa Signed-off-by: Hyunsoo Park --- common/inference_engine_common_impl.cpp | 33 +++++++------- include/inference_engine_common.h | 4 +- include/inference_engine_common_impl.h | 17 +++----- include/inference_engine_vision.h | 23 ++-------- include/inference_engine_vision_impl.h | 26 ++++------- packaging/inference-engine-interface.spec | 2 +- vision/inference_engine_vision_impl.cpp | 71 +++++-------------------------- 7 files changed, 48 insertions(+), 128 deletions(-) diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp index ccd4335..def2ace 100644 --- a/common/inference_engine_common_impl.cpp +++ b/common/inference_engine_common_impl.cpp @@ -14,8 +14,8 @@ * limitations under the License. 
*/ -#include -#include +#include "inference_engine_error.h" +#include "inference_engine_common_impl.h" #include #include @@ -95,15 +95,6 @@ int InferenceEngineCommon::SetInputTensorParam() return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; } -int InferenceEngineCommon::SetInputTensorParamNode(std::string node) -{ - int ret = engine->SetInputTensorParamNode(node); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to SetInputTensorParamNode"); - - return ret; -} - int InferenceEngineCommon::SetOutputTensorParam() { return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; @@ -111,10 +102,11 @@ int InferenceEngineCommon::SetOutputTensorParam() int InferenceEngineCommon::SetOutputTensorParamNodes(std::vector nodes) { + LOGI("ENTER"); int ret = engine->SetOutputTensorParamNodes(nodes); if (ret != INFERENCE_ENGINE_ERROR_NONE) LOGE("Fail to SetOutputTensorParamNodes"); - + LOGI("LEAVE"); return ret; } @@ -127,7 +119,7 @@ int InferenceEngineCommon::SetTargetDevice(inference_target_type_e type) return ret; } -int InferenceEngineCommon::Load() +int InferenceEngineCommon::Load(inference_input_type_e type) { int ret = engine->Load(); if (ret != INFERENCE_ENGINE_ERROR_NONE) @@ -138,7 +130,7 @@ int InferenceEngineCommon::Load() LOGE("Fail to load CreateInputLayerPassage"); - ret = engine->PrepareInputLayerPassage(); + ret = engine->PrepareInputLayerPassage(type); if (ret != INFERENCE_ENGINE_ERROR_NONE) LOGE("Fail to load PrepareInputLayerPassage"); @@ -155,13 +147,24 @@ int InferenceEngineCommon::Run(std::vector tensor) return ret; } +int InferenceEngineCommon::SetInputTensorParamNode(std::string node) +{ + LOGE("ENTER"); + int ret = engine->SetInputTensorParamNode(node); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetInputTensorParamNode"); + LOGE("LEAVE"); + return ret; +} + int InferenceEngineCommon::GetInferenceResult(std::vector>& dimInfo, std::vector& results) { + LOGE("ENTER"); int ret = engine->GetInferenceResult(dimInfo, results); if (ret != INFERENCE_ENGINE_ERROR_NONE) LOGE("Fail to GetInferenceResult"); - + LOGE("LEAVE"); return ret; } } /* Common */ diff --git a/include/inference_engine_common.h b/include/inference_engine_common.h index 985e8d9..f7fc23e 100644 --- a/include/inference_engine_common.h +++ b/include/inference_engine_common.h @@ -35,7 +35,6 @@ public: virtual int SetInputTensorParamNode(std::string node) = 0; - // OutputTensor virtual int SetOutputTensorParam() = 0; @@ -48,8 +47,7 @@ public: virtual int CreateInputLayerPassage() = 0; - virtual int PrepareInputLayerPassage() = 0; - + virtual int PrepareInputLayerPassage(inference_input_type_e type) = 0; virtual int Run(std::vector tensor) = 0; diff --git a/include/inference_engine_common_impl.h b/include/inference_engine_common_impl.h index 6b37b15..ab78c91 100644 --- a/include/inference_engine_common_impl.h +++ b/include/inference_engine_common_impl.h @@ -20,8 +20,8 @@ #include #include -#include -#include +#include "inference_engine_common.h" +#include "inference_engine_type.h" namespace InferenceEngineInterface { namespace Common { @@ -43,7 +43,6 @@ public: int SetInputTensorParamNode(std::string node); - // OutputTensor int SetOutputTensorParam(); @@ -52,22 +51,18 @@ public: int SetTargetDevice(inference_target_type_e type); // Load and Run - int Load(); - - int CreateInputLayerPassage(); - - int PrepareInputLayerPassage(); - + int Load(inference_input_type_e type); int Run(std::vector tensor); int GetInferenceResult(std::vector>& dimInfo, std::vector& results); private: - void *handle; - IInferenceEngineCommon *engine; 
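+ // Why handle/engine move from the private section removed above to the
+ // protected section added below (a sketch based on the Init() change later
+ // in this patch): the derived vision class opens the backend library itself
+ // and then hands both pointers up to the base, roughly:
+ //   Common::InferenceEngineCommon::handle = handle;
+ //   Common::InferenceEngineCommon::engine = engine;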
std::string mBackendLibName; std::vector mUserListName; +protected: + void *handle; + IInferenceEngineCommon *engine; }; diff --git a/include/inference_engine_vision.h b/include/inference_engine_vision.h index 6b143ba..26f4f89 100644 --- a/include/inference_engine_vision.h +++ b/include/inference_engine_vision.h @@ -21,28 +21,24 @@ #include #include "inference_engine_type.h" +#include "inference_engine_common.h" #include namespace InferenceEngineInterface { namespace Vision { -class IInferenceEngineVision { +class IInferenceEngineVision : public virtual Common::IInferenceEngineCommon { public: - + using Common::IInferenceEngineCommon::GetInferenceResult; virtual ~IInferenceEngineVision() {}; // InputTensor - virtual int SetInputTensorParam() = 0; virtual int SetInputTensorParamInput(int width, int height, int dim, int ch) = 0; virtual int SetInputTensorParamNorm(double deviation, double mean) = 0; - virtual int SetInputTensorParamNode(std::string node) = 0; - - // OutputTensor - virtual int SetOutputTensorParam() = 0; virtual int SetOutputTensorParamThresHold(double threshold) = 0; @@ -50,21 +46,10 @@ public: virtual int SetOutputTensorParamType(int type) = 0; - virtual int SetOutputTensorParamNodes(std::vector nodes) = 0; - - virtual int SetTargetDevice(inference_target_type_e type) = 0; - // Load and Run - virtual int Load() = 0; - - virtual int CreateInputLayerPassage() = 0; - - virtual int PrepareInputLayerPassage(inference_input_type_e type) = 0; virtual int Run(cv::Mat tensor) = 0; - virtual int Run(std::vector tensor) = 0; - virtual int GetInferenceResult(ImageClassificationResults& results) = 0; virtual int GetInferenceResult(ObjectDetectionResults& results) = 0; @@ -73,8 +58,6 @@ public: virtual int GetInferenceResult(FacialLandMarkDetectionResults& results) = 0; - virtual int GetInferenceResult(std::vector>& dimInfo, std::vector& results) = 0; - virtual int GetNumberOfOutputs() = 0; virtual void SetUserListName(std::string userlist) = 0; diff --git a/include/inference_engine_vision_impl.h b/include/inference_engine_vision_impl.h index 12304f9..89a1c13 100644 --- a/include/inference_engine_vision_impl.h +++ b/include/inference_engine_vision_impl.h @@ -19,17 +19,18 @@ #include #include - -#include -#include +#include "inference_engine_common_impl.h" +#include "inference_engine_vision.h" +#include "inference_engine_type.h" #include namespace InferenceEngineInterface { namespace Vision { -class InferenceEngineVision { +class InferenceEngineVision : public Common::InferenceEngineCommon { public: + using Common::InferenceEngineCommon::GetInferenceResult; InferenceEngineVision(std::string backend); ~InferenceEngineVision(); @@ -44,8 +45,6 @@ public: int SetInputTensorParamNorm(double deviation, double mean); - int SetInputTensorParamNode(std::string node); - // Output Tensor parameters int SetOutputTensorParamThresHold(double threshold); @@ -53,13 +52,6 @@ public: int SetOutputTensorParamType(int type); - int SetOutputTensorParamNodes(std::vector nodes); - - // Set target device - int SetTargetDevice(inference_target_type_e device); - - int Load(); - int Run(cv::Mat tensor); int GetInferenceResult(ImageClassificationResults& results); @@ -70,18 +62,16 @@ public: int GetInferenceResult(FacialLandMarkDetectionResults& results); - int GetInferenceResult(std::vector>& dimInfo, std::vector& results); - int GetNumberOfOutputs(); void SetUserListName(std::string userlist); private: - void *handle; - IInferenceEngineVision *engine; std::string mBackendLibName; std::vector mUserListName; - 
+protected: + void *handle; + IInferenceEngineVision *engine; }; } /* Vision */ diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec index d4ce9f3..52a667a 100644 --- a/packaging/inference-engine-interface.spec +++ b/packaging/inference-engine-interface.spec @@ -1,7 +1,7 @@ Name: inference-engine-interface Summary: Interface of inference engines Version: 0.0.1 -Release: 3 +Release: 4 Group: Multimedia/Framework License: Apache-2.0 Source0: %{name}-%{version}.tar.gz diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp index cb29c60..e383f4e 100644 --- a/vision/inference_engine_vision_impl.cpp +++ b/vision/inference_engine_vision_impl.cpp @@ -14,8 +14,8 @@ * limitations under the License. */ -#include -#include +#include "inference_engine_error.h" +#include "inference_engine_vision_impl.h" #include @@ -37,8 +37,7 @@ extern "C" { namespace InferenceEngineInterface { namespace Vision { - -InferenceEngineVision::InferenceEngineVision(std::string backend) +InferenceEngineVision::InferenceEngineVision(std::string backend) : Common::InferenceEngineCommon(backend) { LOGE("ENTER"); mBackendLibName = "libinference-engine-" + backend + ".so"; @@ -62,7 +61,8 @@ int InferenceEngineVision::Init(std::string configFile, LOGW("ENTER"); char *error = NULL; handle = dlopen(mBackendLibName.c_str(), RTLD_LAZY); - LOGE("dlopen %s", mBackendLibName.c_str()); + LOGI("HANDLE : [%p]", handle); + if (!handle) { LOGE("Fail to dlopen %s", mBackendLibName.c_str()); LOGE("Error: %s\n", dlerror()); @@ -77,12 +77,18 @@ int InferenceEngineVision::Init(std::string configFile, } engine = EngineInit(configFile, weightFile, userFile); + LOGI("dlopen %s", mBackendLibName.c_str()); + if (engine == NULL) { LOGE("Fail to EngineInit"); dlclose(handle); return INFERENCE_ENGINE_ERROR_INTERNAL; } + Common::InferenceEngineCommon::handle = handle; + + Common::InferenceEngineCommon::engine = engine; + LOGW("LEAVE"); return INFERENCE_ENGINE_ERROR_NONE; } @@ -110,24 +116,6 @@ int InferenceEngineVision::SetInputTensorParamNorm(double deviation, double mean return ret; } -int InferenceEngineVision::SetInputTensorParamNode(std::string node) -{ - int ret = engine->SetInputTensorParamNode(node); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to SetInputTensorParamNode"); - - return ret; -} - -int InferenceEngineVision::SetOutputTensorParamNodes(std::vector nodes) -{ - int ret = engine->SetOutputTensorParamNodes(nodes); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to SetOutputTensorParamNodes"); - - return ret; -} - int InferenceEngineVision::SetOutputTensorParamThresHold(double threshold) { int ret = engine->SetOutputTensorParamThresHold(threshold); @@ -155,32 +143,6 @@ int InferenceEngineVision::SetOutputTensorParamType(int type) return ret; } -int InferenceEngineVision::SetTargetDevice(inference_target_type_e type) -{ - int ret = engine->SetTargetDevice(type); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to SetTargetDevice"); - - return ret; -} - -int InferenceEngineVision::Load() -{ - int ret = engine->Load(); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to load InferenceEngineVision"); - - ret = engine->CreateInputLayerPassage(); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to load CreateInputLayerPassage"); - - ret = engine->PrepareInputLayerPassage(INFERENCE_INPUT_IMAGE); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to load PrepareInputLayerPassage"); - - return ret; -} - int 
InferenceEngineVision::Run(cv::Mat tensor) { int ret = engine->Run(tensor); @@ -230,16 +192,6 @@ int InferenceEngineVision::GetInferenceResult(FacialLandMarkDetectionResults& re return ret; } -int InferenceEngineVision::GetInferenceResult(std::vector>& dimInfo, std::vector& results) -{ - int ret = engine->GetInferenceResult(dimInfo, results); - - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to GetInferenceResult"); - - return ret; -} - int InferenceEngineVision::GetNumberOfOutputs() { return engine->GetNumberOfOutputs(); @@ -249,6 +201,5 @@ void InferenceEngineVision::SetUserListName(std::string userlist) { ; } - } /* Vision */ } /* InferenceEngineInterface */ -- 2.7.4 From 9c52ebc04282f152759fc98474f81436ad9fbcc1 Mon Sep 17 00:00:00 2001 From: Hyunsoo Park Date: Fri, 6 Sep 2019 15:29:04 +0900 Subject: [PATCH 07/16] Set and check NULL values on pointer variables. The C++ delete operation just releases the allocated memory; it doesn't set the variable to NULL. So I set the variables to NULL after the delete process. Change-Id: Ia63a08c47b2e41263e7195aaca62bb67903d8c11 Signed-off-by: Hyunsoo Park --- common/inference_engine_common_impl.cpp | 11 +++++++---- packaging/inference-engine-interface.spec | 2 +- vision/inference_engine_vision_impl.cpp | 14 +++++++++----- 3 files changed, 17 insertions(+), 10 deletions(-) diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp index def2ace..c04c9fc 100644 --- a/common/inference_engine_common_impl.cpp +++ b/common/inference_engine_common_impl.cpp @@ -47,10 +47,13 @@ InferenceEngineCommon::InferenceEngineCommon(std::string backend) InferenceEngineCommon::~InferenceEngineCommon() { LOGW("ENTER"); - destroy_t *engineDestroy = (destroy_t*)dlsym(handle, "EngineCommonDestroy"); - // NULL CHECK? - engineDestroy(engine); - dlclose(handle); + if (handle){ + destroy_t *engineDestroy = (destroy_t*)dlsym(handle, "EngineCommonDestroy"); + engineDestroy(engine); + dlclose(handle); + engine = nullptr; + handle = nullptr; + } LOGW("LEAVE"); } diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec index 52a667a..801b0e6 100644 --- a/packaging/inference-engine-interface.spec +++ b/packaging/inference-engine-interface.spec @@ -1,7 +1,7 @@ Name: inference-engine-interface Summary: Interface of inference engines Version: 0.0.1 -Release: 4 +Release: 5 Group: Multimedia/Framework License: Apache-2.0 Source0: %{name}-%{version}.tar.gz diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp index e383f4e..ae6a2fd 100644 --- a/vision/inference_engine_vision_impl.cpp +++ b/vision/inference_engine_vision_impl.cpp @@ -47,11 +47,16 @@ InferenceEngineVision::InferenceEngineVision(std::string backend) : Common::Infe InferenceEngineVision::~InferenceEngineVision() { LOGW("ENTER"); - destroy_t *engineDestroy = (destroy_t*)dlsym(handle, "EngineVisionDestroy"); - // NULL CHECK? 
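+ // The old "NULL CHECK?" marker is answered by the guarded teardown added
+ // below: dlsym()/dlclose() are skipped entirely when the library never
+ // loaded. A still-safer sketch (not part of this patch) would also verify
+ // the symbol before calling through it, since dlsym() returns NULL when the
+ // symbol is missing:
+ //   destroy_t *engineDestroy = (destroy_t *)dlsym(handle, "EngineVisionDestroy");
+ //   if (engineDestroy != NULL)
+ //       engineDestroy(engine);
+ //   dlclose(handle);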
- engineDestroy(engine); - dlclose(handle); + if (handle) { + destroy_t *engineDestroy = (destroy_t*)dlsym(handle, "EngineVisionDestroy"); + engineDestroy(engine); + dlclose(handle); + engine = nullptr; + handle = nullptr; + Common::InferenceEngineCommon::handle = nullptr; + Common::InferenceEngineCommon::engine = nullptr; + } LOGW("LEAVE"); } @@ -86,7 +91,6 @@ int InferenceEngineVision::Init(std::string configFile, } Common::InferenceEngineCommon::handle = handle; - Common::InferenceEngineCommon::engine = engine; LOGW("LEAVE"); -- 2.7.4 From 5f884401c1412f1473907349b67db34740181a8e Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Wed, 18 Sep 2019 17:30:35 +0900 Subject: [PATCH 08/16] Reimplement class InferenceEngineVision using InferenceEngineCommon This commit removes IInferenceEngineVision and reimplements InferenceEngineVision, as the commit title says. In detail, InferenceEngineCommon provides general inference operations such as setting parameters and loading a model. Thus, InferenceEngineVision can be implemented on top of InferenceEngineCommon instead of IInferenceEngineVision. Change-Id: I7a43bc6ff97e5713c1ac7ae705ff52100f5e4977 Signed-off-by: Tae-Young Chung --- common/inference_engine_common_impl.cpp | 68 +++-- include/inference_engine_common.h | 76 ++++- include/inference_engine_common_impl.h | 91 +++++- include/inference_engine_type.h | 59 ++-- include/inference_engine_vision.h | 71 ----- include/inference_engine_vision_impl.h | 162 ++++++++++- packaging/inference-engine-interface.spec | 2 +- vision/inference_engine_vision_impl.cpp | 442 +++++++++++++++++++++++++----- 8 files changed, 745 insertions(+), 226 deletions(-) delete mode 100644 include/inference_engine_vision.h diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp index c04c9fc..d1aa2ce 100644 --- a/common/inference_engine_common_impl.cpp +++ b/common/inference_engine_common_impl.cpp @@ -37,33 +37,34 @@ extern "C" { namespace InferenceEngineInterface { namespace Common { -InferenceEngineCommon::InferenceEngineCommon(std::string backend) +InferenceEngineCommon::InferenceEngineCommon(std::string backend) : + handle(nullptr), + engine(nullptr) { LOGE("ENTER"); mBackendLibName = "libinference-engine-" + backend + ".so"; + LOGE("lib: %s", mBackendLibName.c_str()); LOGE("LEAVE"); } InferenceEngineCommon::~InferenceEngineCommon() { LOGW("ENTER"); - if (handle){ - destroy_t *engineDestroy = (destroy_t*)dlsym(handle, "EngineCommonDestroy"); - engineDestroy(engine); - dlclose(handle); - engine = nullptr; - handle = nullptr; - } + if (handle) + Deinit(); LOGW("LEAVE"); } int InferenceEngineCommon::Init(std::string configFile, - std::string weightFile, std::string userFile) + std::string weightFile) { LOGW("ENTER"); char *error = NULL; - handle = dlopen(mBackendLibName.c_str(), RTLD_LAZY); + LOGI("lib: %s", mBackendLibName.c_str()); + handle = dlopen(mBackendLibName.c_str(), RTLD_NOW/*RTLD_LAZY*/); + LOGI("HANDLE : [%p]", handle); + if (!handle) { LOGE("Fail to dlopen %s", mBackendLibName.c_str()); LOGE("Error: %s\n", dlerror()); @@ -77,7 +78,7 @@ int InferenceEngineCommon::Init(std::string configFile, return INFERENCE_ENGINE_ERROR_INTERNAL; } - engine = EngineInit(configFile, weightFile, userFile); + engine = EngineInit(configFile, weightFile); if (engine == NULL) { LOGE("Fail to EngineInit"); dlclose(handle); @@ -90,7 +91,17 @@ int InferenceEngineCommon::Init(std::string configFile, void InferenceEngineCommon::Deinit() { - ; + LOGW("ENTER"); + + if (handle) { + destroy_t *engineDestroy = 
(destroy_t*)dlsym(handle, "EngineCommonDestroy"); + engineDestroy(engine); + dlclose(handle); + engine = nullptr; + handle = nullptr; + } + + LOGW("LEAVE"); } int InferenceEngineCommon::SetInputTensorParam() @@ -122,7 +133,7 @@ int InferenceEngineCommon::SetTargetDevice(inference_target_type_e type) return ret; } -int InferenceEngineCommon::Load(inference_input_type_e type) +int InferenceEngineCommon::Load() { int ret = engine->Load(); if (ret != INFERENCE_ENGINE_ERROR_NONE) @@ -132,11 +143,29 @@ int InferenceEngineCommon::Load(inference_input_type_e type) if (ret != INFERENCE_ENGINE_ERROR_NONE) LOGE("Fail to load CreateInputLayerPassage"); + return ret; +} - ret = engine->PrepareInputLayerPassage(type); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to load PrepareInputLayerPassage"); +int InferenceEngineCommon::GetInputLayerAttrType() +{ + return engine->GetInputLayerAttrType(); +} +void * InferenceEngineCommon::GetInputDataPtr() +{ + return engine->GetInputDataPtr(); +} + +int InferenceEngineCommon::SetInputDataBuffer(tensor_t data) +{ + return engine->SetInputDataBuffer(data); +} + +int InferenceEngineCommon::Run() +{ + int ret = engine->Run(); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to run InferenceEngineCommon"); return ret; } @@ -145,7 +174,7 @@ int InferenceEngineCommon::Run(std::vector tensor) { int ret = engine->Run(tensor); if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to run InferenceEngineVision"); + LOGE("Fail to run InferenceEngineCommon"); return ret; } @@ -160,13 +189,14 @@ int InferenceEngineCommon::SetInputTensorParamNode(std::string node) return ret; } -int InferenceEngineCommon::GetInferenceResult(std::vector>& dimInfo, std::vector& results) +int InferenceEngineCommon::GetInferenceResult(tensor_t& results) { LOGE("ENTER"); - int ret = engine->GetInferenceResult(dimInfo, results); + int ret = engine->GetInferenceResult(results); if (ret != INFERENCE_ENGINE_ERROR_NONE) LOGE("Fail to GetInferenceResult"); + LOGE("LEAVE"); return ret; } diff --git a/include/inference_engine_common.h b/include/inference_engine_common.h index f7fc23e..78ba3c8 100644 --- a/include/inference_engine_common.h +++ b/include/inference_engine_common.h @@ -30,32 +30,96 @@ public: virtual ~IInferenceEngineCommon() {}; - // InputTensor + /** + * @brief Set parameters for an input tensor. + * + * @since_tizen 5.5 + */ virtual int SetInputTensorParam() = 0; + /** + * @brief Set an input node name. + * + * @since_tizen 5.5 + */ virtual int SetInputTensorParamNode(std::string node) = 0; - // OutputTensor + /** + * @brief Set parameters for output tensors. + * + * @since_tizen 5.5 + */ virtual int SetOutputTensorParam() = 0; + /** + * @brief Set output nodes' names. + * + * @since_tizen 5.5 + */ virtual int SetOutputTensorParamNodes(std::vector nodes) = 0; + /** + * @brief Set target device. + * @details See #inference_target_type_e + * + * @since_tizen 5.5 + */ virtual int SetTargetDevice(inference_target_type_e type) = 0; - // Load and Run + /** + * @brief Load model data. + * + * @since_tizen 5.5 + */ virtual int Load() = 0; + /** + * @brief Create a memory. + * + * @since_tizen 5.5 + */ virtual int CreateInputLayerPassage() = 0; - virtual int PrepareInputLayerPassage(inference_input_type_e type) = 0; + /** + * @brief Get an input layer's type such as float32, float16, and so on. + * + * @since_tizen 5.5 + */ + virtual int GetInputLayerAttrType() = 0; + + /** + * @brief Get an input data pointer. 
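+ * @details The pointer refers to the backend-owned input buffer, so a
+ *          caller can wrap it without copying. A minimal sketch, with the
+ *          buffer geometry (height, width, CV_32FC3) assumed from the
+ *          vision wrapper later in this patch:
+ * @code
+ *          void *dataPtr = engine->GetInputDataPtr();
+ *          cv::Mat input(height, width, CV_32FC3, dataPtr);
+ * @endcode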
+ * + * @since_tizen 5.5 + */ + virtual void* GetInputDataPtr() = 0; + + /** + * @brief Set an input data buffer. + * + * @since_tizen 5.5 + */ + virtual int SetInputDataBuffer(tensor_t data) = 0; + + /** + * @brief Run an inference. + * + * @since_tizen 5.5 + */ + virtual int Run() = 0; virtual int Run(std::vector tensor) = 0; - virtual int GetInferenceResult(std::vector>& dimInfo, std::vector& results) = 0; + /** + * @brief Get inference results. + * + * @since_tizen 5.5 + */ + virtual int GetInferenceResult(tensor_t& results) = 0; }; typedef void destroy_t(IInferenceEngineCommon*); -typedef IInferenceEngineCommon* init_t(std::string configFile, std::string weightFile, std::string userFile); +typedef IInferenceEngineCommon* init_t(std::string configFile, std::string weightFile); } /* Common */ } /* InferenceEngineInterface */ diff --git a/include/inference_engine_common_impl.h b/include/inference_engine_common_impl.h index ab78c91..6402f5e 100644 --- a/include/inference_engine_common_impl.h +++ b/include/inference_engine_common_impl.h @@ -33,33 +33,106 @@ public: ~InferenceEngineCommon(); - int Init(std::string configFile, - std::string weightFile, std::string UserFile); - + /** + * @brief Intialization. + * @details Load the backend engine by dlopen() and initialize the engine by + * calling EngineCommonInit which is found by dlsym(). + * @since_tizen 5.5 + */ + int Init(std::string configFile, std::string weightFile); + + /** + * @brief De-initiaization. + * @details Destroy the engine by calling EngineCommonDestroy which is found dlsym(). + * Close the engine by dlclose(). + * @since_tizen 5.5 + */ void Deinit(); - // InputTensor + /** + * @brief Set parameters for an input tensor. + * @details Wrapper of class IInferenceEngineCommon + * + * @since_tizen 5.5 + */ int SetInputTensorParam(); + /** + * @brief Set an input node name. + * + * @since_tizen 5.5 + */ int SetInputTensorParamNode(std::string node); - // OutputTensor + /** + * @brief Set parameters for output tensors. + * + * @since_tizen 5.5 + */ int SetOutputTensorParam(); + /** + * @brief Set output nodes' names. + * + * @since_tizen 5.5 + */ int SetOutputTensorParamNodes(std::vector nodes); + /** + * @brief Set target device. + * @details See #inference_target_type_e + * + * @since_tizen 5.5 + */ int SetTargetDevice(inference_target_type_e type); - // Load and Run - int Load(inference_input_type_e type); + /** + * @brief Load model data. + * + * @since_tizen 5.5 + */ + int Load(); + + /** + * @brief Get an input layer's type such as float32, float16, and so on. + * + * @since_tizen 5.5 + */ + int GetInputLayerAttrType(); + + /** + * @brief Get an input data pointer. + * + * @since_tizen 5.5 + */ + void* GetInputDataPtr(); + + /** + * @brief Set an input data buffer. + * + * @since_tizen 5.5 + */ + int SetInputDataBuffer(tensor_t data); + + /** + * @brief Run an inference. + * + * @since_tizen 5.5 + */ + int Run(); int Run(std::vector tensor); - int GetInferenceResult(std::vector>& dimInfo, std::vector& results); + /** + * @brief Get inference results. 
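+ * @details A hedged consumption sketch, assuming the { batch, count, ... }
+ *          dimension layout used by the vision wrapper in this patch:
+ * @code
+ *          tensor_t out;
+ *          if (GetInferenceResult(out) == INFERENCE_ENGINE_ERROR_NONE) {
+ *              float *scores = reinterpret_cast<float *>(out.data[0]);
+ *              long count = out.dimInfo[0][1];
+ *          }
+ * @endcode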
+ * + * @since_tizen 5.5 + */ + int GetInferenceResult(tensor_t& results); private: std::string mBackendLibName; - std::vector mUserListName; + protected: void *handle; IInferenceEngineCommon *engine; diff --git a/include/inference_engine_type.h b/include/inference_engine_type.h index 5860582..425ba52 100644 --- a/include/inference_engine_type.h +++ b/include/inference_engine_type.h @@ -17,8 +17,6 @@ #ifndef __INFERENCE_ENGINE_TYPE_H__ #define __INFERENCE_ENGINE_TYPE_H__ -#include - #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ @@ -43,44 +41,27 @@ typedef enum { INFERENCE_TARGET_MAX } inference_target_type_e; -typedef enum { - INFERENCE_INPUT_GENERAL = 0, - INFERENCE_INPUT_IMAGE, - INFERENCE_INPUT_MAX -} inference_input_type_e; - -typedef struct _ImageClassficationResults { - int number_of_classes; - std::vector indices; - std::vector names; - std::vector confidences; -} ImageClassificationResults; /**< structure ImageClassificationResults */ - -typedef struct _ObjectDetectionResults { - int number_of_objects; - std::vector indices; - std::vector names; - std::vector confidences; - std::vector locations; -} ObjectDetectionResults; /**< structure ObjectDetectionResults */ - -typedef struct _FaceDetectionResults { - int number_of_faces; - std::vector confidences; - std::vector locations; -} FaceDetectionResults; /**< structure ObjectDetectionResults */ - -typedef struct _FacialLandMarkDetectionResults { - int number_of_landmarks; - std::vector locations; -} FacialLandMarkDetectionResults; /**< structure ObjectDetectionResults */ - -typedef struct _InferenceResults{ - int dimInfoSize; +/** + * @brief Tensor defined by the dimension and their corresponding data + * @details @a dimInfo is the information + * of a tensor, which is multi-dimension matix. @a data is the data pointer + * corresponding to @a dimInfo. In case of an input image tensor with + * resolution 224 x 224 and chanel 3, for example: + * @code + * // assume that image data address is known + * float *pImagedata = ...; + * std::vector dim{ 1, 3, 224, 224}; + * tensor_t inputTensor; + * inputTensor.dimInfo.push_back(dim); + * inputTensor.data = (void*)pImageData; + * + * // Do something with inputTensor + * @since_tizen 5.5 + */ +typedef struct _tensor_t { std::vector> dimInfo; - std::vector data; -} InferenceResults; /**< structure InferenceResults */ - + std::vector data; +} tensor_t; #ifdef __cplusplus } diff --git a/include/inference_engine_vision.h b/include/inference_engine_vision.h deleted file mode 100644 index 26f4f89..0000000 --- a/include/inference_engine_vision.h +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef __INFERENCE_ENGINE_VISION_H__ -#define __INFERENCE_ENGINE_VISION_H__ - -#include -#include - -#include "inference_engine_type.h" -#include "inference_engine_common.h" -#include - -namespace InferenceEngineInterface { -namespace Vision { - -class IInferenceEngineVision : public virtual Common::IInferenceEngineCommon { -public: - using Common::IInferenceEngineCommon::GetInferenceResult; - virtual ~IInferenceEngineVision() {}; - - // InputTensor - - virtual int SetInputTensorParamInput(int width, int height, int dim, int ch) = 0; - - virtual int SetInputTensorParamNorm(double deviation, double mean) = 0; - - // OutputTensor - - virtual int SetOutputTensorParamThresHold(double threshold) = 0; - - virtual int SetOutputTensorParamNumbers(int number) = 0; - - virtual int SetOutputTensorParamType(int type) = 0; - - // Load and Run - - virtual int Run(cv::Mat tensor) = 0; - - virtual int GetInferenceResult(ImageClassificationResults& results) = 0; - - virtual int GetInferenceResult(ObjectDetectionResults& results) = 0; - - virtual int GetInferenceResult(FaceDetectionResults& results) = 0; - - virtual int GetInferenceResult(FacialLandMarkDetectionResults& results) = 0; - - virtual int GetNumberOfOutputs() = 0; - - virtual void SetUserListName(std::string userlist) = 0; -}; - -typedef void destroy_t(IInferenceEngineVision*); -typedef IInferenceEngineVision* init_t(std::string configFile, std::string weightFile, std::string userFile); -} /* Vision */ -} /* InferenceEngineInterface */ - -#endif /* __INFERENCE_ENGINE_VISION_H__ */ diff --git a/include/inference_engine_vision_impl.h b/include/inference_engine_vision_impl.h index 89a1c13..6d181b5 100644 --- a/include/inference_engine_vision_impl.h +++ b/include/inference_engine_vision_impl.h @@ -20,58 +20,198 @@ #include #include #include "inference_engine_common_impl.h" -#include "inference_engine_vision.h" +#include "inference_engine_common.h" #include "inference_engine_type.h" #include +#include +using namespace InferenceEngineInterface::Common; + +typedef struct _ImageClassficationResults { + int number_of_classes; + std::vector indices; + std::vector names; + std::vector confidences; +} ImageClassificationResults; /**< structure ImageClassificationResults */ + +typedef struct _ObjectDetectionResults { + int number_of_objects; + std::vector indices; + std::vector names; + std::vector confidences; + std::vector locations; +} ObjectDetectionResults; /**< structure ObjectDetectionResults */ + +typedef struct _FaceDetectionResults { + int number_of_faces; + std::vector confidences; + std::vector locations; +} FaceDetectionResults; /**< structure FaceDetectionResults */ + +typedef struct _FacialLandMarkDetectionResults { + int number_of_landmarks; + std::vector locations; +} FacialLandMarkDetectionResults; /**< structure FacialLandMarkDetectionResults */ + namespace InferenceEngineInterface { namespace Vision { -class InferenceEngineVision : public Common::InferenceEngineCommon { +class InferenceEngineVision { public: - using Common::InferenceEngineCommon::GetInferenceResult; + InferenceEngineVision(std::string backend); ~InferenceEngineVision(); + /** + * @brief Intialization. + * @since_tizen 5.5 + */ int Init(std::string configFile, std::string weightFile, std::string UserFile); + /** + * @brief De-initiaization. + * @since_tizen 5.5 + */ void Deinit(); - // Input Tensor parameters + /** + * @brief Set an input node name. 
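+ * @details The name must match an input node of the loaded model; the node
+ *          name below is hypothetical and model-specific:
+ * @code
+ *          vision.SetInputTensorParamNode("input");
+ * @endcode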
+ * + * @since_tizen 5.5 + */ + int SetInputTensorParamNode(std::string node); + + /** + * @brief Set an input image's information. + * + * @since_tizen 5.5 + */ int SetInputTensorParamInput(int width, int height, int dim, int ch); + /** + * @brief Set mean and deviation values. + * + * @since_tizen 5.5 + */ int SetInputTensorParamNorm(double deviation, double mean); - // Output Tensor parameters + /** + * @brief Set output nodes' names. + * + * @since_tizen 5.5 + */ + int SetOutputTensorParamNodes(std::vector nodes); + + /** + * @brief Set threshold value. + * + * @since_tizen 5.5 + */ int SetOutputTensorParamThresHold(double threshold); + /** + * @brief Set the number of outputs. + * + * @since_tizen 5.5 + */ int SetOutputTensorParamNumbers(int number); - int SetOutputTensorParamType(int type); - + /** + * @brief Set target device. + * @details See #inference_target_type_e + * + * @since_tizen 5.5 + */ + int SetTargetDevice(inference_target_type_e type); + + /** + * @brief Load model data. + * + * @since_tizen 5.5 + */ + int Load(); + + /** + * @brief Run an inference with a tensor + * + * @since_tizen 5.5 + */ int Run(cv::Mat tensor); + /** + * @brief Get inference results of image classification. + * + * @since_tizen 5.5 + */ int GetInferenceResult(ImageClassificationResults& results); + /** + * @brief Get inference results of object detection. + * + * @since_tizen 5.5 + */ int GetInferenceResult(ObjectDetectionResults& results); + /** + * @brief Get inference results of face detection. + * + * @since_tizen 5.5 + */ int GetInferenceResult(FaceDetectionResults& results); + /** + * @brief Get inference results of facial landmark detection. + * + * @since_tizen 5.5 + */ int GetInferenceResult(FacialLandMarkDetectionResults& results); - int GetNumberOfOutputs(); + /** + * @brief Set SetUserFile. + * @details Image Classification and Object Detection needs a user file + * including labels of classification and objects. + * @since_tizen 5.5 + */ + int SetUserFile(std::string filename); +protected: + /** + * @brief Set an input with a type of cv::Mat + * @details Set an input and pre-process the input + * + * @since_tizen 5.5 + */ + int SetInput(cv::Mat cvImg); + + /** + * @brief Set user list name. 
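+ * @details Invoked by SetUserFile() once per non-empty line of the user
+ *          file, so mUserListName accumulates one label per line.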
+ * + * @since_tizen 5.5 + */ void SetUserListName(std::string userlist); private: std::string mBackendLibName; std::vector mUserListName; -protected: - void *handle; - IInferenceEngineVision *engine; + + int mCh; + int mDim; + cv::Size mInputSize; + + double mDeviation; + double mMean; + double mThreshold; + int mOutputNumbers; + cv::Size mSourceSize; + + cv::Mat mInputBuffer; + int mMatType; + + InferenceEngineCommon *mCommonEngine; /**< InferenceEngineCommon is used to + do typical process */ }; } /* Vision */ diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec index 801b0e6..cb27576 100644 --- a/packaging/inference-engine-interface.spec +++ b/packaging/inference-engine-interface.spec @@ -1,7 +1,7 @@ Name: inference-engine-interface Summary: Interface of inference engines Version: 0.0.1 -Release: 5 +Release: 6 Group: Multimedia/Framework License: Apache-2.0 Source0: %{name}-%{version}.tar.gz diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp index ae6a2fd..0bf5a37 100644 --- a/vision/inference_engine_vision_impl.cpp +++ b/vision/inference_engine_vision_impl.cpp @@ -23,6 +23,7 @@ #include #include #include +#include extern "C" { @@ -35,28 +36,48 @@ extern "C" { #define LOG_TAG "INFERENCE_ENGINE_VISION" } +typedef enum { + InputAttrNoType = 0, + InputAttrFloat32 = 1, + InputAttrInt32 = 2, + InputAttrUInt8 = 3, + InputAttrInt64 = 4, + InputAttrString = 5, + InputAttrBool = 6, +} InputAttrType; + namespace InferenceEngineInterface { namespace Vision { -InferenceEngineVision::InferenceEngineVision(std::string backend) : Common::InferenceEngineCommon(backend) + +InferenceEngineVision::InferenceEngineVision(std::string backend) : + mCh(0), + mDim(0), + mInputSize(cv::Size()), + mDeviation(0.0), + mMean(0.0), + mThreshold(0.0), + mOutputNumbers(0), + mSourceSize(cv::Size()), + mInputBuffer(cv::Mat()), + mMatType(0), + mCommonEngine(nullptr) { LOGE("ENTER"); - mBackendLibName = "libinference-engine-" + backend + ".so"; + + mCommonEngine = new InferenceEngineCommon(backend); + LOGE("LEAVE"); } InferenceEngineVision::~InferenceEngineVision() { LOGW("ENTER"); - if (handle) { - destroy_t *engineDestroy = (destroy_t*)dlsym(handle, "EngineVisionDestroy"); - engineDestroy(engine); - dlclose(handle); - engine = nullptr; - handle = nullptr; - Common::InferenceEngineCommon::handle = nullptr; - Common::InferenceEngineCommon::engine = nullptr; + if (mCommonEngine != nullptr) { + delete mCommonEngine; + mCommonEngine = nullptr; } + LOGW("LEAVE"); } @@ -64,146 +85,427 @@ int InferenceEngineVision::Init(std::string configFile, std::string weightFile, std::string userFile) { LOGW("ENTER"); - char *error = NULL; - handle = dlopen(mBackendLibName.c_str(), RTLD_LAZY); - LOGI("HANDLE : [%p]", handle); - if (!handle) { - LOGE("Fail to dlopen %s", mBackendLibName.c_str()); - LOGE("Error: %s\n", dlerror()); - return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; + int ret = mCommonEngine->Init(configFile, weightFile); + if (ret != INFERENCE_ENGINE_ERROR_NONE) { + LOGE("Fail to init"); + return ret; } - init_t* EngineInit = (init_t *)dlsym(handle, "EngineVisionInit"); - if ((error = dlerror()) != NULL) { - LOGE("Error: %s\n", error); - dlclose(handle); - return INFERENCE_ENGINE_ERROR_INTERNAL; + size_t userFileLength = userFile.length(); + if (userFileLength > 0 && access(userFile.c_str(), F_OK)) { + LOGE("userFilePath in [%s] ", userFile.c_str()); + return INFERENCE_ENGINE_ERROR_INVALID_PATH; } - engine = EngineInit(configFile, 
weightFile, userFile); - LOGI("dlopen %s", mBackendLibName.c_str()); + ret = (userFileLength > 0) ? SetUserFile(userFile) : INFERENCE_ENGINE_ERROR_NONE; + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGW("Fail to read categoryList"); + + LOGW("LEAVE"); + return INFERENCE_ENGINE_ERROR_NONE; +} - if (engine == NULL) { - LOGE("Fail to EngineInit"); - dlclose(handle); - return INFERENCE_ENGINE_ERROR_INTERNAL; +int InferenceEngineVision::SetUserFile(std::string filename) +{ + std::ifstream fp(filename.c_str()); + if (!fp.is_open()) { + return INFERENCE_ENGINE_ERROR_INVALID_PATH; } - Common::InferenceEngineCommon::handle = handle; - Common::InferenceEngineCommon::engine = engine; + std::string userListName; + while (!fp.eof()) { + std::getline(fp, userListName); + if (userListName.length()) + SetUserListName(userListName); + } + + fp.close(); - LOGW("LEAVE"); return INFERENCE_ENGINE_ERROR_NONE; } +void InferenceEngineVision::SetUserListName(std::string userlist) +{ + mUserListName.push_back(userlist); +} + void InferenceEngineVision::Deinit() { - ; + mCommonEngine->Deinit(); } int InferenceEngineVision::SetInputTensorParamInput(int width, int height, int dim, int ch) { - int ret = engine->SetInputTensorParamInput(width, height, dim, ch); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to SetInputTensorParamInput"); + mCh = ch; + mDim = dim; + mInputSize = cv::Size(width, height); - return ret; + LOGI("InputSize is %d x %d\n", mInputSize.width, mInputSize.height); + + return INFERENCE_ENGINE_ERROR_NONE; } int InferenceEngineVision::SetInputTensorParamNorm(double deviation, double mean) { - int ret = engine->SetInputTensorParamNorm(deviation, mean); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to SetInputTensorParamNorm"); + mDeviation = deviation; + mMean = mean; + + LOGI("mean %.4f, deviation %.4f", mMean, mDeviation); + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceEngineVision::SetInputTensorParamNode(std::string node) +{ + LOGE("ENTER"); + int ret = mCommonEngine->SetInputTensorParamNode(node); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetInputTensorParamNode"); + LOGE("LEAVE"); return ret; } -int InferenceEngineVision::SetOutputTensorParamThresHold(double threshold) +int InferenceEngineVision::SetTargetDevice(inference_target_type_e type) { - int ret = engine->SetOutputTensorParamThresHold(threshold); + int ret = mCommonEngine->SetTargetDevice(type); if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to SetOutputTensorParamThresHold"); + LOGE("Fail to SetTargetDevice"); return ret; } +int InferenceEngineVision::SetOutputTensorParamThresHold(double threshold) +{ + mThreshold = threshold; + + LOGI("threshold %.4f", mThreshold); + + return INFERENCE_ENGINE_ERROR_NONE; +} + int InferenceEngineVision::SetOutputTensorParamNumbers(int numbers) { - int ret = engine->SetOutputTensorParamNumbers(numbers); - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to SetOuputTensorParamNumbers"); + mOutputNumbers = numbers; - return ret; + LOGI("outputNumber %d", mOutputNumbers); + + return INFERENCE_ENGINE_ERROR_NONE; } -int InferenceEngineVision::SetOutputTensorParamType(int type) +int InferenceEngineVision::SetOutputTensorParamNodes(std::vector nodes) { - int ret = engine->SetOutputTensorParamType(type); + LOGI("ENTER"); + int ret = mCommonEngine->SetOutputTensorParamNodes(nodes); if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to SetOutputTensorParamType"); + LOGE("Fail to SetOutputTensorParamNodes"); + LOGI("LEAVE"); + return ret; +} + +int 
InferenceEngineVision::Load() +{ + LOGI("ENTER"); + int ret = mCommonEngine->Load(); + if (ret != INFERENCE_ENGINE_ERROR_NONE) { + LOGE("Fail to load InferenceEngineVision"); + return ret; + } + + //get type and allocate memory to mInputBuffer; + InputAttrType attrType = static_cast(mCommonEngine->GetInputLayerAttrType()); + if (attrType == InputAttrUInt8) { + mMatType = CV_8UC3; + LOGI("InputType is UINT8"); + } + else if (attrType == InputAttrFloat32) { + mMatType = CV_32FC3; + LOGI("InputType FLOAT32"); + } + else { + LOGE("Not supported"); + ret = INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT; + return ret; + } + + tensor_t inputData; + std::vector info{1, mMatType, mInputSize.height, mInputSize.width}; + inputData.dimInfo.push_back(info); + + // some plug-in (opencv) doesn't allocate memory for input while loading models + // But, others (tflite) allcate memory while loading. + // Thus, the SetInputData() will be implemented in plug-in such as OpenCV, but + // just leave empty in plug-in such as tflite. + ret = mCommonEngine->SetInputDataBuffer(inputData); + if (ret != INFERENCE_ENGINE_ERROR_NONE) { + LOGE("Fail to SetInputData"); + return ret; + } + + void *dataPtr = mCommonEngine->GetInputDataPtr(); + if (dataPtr == nullptr) { + LOGE("input data address is null"); + return INFERENCE_ENGINE_ERROR_INTERNAL; + } + + mInputBuffer = cv::Mat(mInputSize.height, mInputSize.width, mMatType, dataPtr); return ret; } +int InferenceEngineVision::SetInput(cv::Mat cvImg) +{ + mSourceSize = cvImg.size(); + int width = mInputSize.width; + int height = mInputSize.height; + + //PreProcess(); + cv::Mat sample; + if (cvImg.channels() == 3 && mCh == 1) + cv::cvtColor(cvImg, sample, cv::COLOR_BGR2GRAY); + else + sample = cvImg; + + // size + cv::Mat sampleResized; + if (sample.size() != cv::Size(width, height)) + cv::resize(sample, sampleResized, cv::Size(width, height)); + else + sampleResized = sample; + + // type + cv::Mat sampleFloat; + if (mCh == 3) + sampleResized.convertTo(sampleFloat, CV_32FC3); + else + sampleResized.convertTo(sampleFloat, CV_32FC1); + + // normalize + cv::Mat sampleNormalized; + cv::Mat meanMat; + if (mCh == 3) + meanMat = cv::Mat(sampleFloat.size(), CV_32FC3, cv::Scalar((float)mMean, (float)mMean, (float)mMean)); + else + meanMat = cv::Mat(sampleFloat.size(), CV_32FC1, cv::Scalar((float)mMean)); + + cv::subtract(sampleFloat, meanMat, sampleNormalized); + + sampleNormalized /= (float)mDeviation; + + sampleNormalized.convertTo(mInputBuffer, mMatType); + + return INFERENCE_ENGINE_ERROR_NONE; +} + int InferenceEngineVision::Run(cv::Mat tensor) { - int ret = engine->Run(tensor); + LOGI("ENTER"); + int ret = SetInput(tensor); + if (ret != INFERENCE_ENGINE_ERROR_NONE) + LOGE("Fail to SetInput InferenceEngineVision"); + + ret = mCommonEngine->Run(); if (ret != INFERENCE_ENGINE_ERROR_NONE) LOGE("Fail to run InferenceEngineVision"); + LOGI("LEAVE"); return ret; } int InferenceEngineVision::GetInferenceResult(ImageClassificationResults& results) { - int ret = engine->GetInferenceResult(results); + // Will contain top N results in ascending order. 
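+ // Technique note (a sketch of what follows): std::priority_queue with a
+ // std::greater comparator behaves as a min-heap. Keeping at most
+ // mOutputNumbers entries and evicting the smallest on overflow yields the
+ // top-N scores in O(count * log N) without sorting the whole prediction
+ // vector; the final reverse turns ascending heap order into descending rank.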
+ std::vector> top_results; + std::priority_queue, + std::vector>, + std::greater>> top_result_pq; + float value; + + tensor_t outputData; + int ret = mCommonEngine->GetInferenceResult(outputData); + if (ret != INFERENCE_ENGINE_ERROR_NONE) { + LOGE("fail to GetInferenceResults with ImageClassificationResults"); + return ret; + } + + std::vector> inferDimInfo(outputData.dimInfo); + std::vector inferResults(outputData.data.begin(), outputData.data.end()); + + long count = inferDimInfo[0][1]; + LOGI("count: %ld", count); + + float *prediction = reinterpret_cast(inferResults[0]); + for (int i = 0; i < count; ++i) { + value = prediction[i]; + // Only add it if it beats the threshold and has a chance at being in + // the top N. + top_result_pq.push(std::pair(value, i)); + + // If at capacity, kick the smallest value out. + if (top_result_pq.size() > mOutputNumbers) { + top_result_pq.pop(); + } + } + + // Copy to output vector and reverse into descending order. + while (!top_result_pq.empty()) { + top_results.push_back(top_result_pq.top()); + top_result_pq.pop(); + } + std::reverse(top_results.begin(), top_results.end()); + + int classIdx = -1; + results.number_of_classes = 0; + for (int idx = 0; idx < mOutputNumbers; ++idx) { + if (top_results[idx].first < mThreshold) + continue; + LOGI("idx:%d", idx); + LOGI("classIdx: %d", top_results[idx].second); + LOGI("classProb: %f", top_results[idx].first); + + classIdx = top_results[idx].second; + results.indices.push_back(classIdx); + results.confidences.push_back(top_results[idx].first); + results.names.push_back(mUserListName[classIdx]); + results.number_of_classes++; + } - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to GetClassficationResults"); - // NULL CHECK? return ret; } int InferenceEngineVision::GetInferenceResult(ObjectDetectionResults& results) { - int ret = engine->GetInferenceResult(results); + tensor_t outputData; + int ret = mCommonEngine->GetInferenceResult(outputData); + if (ret != INFERENCE_ENGINE_ERROR_NONE) { + LOGE("fail to GetInferenceResults with ObjectDetectionResults"); + return ret; + } - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to GetObjectDetectionResults"); + std::vector> inferDimInfo(outputData.dimInfo); + std::vector inferResults(outputData.data.begin(), outputData.data.end()); + + float* boxes = reinterpret_cast(inferResults[0]); + float* classes = reinterpret_cast(inferResults[1]); + float* scores = reinterpret_cast(inferResults[2]); + int number_of_detections = (int)(reinterpret_cast(inferResults[3])); + + int left, top, right, bottom; + cv::Rect loc; + + results.number_of_objects = 0; + for (int idx = 0; idx < number_of_detections; ++idx) { + if (scores[idx] < mThreshold) + continue; + + left = (int)(boxes[idx*4 + 1] * mSourceSize.width); + top = (int)(boxes[idx*4 + 0] * mSourceSize.height); + right = (int)(boxes[idx*4 + 3] * mSourceSize.width); + bottom = (int)(boxes[idx*4 + 2] * mSourceSize.height); + + loc.x = left; + loc.y = top; + loc.width = right -left + 1; + loc.height = bottom - top + 1; + + results.indices.push_back((int)classes[idx]); + results.confidences.push_back(scores[idx]); + results.names.push_back(mUserListName[(int)classes[idx]]); + results.locations.push_back(loc); + results.number_of_objects++; + + LOGI("objectClass: %d", (int)classes[idx]); + LOGI("confidence:%f", scores[idx]); + LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom); + } return ret; } int InferenceEngineVision::GetInferenceResult(FaceDetectionResults& results) { - int ret = 
engine->GetInferenceResult(results); + tensor_t outputData; + int ret = mCommonEngine->GetInferenceResult(outputData); + if (ret != INFERENCE_ENGINE_ERROR_NONE) { + LOGE("fail to GetInferenceResults with FaceDetectionResults"); + return ret; + } - if (ret != INFERENCE_ENGINE_ERROR_NONE) - LOGE("Fail to GetFaceDetectionResults"); + std::vector> inferDimInfo(outputData.dimInfo); + std::vector inferResults(outputData.data.begin(), outputData.data.end()); + + float* boxes = reinterpret_cast(inferResults[0]); + float* classes = reinterpret_cast(inferResults[1]); + float* scores = reinterpret_cast(inferResults[2]); + + int number_of_detections = (int)(reinterpret_cast(inferResults[3])); + int left, top, right, bottom; + cv::Rect loc; + + results.number_of_faces = 0; + for (int idx = 0; idx < number_of_detections; ++idx) { + if (scores[idx] < mThreshold) + continue; + + left = (int)(boxes[idx*4 + 1] * mSourceSize.width); + top = (int)(boxes[idx*4 + 0] * mSourceSize.height); + right = (int)(boxes[idx*4 + 3] * mSourceSize.width); + bottom = (int)(boxes[idx*4 + 2] * mSourceSize.height); + + loc.x = left; + loc.y = top; + loc.width = right -left + 1; + loc.height = bottom - top + 1; + + results.confidences.push_back(scores[idx]); + results.locations.push_back(loc); + results.number_of_faces++; + + LOGI("confidence:%f", scores[idx]); + LOGI("class: %f", classes[idx]); + LOGI("left:%f, top:%f, right:%f, bottom:%f", boxes[idx*4 + 1], boxes[idx*4 + 0], boxes[idx*4 + 3], boxes[idx*4 + 2]); + LOGI("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom); + } return ret; } int InferenceEngineVision::GetInferenceResult(FacialLandMarkDetectionResults& results) { - int ret = engine->GetInferenceResult(results); + + tensor_t outputData; + int ret = mCommonEngine->GetInferenceResult(outputData); + if (ret != INFERENCE_ENGINE_ERROR_NONE) { + LOGE("fail to GetInferenceResults with FacialLandMarkDetectionResults"); + return ret; + } + + std::vector> inferDimInfo(outputData.dimInfo); + std::vector inferResults(outputData.data.begin(), outputData.data.end()); + + long number_of_detections = inferDimInfo[0][1]; + float* loc = reinterpret_cast(inferResults[0]); + + results.number_of_landmarks = 0; if (ret != INFERENCE_ENGINE_ERROR_NONE) LOGE("Fail to GetFacialLandMarkDetectionResults"); - return ret; -} + cv::Point point(0,0); + results.number_of_landmarks = 0; + LOGI("imgW:%d, imgH:%d", mSourceSize.width, mSourceSize.height); + for (int idx = 0; idx < number_of_detections; idx+=2) { + point.x = (int)(loc[idx] * mSourceSize.width); + point.y = (int)(loc[idx+1] * mSourceSize.height); -int InferenceEngineVision::GetNumberOfOutputs() -{ - return engine->GetNumberOfOutputs(); -} + results.locations.push_back(point); + results.number_of_landmarks++; -void InferenceEngineVision::SetUserListName(std::string userlist) -{ - ; + LOGI("x:%d, y:%d", point.x, point.y); + } + + return ret; } + } /* Vision */ } /* InferenceEngineInterface */ -- 2.7.4 From 350c1af03a3c0930278b312e37e314b0fb55ae60 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Tue, 24 Sep 2019 16:49:29 +0900 Subject: [PATCH 09/16] Fix wrong type cast Change-Id: Ieaf42f74bba1eb7f2c832362b519cffb7071dfe9 Signed-off-by: Tae-Young Chung --- packaging/inference-engine-interface.spec | 2 +- vision/inference_engine_vision_impl.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec index cb27576..8118191 100644 --- a/packaging/inference-engine-interface.spec 
+++ b/packaging/inference-engine-interface.spec @@ -1,7 +1,7 @@ Name: inference-engine-interface Summary: Interface of inference engines Version: 0.0.1 -Release: 6 +Release: 7 Group: Multimedia/Framework License: Apache-2.0 Source0: %{name}-%{version}.tar.gz diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp index 0bf5a37..cf4b32f 100644 --- a/vision/inference_engine_vision_impl.cpp +++ b/vision/inference_engine_vision_impl.cpp @@ -388,7 +388,7 @@ int InferenceEngineVision::GetInferenceResult(ObjectDetectionResults& results) float* boxes = reinterpret_cast<float*>(inferResults[0]); float* classes = reinterpret_cast<float*>(inferResults[1]); float* scores = reinterpret_cast<float*>(inferResults[2]); - int number_of_detections = (int)(reinterpret_cast<float*>(inferResults[3])); + int number_of_detections = (int)(*reinterpret_cast<float*>(inferResults[3])); int left, top, right, bottom; cv::Rect loc; @@ -438,7 +438,7 @@ int InferenceEngineVision::GetInferenceResult(FaceDetectionResults& results) float* classes = reinterpret_cast<float*>(inferResults[1]); float* scores = reinterpret_cast<float*>(inferResults[2]); - int number_of_detections = (int)(reinterpret_cast<float*>(inferResults[3])); + int number_of_detections = (int)(*reinterpret_cast<float*>(inferResults[3])); int left, top, right, bottom; cv::Rect loc; -- 2.7.4 From 48f59f70379c317200b63986525e86c864ff32b7 Mon Sep 17 00:00:00 2001 From: Tae-Young Chung Date: Thu, 10 Oct 2019 11:39:55 +0900 Subject: [PATCH 10/16] Fix bugs 1. The number of channels should be applied when allocating memory for mInputBuffer. 2. The loop over top_results should be bounded by its size, not by mOutputNumbers. Change-Id: I0aa01da1a4230803554ab16835966599221b7cbe Signed-off-by: Tae-Young Chung --- packaging/inference-engine-interface.spec | 2 +- vision/inference_engine_vision_impl.cpp | 24 +++++++++++++++++++----- 2 files changed, 20 insertions(+), 6 deletions(-) diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec index 8118191..06a183e 100644 --- a/packaging/inference-engine-interface.spec +++ b/packaging/inference-engine-interface.spec @@ -1,7 +1,7 @@ Name: inference-engine-interface Summary: Interface of inference engines Version: 0.0.1 -Release: 7 +Release: 8 Group: Multimedia/Framework License: Apache-2.0 Source0: %{name}-%{version}.tar.gz diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp index cf4b32f..5dd57a6 100644 --- a/vision/inference_engine_vision_impl.cpp +++ b/vision/inference_engine_vision_impl.cpp @@ -215,12 +215,26 @@ int InferenceEngineVision::Load() //get type and allocate memory to mInputBuffer; InputAttrType attrType = static_cast<InputAttrType>(mCommonEngine->GetInputLayerAttrType()); if (attrType == InputAttrUInt8) { - mMatType = CV_8UC3; - LOGI("InputType is UINT8"); + LOGI("InputType is %d ch with UINT8", mCh); + if (mCh == 1) { + mMatType = CV_8UC1; + } else if (mCh == 3) { + mMatType = CV_8UC3; + } else { + LOGE("Not supported"); + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT; + } } else if (attrType == InputAttrFloat32) { - mMatType = CV_32FC3; - LOGI("InputType FLOAT32"); + LOGI("InputType is %d ch with FLOAT32", mCh); + if (mCh == 1) { + mMatType = CV_32FC1; + } else if (mCh == 3) { + mMatType = CV_32FC3; + } else { + LOGE("Not supported"); + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT; + } } else { LOGE("Not supported"); @@ -356,7 +370,7 @@ int InferenceEngineVision::GetInferenceResult(ImageClassificationResults& result int classIdx = -1; results.number_of_classes = 0; - 
for (int idx = 0; idx < mOutputNumbers; ++idx) { + for (int idx = 0; idx < top_results.size(); ++idx) { if (top_results[idx].first < mThreshold) continue; LOGI("idx:%d", idx); -- 2.7.4 From 746c49b16cba7d67479737c6d479f53d819491cd Mon Sep 17 00:00:00 2001 From: Hyunsoo Park Date: Fri, 4 Oct 2019 17:29:19 +0900 Subject: [PATCH 11/16] Add 'SetLibraryPath' API and an INI parser. It is needed when the user wants to set the full path of the .so file. Change-Id: I397f9750733454fa5b75c064a61034987bb41f22 Signed-off-by: Hyunsoo Park --- CMakeLists.txt | 8 +-- common/inference_engine_common_impl.cpp | 74 ++++++++++++++++++++++++++- common/inference_ini.cpp | 83 +++++++++++++++++++++++++++++++ include/inference_engine_common.h | 1 + include/inference_engine_common_impl.h | 8 ++- include/inference_engine_ini.h | 67 +++++++++++++++++++++++++ include/inference_engine_type.h | 23 +++++++-- include/inference_engine_vision_impl.h | 4 +- packaging/inference-engine-interface.spec | 7 +-- vision/inference_engine_vision_impl.cpp | 10 +++- 10 files changed, 266 insertions(+), 19 deletions(-) create mode 100644 common/inference_ini.cpp create mode 100644 include/inference_engine_ini.h diff --git a/CMakeLists.txt b/CMakeLists.txt index 7ca6ea7..3137cee 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -11,7 +11,7 @@ SET(PREFIX ${CMAKE_INSTALL_PREFIX}) SET(INC_DIR "${PROJECT_SOURCE_DIR}/include") -SET(dependents "dlog") +SET(dependents "dlog iniparser") SET(pc_dependents "capi-base-common") INCLUDE(FindPkgConfig) @@ -45,7 +45,7 @@ SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=${LIB_INSTALL_DIR}") aux_source_directory(common SOURCES) ADD_LIBRARY(${fw_name_common} SHARED ${SOURCES}) -TARGET_LINK_LIBRARIES(${fw_name_common} dlog) +TARGET_LINK_LIBRARIES(${fw_name_common} dlog iniparser) SET_TARGET_PROPERTIES(${fw_name_common} @@ -79,7 +79,7 @@ INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name_common}.pc DESTINATION ${LIB aux_source_directory(vision SOURCES) ADD_LIBRARY(${fw_name_vision} SHARED ${SOURCES}) -TARGET_LINK_LIBRARIES(${fw_name_vision} ${OpenCV_LIBS} dlog) +TARGET_LINK_LIBRARIES(${fw_name_vision} ${OpenCV_LIBS} dlog stdc++fs) SET_TARGET_PROPERTIES(${fw_name_vision} @@ -138,4 +138,4 @@ ADD_CUSTOM_COMMAND( VERBATIM ) -ENDIF(UNIX) \ No newline at end of file +ENDIF(UNIX) diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp index d1aa2ce..e60b82b 100644 --- a/common/inference_engine_common_impl.cpp +++ b/common/inference_engine_common_impl.cpp @@ -16,12 +16,13 @@ #include "inference_engine_error.h" #include "inference_engine_common_impl.h" - +#include "inference_engine_ini.h" #include #include #include #include #include +#include extern "C" { @@ -34,8 +35,21 @@ extern "C" { #define LOG_TAG "INFERENCE_ENGINE_COMMON" } +namespace fs = std::experimental::filesystem; namespace InferenceEngineInterface { namespace Common { +InferenceEngineCommon::InferenceEngineCommon() : + handle(nullptr), + engine(nullptr) +{ + LOGE("ENTER"); + InferenceEngineInI ini; + ini.LoadInI(); + mSelectedBackendEngine = static_cast<inference_backend_type_e>(ini.GetSelectedBackendEngine()); + SetBackendEngine(mSelectedBackendEngine); + LOGI("Backend engine is selected by ini file [%d]", mSelectedBackendEngine); + LOGE("LEAVE"); +} InferenceEngineCommon::InferenceEngineCommon(std::string backend) : handle(nullptr), @@ -43,7 +57,16 @@ InferenceEngineCommon::InferenceEngineCommon(std::string backend) : { LOGE("ENTER"); mBackendLibName = "libinference-engine-" + backend + ".so"; - 
LOGE("LEAVE"); +} + +InferenceEngineCommon::InferenceEngineCommon(inference_backend_type_e backend) : + handle(nullptr), + engine(nullptr) +{ + LOGE("ENTER"); + SetBackendEngine(backend); + LOGI("Backend engine is selected by enum input[%d] set[%d]", backend, mSelectedBackendEngine); LOGE("LEAVE"); } @@ -200,5 +223,52 @@ int InferenceEngineCommon::GetInferenceResult(tensor_t& results) LOGE("LEAVE"); return ret; } + +int InferenceEngineCommon::SetLibraryPath(std::string path) +{ + LOGE("ENTER"); + if (path.empty()) + return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER; + + if (fs::is_directory(path)) { + if(path.back() != '/') + path += "/"; + + mBackendLibName = path + mBackendLibName; + } + else { + if (fs::is_regular_file(path)){ + mBackendLibName = path; + } + else { + LOGE("Fail to find path. [%s]", path.c_str()); + return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER; + } + } + LOGE("lib: %s", mBackendLibName.c_str()); + LOGE("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceEngineCommon::SetBackendEngine(inference_backend_type_e backend) +{ + std::string backendString; + switch(backend){ + case INFERENCE_BACKEND_OPENCV: + backendString = "opencv"; + break; + case INFERENCE_BACKEND_TFLITE: + backendString = "tflite"; + break; + default: + LOGE("Not supported backend engine [%d]", backend); + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; + } + + mBackendLibName = "libinference-engine-" + backendString + ".so"; + + return INFERENCE_ENGINE_ERROR_NONE; +} } /* Common */ } /* InferenceEngineInterface */ diff --git a/common/inference_ini.cpp b/common/inference_ini.cpp new file mode 100644 index 0000000..d4fcbcf --- /dev/null +++ b/common/inference_ini.cpp @@ -0,0 +1,83 @@ +/** + * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#include "inference_engine_ini.h"
+#include "inference_engine_error.h"
+
+#include
+#include
+
+extern "C" {
+
+#include
+
+#ifdef LOG_TAG
+#undef LOG_TAG
+#endif
+
+#define LOG_TAG "INFERENCE_ENGINE_COMMON"
+}
+
+namespace InferenceEngineInterface {
+namespace Common {
+
+const std::string INFERENCE_INI_FILENAME = "/inference/inference_engine.ini";
+
+InferenceEngineInI::InferenceEngineInI() :
+    mIniDefaultPath(SYSCONFDIR)
+{
+    LOGE("ENTER");
+    mIniDefaultPath += INFERENCE_INI_FILENAME;
+    LOGE("LEAVE");
+}
+
+InferenceEngineInI::~InferenceEngineInI()
+{
+    ;
+}
+
+int InferenceEngineInI::LoadInI()
+{
+    LOGE("ENTER");
+    dictionary *dict = iniparser_load(mIniDefaultPath.c_str());
+    if (dict == NULL) {
+        LOGE("Fail to load ini");
+        return -1;
+    }
+
+    mSelectedBackendEngine = static_cast<inference_backend_type_e>(iniparser_getint(dict, "inference backend:selected backend engine", -1));
+
+    if(dict) {
+        iniparser_freedict(dict);
+        dict = NULL;
+    }
+
+    LOGE("LEAVE");
+    return 0;
+}
+
+void InferenceEngineInI::UnLoadInI()
+{
+    ;
+}
+
+int InferenceEngineInI::GetSelectedBackendEngine()
+{
+    return mSelectedBackendEngine;
+}
+
+} /* Common */
+} /* InferenceEngineInterface */
diff --git a/include/inference_engine_common.h b/include/inference_engine_common.h
index 78ba3c8..2f312eb 100644
--- a/include/inference_engine_common.h
+++ b/include/inference_engine_common.h
@@ -116,6 +116,7 @@ public:
      * @since_tizen 5.5
      */
     virtual int GetInferenceResult(tensor_t& results) = 0;
+    virtual int SetLibraryPath(std::string path) = 0;
 };

 typedef void destroy_t(IInferenceEngineCommon*);
diff --git a/include/inference_engine_common_impl.h b/include/inference_engine_common_impl.h
index 6402f5e..c6a18ae 100644
--- a/include/inference_engine_common_impl.h
+++ b/include/inference_engine_common_impl.h
@@ -29,8 +29,12 @@ namespace Common {

 class InferenceEngineCommon {
 public:
+    InferenceEngineCommon();
+
     InferenceEngineCommon(std::string backend);

+    InferenceEngineCommon(inference_backend_type_e backend);
+
     ~InferenceEngineCommon();

     /**
@@ -129,10 +133,12 @@ public:
      * @since_tizen 5.5
      */
     int GetInferenceResult(tensor_t& results);
+    int SetLibraryPath(std::string path);
+    int SetBackendEngine(inference_backend_type_e backend);

 private:
     std::string mBackendLibName;
-
+    inference_backend_type_e mSelectedBackendEngine;
 protected:
     void *handle;
     IInferenceEngineCommon *engine;
diff --git a/include/inference_engine_ini.h b/include/inference_engine_ini.h
new file mode 100644
index 0000000..905e008
--- /dev/null
+++ b/include/inference_engine_ini.h
@@ -0,0 +1,67 @@
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_INI_H__
+#define __INFERENCE_ENGINE_INI_H__
+
+#include
+#include
+#include
+
+namespace InferenceEngineInterface {
+namespace Common {
+
+class InferenceEngineInI {
+public:
+    /**
+     * @brief Creates an Inference ini class instance.
+     *
+     * @since_tizen 5.5
+     */
+    InferenceEngineInI();
+
+    /**
+     * @brief Destroys an Inference ini class instance including
+     *        all its resources.
+     *
+     * @since_tizen 5.5
+     */
+    ~InferenceEngineInI();
+
+    /**
+     * @brief Loads the INI configuration file.
+     *
+     * @since_tizen 5.5
+     */
+    int LoadInI();
+
+    /**
+     * @brief Unloads the INI configuration file.
+     *
+     * @since_tizen 5.5
+     */
+    void UnLoadInI();
+
+    int GetSelectedBackendEngine();
+private:
+    std::string mIniDefaultPath;
+    inference_backend_type_e mSelectedBackendEngine;
+};
+
+} /* Common */
+} /* InferenceEngineInterface */
+
+#endif /* __INFERENCE_ENGINE_INI_H__ */
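For reference, LoadInI() resolves iniparser's "inference backend:selected backend engine" key as section "inference backend", entry "selected backend engine". A plausible /etc/inference/inference_engine.ini is sketched below; the value 1 corresponds to INFERENCE_BACKEND_TFLITE in inference_engine_type.h, and the file actually shipped on a given image may differ:

; selects the default backend used by InferenceEngineCommon's
; default constructor (-1 none, 0 opencv, 1 tflite)
[inference backend]
selected backend engine = 1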
diff --git a/include/inference_engine_type.h b/include/inference_engine_type.h
index 425ba52..1cf820e 100644
--- a/include/inference_engine_type.h
+++ b/include/inference_engine_type.h
@@ -34,11 +34,24 @@ extern "C" {
  *
  */
 typedef enum {
-    INFERENCE_TARGET_NONE = -1,
-    INFERENCE_TARGET_CPU,    /**< CPU */
-    INFERENCE_TARGET_GPU,    /**< GPU */
-    INFERENCE_TARGET_CUSTOM, /**< NPU */
-    INFERENCE_TARGET_MAX
+    INFERENCE_BACKEND_NONE = -1, /**< None */
+    INFERENCE_BACKEND_OPENCV,    /**< OpenCV */
+    INFERENCE_BACKEND_TFLITE,    /**< TensorFlow-Lite */
+    INFERENCE_BACKEND_MAX        /**< Backend MAX */
+} inference_backend_type_e;
+
+/**
+ * @brief Enumeration for inference target.
+ *
+ * @since_tizen 5.5
+ *
+ */
+typedef enum {
+    INFERENCE_TARGET_NONE = -1,
+    INFERENCE_TARGET_CPU,    /**< CPU */
+    INFERENCE_TARGET_GPU,    /**< GPU */
+    INFERENCE_TARGET_CUSTOM, /**< NPU */
+    INFERENCE_TARGET_MAX
 } inference_target_type_e;

 /**
diff --git a/include/inference_engine_vision_impl.h b/include/inference_engine_vision_impl.h
index 6d181b5..2f76daa 100644
--- a/include/inference_engine_vision_impl.h
+++ b/include/inference_engine_vision_impl.h
@@ -59,7 +59,6 @@ namespace Vision {

 class InferenceEngineVision {
 public:
-
     InferenceEngineVision(std::string backend);

     ~InferenceEngineVision();
@@ -176,7 +175,7 @@ public:
      * @since_tizen 5.5
      */
     int SetUserFile(std::string filename);
-
+    int SetLibraryPath(std::string path);
 protected:
     /**
      * @brief Set an input with a type of cv::Mat
      *
      * @since_tizen 5.5
      */
     void SetUserListName(std::string userlist);
-
 private:
     std::string mBackendLibName;
     std::vector<std::string> mUserListName;
diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec
index 06a183e..4659c89 100644
--- a/packaging/inference-engine-interface.spec
+++ b/packaging/inference-engine-interface.spec
@@ -1,7 +1,7 @@
 Name: inference-engine-interface
 Summary: Interface of inference engines
 Version: 0.0.1
-Release: 8
+Release: 9
 Group: Multimedia/Framework
 License: Apache-2.0
 Source0: %{name}-%{version}.tar.gz
@@ -11,6 +11,7 @@ BuildRequires: pkgconfig(libtzplatform-config)
 BuildRequires: pkgconfig(capi-base-common)
 BuildRequires: pkgconfig(opencv) >= 3.4.1
 BuildRequires: pkgconfig(python)
+BuildRequires: pkgconfig(iniparser)

 %description
 Interface of inference engines
@@ -64,8 +65,8 @@ export CXXFLAGS="$CXXFLAGS -DTIZEN_DEBUG_ENABLE"
 export FFLAGS="$FFLAGS -DTIZEN_DEBUG_ENABLE"
 %endif

-export CFLAGS+=" -DPATH_LIBDIR=\\\"%{_libdir}\\\""
-export CXXFLAGS+=" -DPATH_LIBDIR=\\\"%{_libdir}\\\""
+export CFLAGS+=" -DPATH_LIBDIR=\\\"%{_libdir}\\\" -DSYSCONFDIR=\\\"%{_sysconfdir}\\\""
+export CXXFLAGS+=" -DPATH_LIBDIR=\\\"%{_libdir}\\\" -DSYSCONFDIR=\\\"%{_sysconfdir}\\\""

 MAJORVER=`echo %{version} | awk 'BEGIN {FS="."}{print $1}'`
 %cmake . -DFULLVER=%{version} -DMAJORVER=${MAJORVER} -DTZ_SYS_BIN=%TZ_SYS_BIN \
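The new -DSYSCONFDIR define in the spec is what InferenceEngineInI's constructor concatenates with INFERENCE_INI_FILENAME. A compressed illustration of that path assembly; the fallback macro exists only so the sketch builds outside the Tizen build:

#include <iostream>
#include <string>

// SYSCONFDIR is normally injected by the build via
// -DSYSCONFDIR=\"%{_sysconfdir}\"; this fallback is for the sketch only.
#ifndef SYSCONFDIR
#define SYSCONFDIR "/etc"
#endif

int main()
{
    std::string iniPath = SYSCONFDIR;              // e.g. "/etc"
    iniPath += "/inference/inference_engine.ini";  // INFERENCE_INI_FILENAME
    std::cout << iniPath << '\n';                  // /etc/inference/inference_engine.ini
    return 0;
}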
diff --git a/vision/inference_engine_vision_impl.cpp b/vision/inference_engine_vision_impl.cpp
index 5dd57a6..a2247a5 100644
--- a/vision/inference_engine_vision_impl.cpp
+++ b/vision/inference_engine_vision_impl.cpp
@@ -48,7 +48,6 @@ typedef enum {

 namespace InferenceEngineInterface {
 namespace Vision {
-
 InferenceEngineVision::InferenceEngineVision(std::string backend) :
     mCh(0),
     mDim(0),
@@ -521,5 +520,14 @@ int InferenceEngineVision::GetInferenceResult(FacialLandMarkDetectionResults& re
     return ret;
 }

+int InferenceEngineVision::SetLibraryPath(std::string path)
+{
+    int ret = 0;
+    ret = mCommonEngine->SetLibraryPath(path);
+    if (ret != INFERENCE_ENGINE_ERROR_NONE)
+        LOGE("Fail to run SetLibraryPath");
+    LOGI("LEAVE");
+    return ret;
+}
 } /* Vision */
 } /* InferenceEngineInterface */
--
2.7.4
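With PATCH 11 complete, a backend can be chosen three ways: implicitly from the ini file, explicitly by enum, or overridden with a library path. The sketch below strings the three together from a client's point of view; it is illustrative only, the "/usr/lib/custom/" directory is an assumption, and error handling is reduced to an early return.

#include "inference_engine_common_impl.h"
#include "inference_engine_error.h"

using namespace InferenceEngineInterface::Common;

int main()
{
    // 1. Default constructor: the backend comes from inference_engine.ini.
    InferenceEngineCommon fromIni;

    // 2. Enum constructor: SetBackendEngine() maps INFERENCE_BACKEND_TFLITE
    //    to "libinference-engine-tflite.so".
    InferenceEngineCommon fromEnum(INFERENCE_BACKEND_TFLITE);

    // 3. Optional override: a directory keeps the library name and prepends
    //    the prefix; a regular file replaces the library path outright.
    if (fromEnum.SetLibraryPath("/usr/lib/custom/") != INFERENCE_ENGINE_ERROR_NONE)
        return -1;

    return 0;
}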
Change-Id: I55254ad54cd1fe84373b876da1a96d2a18d2e243
Signed-off-by: Hyunsoo Park
---
 include/inference_engine_common.h         | 1 -
 packaging/inference-engine-interface.spec | 2 +-
 2 files changed, 1 insertion(+), 2 deletions(-)

diff --git a/include/inference_engine_common.h b/include/inference_engine_common.h
index 2f312eb..78ba3c8 100644
--- a/include/inference_engine_common.h
+++ b/include/inference_engine_common.h
@@ -116,7 +116,6 @@ public:
      * @since_tizen 5.5
      */
     virtual int GetInferenceResult(tensor_t& results) = 0;
-    virtual int SetLibraryPath(std::string path) = 0;
 };

 typedef void destroy_t(IInferenceEngineCommon*);
diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec
index 76cfadb..8b82e56 100644
--- a/packaging/inference-engine-interface.spec
+++ b/packaging/inference-engine-interface.spec
@@ -1,7 +1,7 @@
 Name: inference-engine-interface
 Summary: Interface of inference engines
 Version: 0.0.1
-Release: 10
+Release: 11
 Group: Multimedia/Framework
 License: Apache-2.0
 Source0: %{name}-%{version}.tar.gz
--
2.7.4

From 18d9157f461b17c5a1ad7059433631b11936361e Mon Sep 17 00:00:00 2001
From: Hyunsoo Park
Date: Tue, 15 Oct 2019 12:07:18 +0900
Subject: [PATCH 14/16] Add initializer in constructor for member variable

A new class member variable was added but never initialized, so
initialization code is added to each constructor.

Change-Id: I47d9f9299395af8b73898859e176c65ade014bcb
Signed-off-by: Hyunsoo Park
---
 common/inference_engine_common_impl.cpp   |  3 +++
 common/inference_ini.cpp                  | 11 ++++++-----
 packaging/inference-engine-interface.spec |  2 +-
 3 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/common/inference_engine_common_impl.cpp b/common/inference_engine_common_impl.cpp
index e60b82b..0ed52e2 100644
--- a/common/inference_engine_common_impl.cpp
+++ b/common/inference_engine_common_impl.cpp
@@ -39,6 +39,7 @@ namespace fs = std::experimental::filesystem;
 namespace InferenceEngineInterface {
 namespace Common {
 InferenceEngineCommon::InferenceEngineCommon() :
+    mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
     handle(nullptr),
     engine(nullptr)
 {
@@ -52,6 +53,7 @@ InferenceEngineCommon::InferenceEngineCommon() :
 }

 InferenceEngineCommon::InferenceEngineCommon(std::string backend) :
+    mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
     handle(nullptr),
     engine(nullptr)
 {
@@ -61,6 +63,7 @@ InferenceEngineCommon::InferenceEngineCommon(std::string backend) :
 }

 InferenceEngineCommon::InferenceEngineCommon(inference_backend_type_e backend) :
+    mSelectedBackendEngine(INFERENCE_BACKEND_NONE),
     handle(nullptr),
     engine(nullptr)
 {
diff --git a/common/inference_ini.cpp b/common/inference_ini.cpp
index d4fcbcf..ed05547 100644
--- a/common/inference_ini.cpp
+++ b/common/inference_ini.cpp
@@ -37,16 +37,17 @@ namespace Common {
 const std::string INFERENCE_INI_FILENAME = "/inference/inference_engine.ini";

 InferenceEngineInI::InferenceEngineInI() :
-    mIniDefaultPath(SYSCONFDIR)
+    mIniDefaultPath(SYSCONFDIR),
+    mSelectedBackendEngine(INFERENCE_BACKEND_NONE)
 {
-  LOGE("ENTER");
-  mIniDefaultPath += INFERENCE_INI_FILENAME;
-  LOGE("LEAVE");
+    LOGE("ENTER");
+    mIniDefaultPath += INFERENCE_INI_FILENAME;
+    LOGE("LEAVE");
 }

 InferenceEngineInI::~InferenceEngineInI()
 {
-  ;
+    ;
 }

 int InferenceEngineInI::LoadInI()
diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec
index 8b82e56..c091e0d 100644
--- a/packaging/inference-engine-interface.spec
+++ b/packaging/inference-engine-interface.spec
@@ -1,7 +1,7 @@
 Name: inference-engine-interface
 Summary: Interface of inference engines
 Version: 0.0.1
-Release: 11
+Release: 12
 Group: Multimedia/Framework
 License: Apache-2.0
 Source0: %{name}-%{version}.tar.gz
--
2.7.4
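The bug PATCH 14 closes is the classic uninitialized member: mSelectedBackendEngine could be read and logged before any constructor had written it. A reduced illustration, with class and member names shortened for the example:

#include <cstdio>

enum backend_e { BACKEND_NONE = -1, BACKEND_OPENCV, BACKEND_TFLITE };

class Engine {
public:
    // Without mBackend(BACKEND_NONE) in this initializer list, mBackend
    // holds an indeterminate value, and any code that logs or compares it
    // before assignment reads garbage.
    Engine() : mBackend(BACKEND_NONE) {}

    int backend() const { return mBackend; }

private:
    backend_e mBackend;
};

int main()
{
    Engine e;
    std::printf("backend = %d\n", e.backend()); // always -1 now
    return 0;
}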
From 2a20d192e6878547557aa4cf0fca064bcbf3aa8f Mon Sep 17 00:00:00 2001
From: Hyunsoo Park
Date: Thu, 7 Nov 2019 15:12:33 +0900
Subject: [PATCH 15/16] Fix wrong PC_NAME and add library link

PC_NAME of the common package was set wrong because of a misspelling,
and stdc++fs was not linked into the common package. Both are now
corrected.

Change-Id: I410fe525dbf2f2c677c044329b603928a8f1772a
Signed-off-by: Hyunsoo Park
---
 CMakeLists.txt                            | 4 ++--
 packaging/inference-engine-interface.spec | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3137cee..2476ab7 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -45,7 +45,7 @@ SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=${LIB_INSTALL_DIR}")
 aux_source_directory(common SOURCES)
 ADD_LIBRARY(${fw_name_common} SHARED ${SOURCES})

-TARGET_LINK_LIBRARIES(${fw_name_common} dlog iniparser)
+TARGET_LINK_LIBRARIES(${fw_name_common} dlog iniparser stdc++fs)


 SET_TARGET_PROPERTIES(${fw_name_common}
@@ -63,7 +63,7 @@ INSTALL(
     PATTERN "*.h"
     )

-SET(PC_NAME ${fw_namefw_name_common_vision})
+SET(PC_NAME ${fw_name_common})
 SET(PC_REQUIRED ${pc_dependents})
 SET(PC_LDFLAGS -l${fw_name_common})
 SET(PC_CFLAGS -I\${includedir}/media)
diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec
index c091e0d..29c213a 100644
--- a/packaging/inference-engine-interface.spec
+++ b/packaging/inference-engine-interface.spec
@@ -1,7 +1,7 @@
 Name: inference-engine-interface
 Summary: Interface of inference engines
 Version: 0.0.1
-Release: 12
+Release: 13
 Group: Multimedia/Framework
 License: Apache-2.0
 Source0: %{name}-%{version}.tar.gz
--
2.7.4

From 1e4d3aad2e5e2b249995145115aafd785fde9e48 Mon Sep 17 00:00:00 2001
From: Tae-Young Chung
Date: Thu, 2 Jan 2020 17:32:57 +0900
Subject: [PATCH 16/16] Migration to OpenCV-4.2.0

Change-Id: Ie5960bb38c613ec13fa795c6ee3ce48fca3ad1d8
Signed-off-by: Tae-Young Chung
---
 packaging/inference-engine-interface.spec | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec
index 29c213a..6e1dc7e 100644
--- a/packaging/inference-engine-interface.spec
+++ b/packaging/inference-engine-interface.spec
@@ -1,6 +1,6 @@
 Name: inference-engine-interface
 Summary: Interface of inference engines
-Version: 0.0.1
+Version: 0.0.2
 Release: 13
 Group: Multimedia/Framework
 License: Apache-2.0
@@ -9,7 +9,7 @@ BuildRequires: cmake
 BuildRequires: pkgconfig(dlog)
 BuildRequires: pkgconfig(libtzplatform-config)
 BuildRequires: pkgconfig(capi-base-common)
-BuildRequires: pkgconfig(opencv) >= 3.4.1
+BuildRequires: pkgconfig(opencv4)
 BuildRequires: pkgconfig(python)
 BuildRequires: pkgconfig(iniparser)
--
2.7.4
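Reading the series end to end, the dereference fix at the top of this excerpt deserves restating outside diff form: in a typical TFLite SSD post-process, the fourth output tensor stores the detection count as a single float, so the pointer must be dereferenced before the integer conversion. A reduced sketch with a fabricated output vector; detection_count is an illustrative name, not part of this interface:

#include <vector>

// inferResults stands in for the engine's raw output pointers; each entry
// points at a float buffer, as in GetInferenceResult().
static int detection_count(const std::vector<void*>& inferResults)
{
    // Wrong: (int)(reinterpret_cast<float*>(inferResults[3])) truncates the
    // pointer value itself. Right: dereference first, then convert.
    return static_cast<int>(*reinterpret_cast<float*>(inferResults[3]));
}

int main()
{
    float num = 5.0f;
    std::vector<void*> outputs(4);
    outputs[3] = &num;
    return detection_count(outputs) == 5 ? 0 : 1;
}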