From 4a650890e5f3b1d6264425d612a1e6e04f014901 Mon Sep 17 00:00:00 2001 From: Inki Dae Date: Wed, 27 May 2020 17:29:46 +0900 Subject: [PATCH] Add init code Signed-off-by: Inki Dae --- AUTHORS | 1 + CMakeLists.txt | 94 +++++ LICENSE.APLv2 | 206 ++++++++++ NOTICE | 3 + README.md | 3 + inference-engine-mlapi.manifest | 5 + packaging/inference-engine-mlapi.spec | 46 +++ src/inference_engine_nnstreamer.cpp | 445 ++++++++++++++++++++++ src/inference_engine_nnstreamer_private.h | 85 +++++ 9 files changed, 888 insertions(+) create mode 100644 AUTHORS create mode 100644 CMakeLists.txt create mode 100644 LICENSE.APLv2 create mode 100644 NOTICE create mode 100644 README.md create mode 100644 inference-engine-mlapi.manifest create mode 100644 packaging/inference-engine-mlapi.spec create mode 100644 src/inference_engine_nnstreamer.cpp create mode 100644 src/inference_engine_nnstreamer_private.h diff --git a/AUTHORS b/AUTHORS new file mode 100644 index 0000000..7bc7f2d --- /dev/null +++ b/AUTHORS @@ -0,0 +1 @@ +Inki Dae diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..d7aab7a --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,94 @@ + +CMAKE_MINIMUM_REQUIRED(VERSION 2.6) +SET(fw_name "inference-engine-nnstreamer") + +PROJECT(${fw_name}) + +SET(CMAKE_INSTALL_PREFIX /usr) +SET(PREFIX ${CMAKE_INSTALL_PREFIX}) + +set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD_REQUIRED ON) +set(CMAKE_CXX_EXTENSIONS OFF) + +SET(INC_DIR "${PROJECT_SOURCE_DIR}/include") +SET(dependents "dlog inference-engine-interface-common capi-nnstreamer") + +INCLUDE(FindPkgConfig) +pkg_check_modules(${fw_name} REQUIRED ${dependents}) +FOREACH(flag ${${fw_name}_CFLAGS}) + SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}") + SET(EXTRA_CXXFLAGS "${EXTRA_CXXFLAGS} ${flag}") +ENDFOREACH(flag) + +FOREACH(flag ${${fw_name}_LDFLAGS}) + SET(EXTRA_LDFLAGS "${EXTRA_LDFLAGS} ${flag}") +ENDFOREACH(flag) +#Remove leading whitespace POLICY CMP0004 +STRING(REGEX REPLACE "^ " "" EXTRA_LDFLAGS ${EXTRA_LDFLAGS}) + +SET(CMAKE_C_FLAGS "-I./include -I./include/headers ${CMAKE_C_FLAGS} ${EXTRA_CFLAGS} -fPIC -Wall -w") +SET(CMAKE_C_FLAGS_DEBUG "-O0 -g") + +SET(CMAKE_CXX_FLAGS "-I./include -I./include/headers ${CMAKE_CXX_FLAGS} ${EXTRA_CXXFLAGS} -fPIC") +SET(CMAKE_CXX_FLAGS_DEBUG "-O0 -g --w") + +ADD_DEFINITIONS("-DPREFIX=\"${CMAKE_INSTALL_PREFIX}\"") +ADD_DEFINITIONS("-DTIZEN_DEBUG") + +SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=${LIB_INSTALL_DIR}") + +aux_source_directory(src SOURCES) +ADD_LIBRARY(${fw_name} SHARED ${SOURCES}) + +TARGET_LINK_LIBRARIES(${fw_name} ${EXTRA_LDFLAGS}) + + +SET_TARGET_PROPERTIES(${fw_name} + PROPERTIES + CLEAN_DIRECT_OUTPUT 1 +) + +INSTALL(TARGETS ${fw_name} DESTINATION ${LIB_INSTALL_DIR}) +INSTALL( + DIRECTORY ${INC_DIR}/ DESTINATION include/media + FILES_MATCHING + PATTERN "*_private.h" EXCLUDE + PATTERN "*.h" + ) + +SET(PC_NAME ${fw_name}) +SET(PC_REQUIRED ${pc_dependents}) +SET(PC_LDFLAGS -l${fw_name}) +SET(PC_CFLAGS -I\${includedir}/media) + +IF(UNIX) + +ADD_CUSTOM_TARGET (distclean @echo cleaning for source distribution) +ADD_CUSTOM_COMMAND( + DEPENDS clean + COMMENT "distribution clean" + COMMAND find + ARGS . 
+ -not -name config.cmake -and \( + -name tester.c -or + -name Testing -or + -name CMakeFiles -or + -name cmake.depends -or + -name cmake.check_depends -or + -name CMakeCache.txt -or + -name cmake.check_cache -or + -name *.cmake -or + -name Makefile -or + -name core -or + -name core.* -or + -name gmon.out -or + -name install_manifest.txt -or + -name *.pc -or + -name *~ \) + | grep -v TC | xargs rm -rf + TARGET distclean + VERBATIM +) + +ENDIF(UNIX) diff --git a/LICENSE.APLv2 b/LICENSE.APLv2 new file mode 100644 index 0000000..bbe9d02 --- /dev/null +++ b/LICENSE.APLv2 @@ -0,0 +1,206 @@ +Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. All rights reserved. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + diff --git a/NOTICE b/NOTICE new file mode 100644 index 0000000..0e0f016 --- /dev/null +++ b/NOTICE @@ -0,0 +1,3 @@ +Copyright (c) Samsung Electronics Co., Ltd. All rights reserved. +Except as noted, this software is licensed under Apache License, Version 2. +Please, see the LICENSE.APLv2 file for Apache License terms and conditions. diff --git a/README.md b/README.md new file mode 100644 index 0000000..5d441e1 --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# inference-engine-mlapi +This is Tizen MediaVision inference engine ML Single API backend of NNStreamer +for the use of NNFW, Vivante and TRIV2 NPU. diff --git a/inference-engine-mlapi.manifest b/inference-engine-mlapi.manifest new file mode 100644 index 0000000..a76fdba --- /dev/null +++ b/inference-engine-mlapi.manifest @@ -0,0 +1,5 @@ + + + + + diff --git a/packaging/inference-engine-mlapi.spec b/packaging/inference-engine-mlapi.spec new file mode 100644 index 0000000..1ea50ae --- /dev/null +++ b/packaging/inference-engine-mlapi.spec @@ -0,0 +1,46 @@ +Name: inference-engine-mlapi +Summary: ML Single API backend of NNStreamer for MediaVision +Version: 0.0.1 +Release: 1 +Group: Multimedia/Libraries +License: Apache-2.0 +ExclusiveArch: %{arm} aarch64 +Source0: %{name}-%{version}.tar.gz +Requires(post): /sbin/ldconfig +Requires(postun): /sbin/ldconfig +BuildRequires: cmake +BuildRequires: python +BuildRequires: pkgconfig(dlog) +BuildRequires: pkgconfig(inference-engine-interface-common) +BuildRequires: capi-nnstreamer-devel + +%description +ML Single API backend of NNStreamer for MediaVision + +%prep +%setup -q + +%build +%if 0%{?sec_build_binary_debug_enable} +export CFLAGS="$CFLAGS -DTIZEN_DEBUG_ENABLE" +export CXXFLAGS="$CXXFLAGS -DTIZEN_DEBUG_ENABLE" +export FFLAGS="$FFLAGS -DTIZEN_DEBUG_ENABLE" +%endif + +%cmake . 
+ +make %{?jobs:-j%jobs} + +%install +rm -rf %{buildroot} + +%make_install + +%post -p /sbin/ldconfig +%postun -p /sbin/ldconfig + +%files +%manifest %{name}.manifest +%license LICENSE.APLv2 +%defattr(-,root,root,-) +%{_libdir}/*.so diff --git a/src/inference_engine_nnstreamer.cpp b/src/inference_engine_nnstreamer.cpp new file mode 100644 index 0000000..e3ad84a --- /dev/null +++ b/src/inference_engine_nnstreamer.cpp @@ -0,0 +1,445 @@ +/** + * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "inference_engine_nnstreamer_private.h" + +#include +#include +#include +#include +#include + +namespace InferenceEngineImpl { +namespace MLAPIImpl { + +InferenceMLAPI::InferenceMLAPI(void) : + mPluginType(), + mTargetDevice(), + mSingle(), + mDesignated_inputs(), + mDesignated_outputs(), + mInputProperty(), + mOutputProperty(), + mInputTensorBuffer(), + mOutputTensorBuffer(), + mInputTensorInfo(), + mOutputTensorInfo() +{ + LOGI("ENTER"); + + LOGI("LEAVE"); +} + +InferenceMLAPI::~InferenceMLAPI() +{ + mDesignated_inputs.clear(); + std::vector().swap(mDesignated_inputs); + + mDesignated_outputs.clear(); + std::vector().swap(mDesignated_outputs); +} + +int InferenceMLAPI::SetPluginType(int type) +{ + LOGI("ENTER"); + + if (INFERENCE_BACKEND_NNFW != type && INFERENCE_BACKEND_VIVANTE != type) { + LOGE("Invalid backend type."); + return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED; + } + + mPluginType = type; + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::SetTargetDevices(int types) +{ + LOGI("ENTER"); + + + LOGI("Inference targets are, "); + if (types & INFERENCE_TARGET_CPU) { + mTargetDevice |= INFERENCE_TARGET_CPU; + LOGI("CPU"); + } + + if (types & INFERENCE_TARGET_GPU) { + mTargetDevice |= INFERENCE_TARGET_GPU; + LOGI("GPU"); + } + + if (types & INFERENCE_TARGET_CUSTOM) { + mTargetDevice |= INFERENCE_TARGET_CUSTOM; + LOGI("NPU"); + } + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::Load(std::vector model_paths, inference_model_format_e model_format) +{ + LOGI("ENTER"); + + // ML Single API of MLAPI requires model_paths rule like below, + // "so library file path,nb model file path" or vise versa. + std::string model_str(model_paths[0] + "," + model_paths[1]); + + LOGI("Model name = %s", model_str.c_str()); + + // TODO. Set NNFW backend type and HW type properly. 
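+	// The switch below maps mPluginType to the (ml_nnfw_type_e, ml_nnfw_hw_e)
+	// pair that ml_single_open() needs, e.g. INFERENCE_BACKEND_VIVANTE maps to
+	// (ML_NNFW_TYPE_VIVANTE, ML_NNFW_HW_ANY). As an illustration only (the file
+	// names are hypothetical), a Vivante request would arrive as
+	//   model_paths[0] = ".../network.so", model_paths[1] = ".../network.nb",
+	// making model_str ".../network.so,.../network.nb".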
+ + ml_nnfw_type_e nnfw_type; + ml_nnfw_hw_e nnfw_hw; + + switch (mPluginType) { + case INFERENCE_BACKEND_VIVANTE: + nnfw_type = ML_NNFW_TYPE_VIVANTE; + nnfw_hw = ML_NNFW_HW_ANY; + LOGI("Vivante tensor filter will be used."); + break; + case INFERENCE_BACKEND_NNFW: + nnfw_type = ML_NNFW_TYPE_NNFW; + if (mTargetDevice == INFERENCE_TARGET_CPU) { + nnfw_hw = ML_NNFW_HW_CPU_NEON; + LOGI("Target device is NEON."); + } else if (mTargetDevice == INFERENCE_TARGET_GPU) { + nnfw_hw = ML_NNFW_HW_GPU; + LOGI("Target device is GPU"); + } else { + LOGE("Invalid inference target device type."); + return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER; + } + LOGI("NNFW tensor filter will be used."); + break; + // TODO. + default: + LOGE("Invalid plugin type."); + return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER; + } + + int ret = ml_single_open(&mSingle, model_str.c_str(), NULL, NULL, nnfw_type, nnfw_hw); + if (ret != ML_ERROR_NONE) { + LOGE("Failed to request ml_single_open(%d).", ret); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::GetInputTensorBuffers(std::vector &buffers) +{ + LOGI("ENTER"); + + // TODO. Implement this function according to a given nnstreamer backend properly. + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::GetOutputTensorBuffers(std::vector &buffers) +{ + LOGI("ENTER"); + + // Output tensor buffers will be allocated by a backend plugin of nnstreamer + // So add a null tensor buffer object. This buffer will be updated at Run callback. + + // Caution. this tensor buffer will be checked by upper framework to verity if + // the tensor buffer object is valid or not so fill dummy data to the tensor buffer. + + // TODO. Consider multiple output tensors. + + inference_engine_tensor_buffer tensor_buf = { 0, }; + tensor_buf.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16; + tensor_buf.buffer = (void *)1; + tensor_buf.size = 1; + tensor_buf.owner_is_backend = 1; + buffers.push_back(tensor_buf); + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + + +int InferenceMLAPI::GetInputLayerProperty(inference_engine_layer_property &property) +{ + LOGI("ENTER"); + + ml_tensors_info_h in_info = NULL; + + // TODO. Need to check if model file loading is done. 
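+	// A minimal sketch of such a check, assuming mSingle stays NULL until
+	// Load() has completed successfully:
+	//
+	//   if (mSingle == NULL) {
+	//       LOGE("Model is not loaded yet.");
+	//       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+	//   }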
+ + int ret = ml_single_get_input_info(mSingle, &in_info); + if (ret != ML_ERROR_NONE) { + LOGE("Failed to request ml_single_get_input_info(%d).", ret); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + unsigned int cnt; + ret = ml_tensors_info_get_count(in_info, &cnt); + if (ret != ML_ERROR_NONE) { + LOGE("Failed to request ml_tensors_info_get_count(%d).", ret); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + LOGI("input tensor count = %u", cnt); + + for (unsigned int i = 0; i < cnt; ++i) { + ml_tensor_type_e in_type; + unsigned int in_dim; + char *in_name = NULL; + size_t in_size; + + ret = ml_tensors_info_get_tensor_type(in_info, i, &in_type); + if (ret != ML_ERROR_NONE) { + LOGE("Failed to request ml_tensors_info_get_tensor_type(%d).", ret); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + LOGI("input tensor type = %d", in_type); + + ret = ml_tensors_info_get_tensor_dimension(in_info, i, &in_dim); + if (ret != ML_ERROR_NONE) { + LOGE("Failed to request ml_tensors_info_get_tensor_dimension(%d).", ret); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + LOGI("input tensor dimension = %u", in_dim); + + ret = ml_tensors_info_get_tensor_name(in_info, i, &in_name); + if (ret != ML_ERROR_NONE) { + LOGE("Failed to request ml_tensors_info_get_tensor_name(%d).", ret); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + LOGI("input tensor name = %s", in_name); + + ret = ml_tensors_info_get_tensor_size(in_info, i, &in_size); + if (ret != ML_ERROR_NONE) { + LOGE("Failed to request ml_tensors_info_get_tensor_size(%d).", ret); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + LOGI("input tensor size = %u", in_size); + + // TODO. Compare tensor info from engine to one from a given property. + } + + property.layer_names = mInputProperty.layer_names; + + std::vector::iterator iter; + for (iter = mInputProperty.tensor_infos.begin(); iter != mInputProperty.tensor_infos.end(); iter++) { + inference_engine_tensor_info tensor_info = *iter; + property.tensor_infos.push_back(tensor_info); + } + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::GetOutputLayerProperty(inference_engine_layer_property &property) +{ + LOGI("ENTER"); + + property.layer_names = mOutputProperty.layer_names; + + inference_engine_tensor_info tensor_info; + + // TODO. Set tensor info from a given nnstreamer backend instead of fixed one. + + tensor_info.data_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT16; + tensor_info.shape = { 1, 1001 }; + tensor_info.size = 1001; + property.tensor_infos.push_back(tensor_info); + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::SetInputLayerProperty(inference_engine_layer_property &property) +{ + LOGI("ENTER"); + + std::vector::iterator iter; + for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) { + std::string name = *iter; + LOGI("input layer name = %s", name.c_str()); + } + + mDesignated_inputs.clear(); + std::vector().swap(mDesignated_inputs); + + // TODO. 
Request input property information to a given nnstreamer backend, + // and set it instead of user-given one, + + mDesignated_inputs = property.layer_names; + mInputProperty = property; + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::SetOutputLayerProperty(inference_engine_layer_property &property) +{ + LOGI("ENTER"); + + std::vector::iterator iter; + for (iter = property.layer_names.begin(); iter != property.layer_names.end(); iter++) { + std::string name = *iter; + LOGI("output layer name = %s", name.c_str()); + } + + mDesignated_outputs.clear(); + std::vector().swap(mDesignated_outputs); + + // TODO. Request output property information to a given nnstreamer backend, + // and set it instead of user-given one, + + mDesignated_outputs = property.layer_names; + mOutputProperty = property; + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::GetBackendCapacity(inference_engine_capacity *capacity) +{ + LOGI("ENTER"); + + if (capacity == NULL) { + LOGE("Bad pointer."); + return INFERENCE_ENGINE_ERROR_INVALID_PARAMETER; + } + + // TODO. flag supported accel device types according to a given nnstreamer backend. + capacity->supported_accel_devices = INFERENCE_TARGET_CUSTOM; + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::CheckTensorBuffers(std::vector &input_buffers, + std::vector &output_buffers) +{ + LOGI("ENTER"); + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +int InferenceMLAPI::Run(std::vector &input_buffers, + std::vector &output_buffers) +{ + LOGI("ENTER"); + + // Make sure to check if tensor buffer count and binding info one are same. + int err = CheckTensorBuffers(input_buffers, output_buffers); + if (err != INFERENCE_ENGINE_ERROR_NONE) { + return err; + } + + ml_tensors_info_h in_info = NULL; + + err = ml_single_get_input_info(mSingle, &in_info); + if (err != ML_ERROR_NONE) { + LOGE("Failed to request ml_single_get_input_info(%d).", err); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + ml_tensors_data_h input_data = NULL; + err = ml_tensors_data_create(in_info, &input_data); + if (err != ML_ERROR_NONE) { + LOGE("Failed to request ml_tensors_data_create(%d).", err); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + unsigned int cnt; + err = ml_tensors_info_get_count(in_info, &cnt); + if (err != ML_ERROR_NONE) { + LOGE("Failed to request ml_tensors_info_get_count(%d).", err); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + for (unsigned int i = 0; i < cnt; ++i) { + LOGI("index(%d) : buffer = %p, size = %u\n", i, input_buffers[i].buffer, input_buffers[i].size); + err = ml_tensors_data_set_tensor_data(input_data, i, input_buffers[i].buffer, input_buffers[i].size); + if (err != ML_ERROR_NONE) { + LOGE("Failed to request ml_tensors_data_set_tensor_data(%d).", err); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + } + + ml_tensors_data_h output_data = NULL; + err = ml_single_invoke(mSingle, input_data, &output_data); + if (err != ML_ERROR_NONE) { + LOGE("Failed to request ml_single_invoke(%d).", err); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + // TODO. Consider mutiple output tensors. 
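+	// A sketch of the multi-tensor case, assuming the output count is queried
+	// first via ml_single_get_output_info() / ml_tensors_info_get_count():
+	//
+	//   for (unsigned int i = 0; i < out_cnt; ++i)
+	//       ml_tensors_data_get_tensor_data(output_data, i,
+	//               (void **)&output_buffers[i].buffer, &output_buffers[i].size);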
+ + err = ml_tensors_data_get_tensor_data(output_data, 0, (void **)&output_buffers[0].buffer, &output_buffers[0].size); + if (err != ML_ERROR_NONE) { + LOGE("Failed to request ml_tensors_data_get_tensor_data(%d).", err); + return INFERENCE_ENGINE_ERROR_INVALID_OPERATION; + } + + LOGI("Output tensor = %u", output_buffers[0].size); + + LOGI("LEAVE"); + + return INFERENCE_ENGINE_ERROR_NONE; +} + +extern "C" +{ +class IInferenceEngineCommon* EngineCommonInit(void) +{ + LOGI("ENTER"); + + InferenceMLAPI *engine = new InferenceMLAPI(); + + LOGI("LEAVE"); + + return engine; +} + +void EngineCommonDestroy(class IInferenceEngineCommon *engine) +{ + LOGI("ENTER"); + + delete engine; + + LOGI("LEAVE"); +} +} +} /* MLAPIImpl */ +} /* InferenceEngineImpl */ diff --git a/src/inference_engine_nnstreamer_private.h b/src/inference_engine_nnstreamer_private.h new file mode 100644 index 0000000..d695f43 --- /dev/null +++ b/src/inference_engine_nnstreamer_private.h @@ -0,0 +1,85 @@ +/** + * Copyright (c) 2020 Samsung Electronics Co., Ltd All Rights Reserved + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__ +#define __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__ + +#include +#include + +#include +#include + +#ifdef LOG_TAG +#undef LOG_TAG +#endif + +#define LOG_TAG "INFERENCE_ENGINE_MLAPI" + +using namespace InferenceEngineInterface::Common; + +namespace InferenceEngineImpl { +namespace MLAPIImpl { + +class InferenceMLAPI : public IInferenceEngineCommon { +public: + InferenceMLAPI(); + ~InferenceMLAPI(); + + int SetPluginType(int type) override; + + int SetTargetDevices(int types) override; + + int Load(std::vector model_paths, inference_model_format_e model_format) override; + + int GetInputTensorBuffers(std::vector &buffers) override; + + int GetOutputTensorBuffers(std::vector &buffers) override; + + int GetInputLayerProperty(inference_engine_layer_property &property) override; + + int GetOutputLayerProperty(inference_engine_layer_property &property) override; + + int SetInputLayerProperty(inference_engine_layer_property &property) override; + + int SetOutputLayerProperty(inference_engine_layer_property &property) override; + + int GetBackendCapacity(inference_engine_capacity *capacity) override; + + int Run(std::vector &input_buffers, + std::vector &output_buffers) override; + +private: + int CheckTensorBuffers(std::vector &input_buffers, + std::vector &output_buffers); + + int mPluginType; + int mTargetDevice; + ml_single_h mSingle; + std::vector mDesignated_inputs; + std::vector mDesignated_outputs; + inference_engine_layer_property mInputProperty; + inference_engine_layer_property mOutputProperty; + std::vector mInputTensorBuffer; + std::vector mOutputTensorBuffer; + std::vector mInputTensorInfo; + std::vector mOutputTensorInfo; +}; + +} /* InferenceEngineImpl */ +} /* MLAPIImpl */ + +#endif /* __INFERENCE_ENGINE_NNSTREAMER_PRIVATE_H__ */ -- 2.34.1
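For reference, a client consumes this backend through the inference-engine-interface
common API rather than by linking it directly. The sketch below is illustrative only:
the library name, header name, model paths, and dlopen-based loading are assumptions,
and only entry points and enums that appear in this patch are used.

    #include <dlfcn.h>
    #include <string>
    #include <vector>
    #include <inference_engine_common.h>    // IInferenceEngineCommon (assumed header name)

    using namespace InferenceEngineInterface::Common;

    int main()
    {
        // Load the backend plugin and resolve its C entry points.
        void *handle = dlopen("libinference-engine-nnstreamer.so", RTLD_NOW);
        if (handle == NULL)
            return -1;

        auto init = reinterpret_cast<IInferenceEngineCommon *(*)(void)>(
                dlsym(handle, "EngineCommonInit"));
        auto deinit = reinterpret_cast<void (*)(IInferenceEngineCommon *)>(
                dlsym(handle, "EngineCommonDestroy"));

        IInferenceEngineCommon *engine = init();

        engine->SetPluginType(INFERENCE_BACKEND_NNFW);
        engine->SetTargetDevices(INFERENCE_TARGET_CPU);

        // Load() joins model_paths[0] and model_paths[1] with a comma; the
        // paths and the model format value below are hypothetical.
        std::vector<std::string> models = { "/usr/share/model/a.tflite",
                                            "/usr/share/model/b.tflite" };
        engine->Load(models, INFERENCE_MODEL_TFLITE);

        deinit(engine);
        dlclose(handle);
        return 0;
    }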