Note that this initial code is still under development and provides limited functionality.
Signed-off-by: Tae-Young Chung <ty83.chung@samsung.com>
--- /dev/null
+Taeyoung Chung <ty83.chung@samsung.com>
\ No newline at end of file
--- /dev/null
+
+CMAKE_MINIMUM_REQUIRED(VERSION 2.6)
+SET(fw_name "inference-engine-tf")
+
+PROJECT(${fw_name})
+
+SET(CMAKE_INSTALL_PREFIX /usr)
+SET(PREFIX ${CMAKE_INSTALL_PREFIX})
+
+SET(INC_DIR "${PROJECT_SOURCE_DIR}/include")
+SET(dependents "dlog inference-engine-interface-vision inference-engine-interface-common tensorflow protobuf")
+
+INCLUDE(FindPkgConfig)
+pkg_check_modules(${fw_name} REQUIRED ${dependents})
+FOREACH(flag ${${fw_name}_CFLAGS})
+    SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
+    SET(EXTRA_CXXFLAGS "${EXTRA_CXXFLAGS} ${flag}")
+ENDFOREACH(flag)
+
+FOREACH(flag ${${fw_name}_LDFLAGS})
+    SET(EXTRA_LDFLAGS "${EXTRA_LDFLAGS} ${flag}")
+ENDFOREACH(flag)
+# Remove leading whitespace (POLICY CMP0004). The value is quoted so an
+# empty or semicolon-containing EXTRA_LDFLAGS is still passed as a single
+# argument; unquoted, an empty value makes STRING() fail at configure time.
+STRING(REGEX REPLACE "^ " "" EXTRA_LDFLAGS "${EXTRA_LDFLAGS}")
+
+# OpenCV (core module only). NOTE(review): with REQUIRED, FIND_PACKAGE
+# already aborts when OpenCV is missing, so this NOT-FOUND branch is a
+# belt-and-braces guard kept for clarity.
+FIND_PACKAGE(OpenCV REQUIRED core)
+if(NOT OpenCV_FOUND)
+    MESSAGE(SEND_ERROR "OpenCV NOT FOUND")
+    RETURN()
+else()
+    INCLUDE_DIRECTORIES(${OpenCV_INCLUDE_DIRS})
+endif()
+
+SET(CMAKE_C_FLAGS "-I./include -I./include/headers ${CMAKE_C_FLAGS} ${EXTRA_CFLAGS} -fPIC -Wall -w")
+SET(CMAKE_C_FLAGS_DEBUG "-O0 -g")
+
+SET(CMAKE_CXX_FLAGS "-I./include -I./include/headers ${CMAKE_CXX_FLAGS} ${EXTRA_CXXFLAGS} -fPIC")
+# "-w" suppresses warnings; the previous "--w" is not a valid GCC option
+# and made Debug builds fail with "unrecognized command line option".
+SET(CMAKE_CXX_FLAGS_DEBUG "-O0 -g -w")
+
+ADD_DEFINITIONS("-DPREFIX=\"${CMAKE_INSTALL_PREFIX}\"")
+ADD_DEFINITIONS("-DTIZEN_DEBUG")
+
+SET(CMAKE_EXE_LINKER_FLAGS "-Wl,--as-needed -Wl,--rpath=${LIB_INSTALL_DIR}")
+
+aux_source_directory(src SOURCES)
+ADD_LIBRARY(${fw_name} SHARED ${SOURCES})
+
+TARGET_LINK_LIBRARIES(${fw_name} ${OpenCV_LIBS} ${EXTRA_LDFLAGS})
+
+SET_TARGET_PROPERTIES(${fw_name}
+     PROPERTIES
+     CLEAN_DIRECT_OUTPUT 1
+)
+
+INSTALL(TARGETS ${fw_name} DESTINATION ${LIB_INSTALL_DIR})
+INSTALL(
+        DIRECTORY ${INC_DIR}/ DESTINATION include/media
+        FILES_MATCHING
+        PATTERN "*_private.h" EXCLUDE
+        PATTERN "*.h"
+        )
+
+SET(PC_NAME ${fw_name})
+SET(PC_REQUIRED ${pc_dependents})
+SET(PC_LDFLAGS -l${fw_name})
+SET(PC_CFLAGS -I\${includedir}/media)
+
+IF(UNIX)
+
+ADD_CUSTOM_TARGET (distclean @echo cleaning for source distribution)
+ADD_CUSTOM_COMMAND(
+        DEPENDS clean
+        COMMENT "distribution clean"
+        COMMAND find
+        ARGS    .
+        -not -name config.cmake -and \(
+        -name tester.c -or
+        -name Testing -or
+        -name CMakeFiles -or
+        -name cmake.depends -or
+        -name cmake.check_depends -or
+        -name CMakeCache.txt -or
+        -name cmake.check_cache -or
+        -name *.cmake -or
+        -name Makefile -or
+        -name core -or
+        -name core.* -or
+        -name gmon.out -or
+        -name install_manifest.txt -or
+        -name *.pc -or
+        -name *~ \)
+        | grep -v TC | xargs rm -rf
+        TARGET  distclean
+        VERBATIM
+)
+
+ENDIF(UNIX)
\ No newline at end of file
--- /dev/null
+Copyright (c) 2000 - 2011 Samsung Electronics Co., Ltd. All rights reserved.\r
+\r
+ Apache License\r
+ Version 2.0, January 2004\r
+ http://www.apache.org/licenses/\r
+\r
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\r
+\r
+ 1. Definitions.\r
+\r
+ "License" shall mean the terms and conditions for use, reproduction,\r
+ and distribution as defined by Sections 1 through 9 of this document.\r
+\r
+ "Licensor" shall mean the copyright owner or entity authorized by\r
+ the copyright owner that is granting the License.\r
+\r
+ "Legal Entity" shall mean the union of the acting entity and all\r
+ other entities that control, are controlled by, or are under common\r
+ control with that entity. For the purposes of this definition,\r
+ "control" means (i) the power, direct or indirect, to cause the\r
+ direction or management of such entity, whether by contract or\r
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the\r
+ outstanding shares, or (iii) beneficial ownership of such entity.\r
+\r
+ "You" (or "Your") shall mean an individual or Legal Entity\r
+ exercising permissions granted by this License.\r
+\r
+ "Source" form shall mean the preferred form for making modifications,\r
+ including but not limited to software source code, documentation\r
+ source, and configuration files.\r
+\r
+ "Object" form shall mean any form resulting from mechanical\r
+ transformation or translation of a Source form, including but\r
+ not limited to compiled object code, generated documentation,\r
+ and conversions to other media types.\r
+\r
+ "Work" shall mean the work of authorship, whether in Source or\r
+ Object form, made available under the License, as indicated by a\r
+ copyright notice that is included in or attached to the work\r
+ (an example is provided in the Appendix below).\r
+\r
+ "Derivative Works" shall mean any work, whether in Source or Object\r
+ form, that is based on (or derived from) the Work and for which the\r
+ editorial revisions, annotations, elaborations, or other modifications\r
+ represent, as a whole, an original work of authorship. For the purposes\r
+ of this License, Derivative Works shall not include works that remain\r
+ separable from, or merely link (or bind by name) to the interfaces of,\r
+ the Work and Derivative Works thereof.\r
+\r
+ "Contribution" shall mean any work of authorship, including\r
+ the original version of the Work and any modifications or additions\r
+ to that Work or Derivative Works thereof, that is intentionally\r
+ submitted to Licensor for inclusion in the Work by the copyright owner\r
+ or by an individual or Legal Entity authorized to submit on behalf of\r
+ the copyright owner. For the purposes of this definition, "submitted"\r
+ means any form of electronic, verbal, or written communication sent\r
+ to the Licensor or its representatives, including but not limited to\r
+ communication on electronic mailing lists, source code control systems,\r
+ and issue tracking systems that are managed by, or on behalf of, the\r
+ Licensor for the purpose of discussing and improving the Work, but\r
+ excluding communication that is conspicuously marked or otherwise\r
+ designated in writing by the copyright owner as "Not a Contribution."\r
+\r
+ "Contributor" shall mean Licensor and any individual or Legal Entity\r
+ on behalf of whom a Contribution has been received by Licensor and\r
+ subsequently incorporated within the Work.\r
+\r
+ 2. Grant of Copyright License. Subject to the terms and conditions of\r
+ this License, each Contributor hereby grants to You a perpetual,\r
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable\r
+ copyright license to reproduce, prepare Derivative Works of,\r
+ publicly display, publicly perform, sublicense, and distribute the\r
+ Work and such Derivative Works in Source or Object form.\r
+\r
+ 3. Grant of Patent License. Subject to the terms and conditions of\r
+ this License, each Contributor hereby grants to You a perpetual,\r
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable\r
+ (except as stated in this section) patent license to make, have made,\r
+ use, offer to sell, sell, import, and otherwise transfer the Work,\r
+ where such license applies only to those patent claims licensable\r
+ by such Contributor that are necessarily infringed by their\r
+ Contribution(s) alone or by combination of their Contribution(s)\r
+ with the Work to which such Contribution(s) was submitted. If You\r
+ institute patent litigation against any entity (including a\r
+ cross-claim or counterclaim in a lawsuit) alleging that the Work\r
+ or a Contribution incorporated within the Work constitutes direct\r
+ or contributory patent infringement, then any patent licenses\r
+ granted to You under this License for that Work shall terminate\r
+ as of the date such litigation is filed.\r
+\r
+ 4. Redistribution. You may reproduce and distribute copies of the\r
+ Work or Derivative Works thereof in any medium, with or without\r
+ modifications, and in Source or Object form, provided that You\r
+ meet the following conditions:\r
+\r
+ (a) You must give any other recipients of the Work or\r
+ Derivative Works a copy of this License; and\r
+\r
+ (b) You must cause any modified files to carry prominent notices\r
+ stating that You changed the files; and\r
+\r
+ (c) You must retain, in the Source form of any Derivative Works\r
+ that You distribute, all copyright, patent, trademark, and\r
+ attribution notices from the Source form of the Work,\r
+ excluding those notices that do not pertain to any part of\r
+ the Derivative Works; and\r
+\r
+ (d) If the Work includes a "NOTICE" text file as part of its\r
+ distribution, then any Derivative Works that You distribute must\r
+ include a readable copy of the attribution notices contained\r
+ within such NOTICE file, excluding those notices that do not\r
+ pertain to any part of the Derivative Works, in at least one\r
+ of the following places: within a NOTICE text file distributed\r
+ as part of the Derivative Works; within the Source form or\r
+ documentation, if provided along with the Derivative Works; or,\r
+ within a display generated by the Derivative Works, if and\r
+ wherever such third-party notices normally appear. The contents\r
+ of the NOTICE file are for informational purposes only and\r
+ do not modify the License. You may add Your own attribution\r
+ notices within Derivative Works that You distribute, alongside\r
+ or as an addendum to the NOTICE text from the Work, provided\r
+ that such additional attribution notices cannot be construed\r
+ as modifying the License.\r
+\r
+ You may add Your own copyright statement to Your modifications and\r
+ may provide additional or different license terms and conditions\r
+ for use, reproduction, or distribution of Your modifications, or\r
+ for any such Derivative Works as a whole, provided Your use,\r
+ reproduction, and distribution of the Work otherwise complies with\r
+ the conditions stated in this License.\r
+\r
+ 5. Submission of Contributions. Unless You explicitly state otherwise,\r
+ any Contribution intentionally submitted for inclusion in the Work\r
+ by You to the Licensor shall be under the terms and conditions of\r
+ this License, without any additional terms or conditions.\r
+ Notwithstanding the above, nothing herein shall supersede or modify\r
+ the terms of any separate license agreement you may have executed\r
+ with Licensor regarding such Contributions.\r
+\r
+ 6. Trademarks. This License does not grant permission to use the trade\r
+ names, trademarks, service marks, or product names of the Licensor,\r
+ except as required for reasonable and customary use in describing the\r
+ origin of the Work and reproducing the content of the NOTICE file.\r
+\r
+ 7. Disclaimer of Warranty. Unless required by applicable law or\r
+ agreed to in writing, Licensor provides the Work (and each\r
+ Contributor provides its Contributions) on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r
+ implied, including, without limitation, any warranties or conditions\r
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\r
+ PARTICULAR PURPOSE. You are solely responsible for determining the\r
+ appropriateness of using or redistributing the Work and assume any\r
+ risks associated with Your exercise of permissions under this License.\r
+\r
+ 8. Limitation of Liability. In no event and under no legal theory,\r
+ whether in tort (including negligence), contract, or otherwise,\r
+ unless required by applicable law (such as deliberate and grossly\r
+ negligent acts) or agreed to in writing, shall any Contributor be\r
+ liable to You for damages, including any direct, indirect, special,\r
+ incidental, or consequential damages of any character arising as a\r
+ result of this License or out of the use or inability to use the\r
+ Work (including but not limited to damages for loss of goodwill,\r
+ work stoppage, computer failure or malfunction, or any and all\r
+ other commercial damages or losses), even if such Contributor\r
+ has been advised of the possibility of such damages.\r
+\r
+ 9. Accepting Warranty or Additional Liability. While redistributing\r
+ the Work or Derivative Works thereof, You may choose to offer,\r
+ and charge a fee for, acceptance of support, warranty, indemnity,\r
+ or other liability obligations and/or rights consistent with this\r
+ License. However, in accepting such obligations, You may act only\r
+ on Your own behalf and on Your sole responsibility, not on behalf\r
+ of any other Contributor, and only if You agree to indemnify,\r
+ defend, and hold each Contributor harmless for any liability\r
+ incurred by, or claims asserted against, such Contributor by reason\r
+ of your accepting any such warranty or additional liability.\r
+\r
+ END OF TERMS AND CONDITIONS\r
+\r
+ APPENDIX: How to apply the Apache License to your work.\r
+\r
+ To apply the Apache License to your work, attach the following\r
+ boilerplate notice, with the fields enclosed by brackets "[]"\r
+ replaced with your own identifying information. (Don't include\r
+ the brackets!) The text should be enclosed in the appropriate\r
+ comment syntax for the file format. We also recommend that a\r
+ file or class name and description of purpose be included on the\r
+ same "printed page" as the copyright notice for easier\r
+ identification within third-party archives.\r
+\r
+ Copyright [yyyy] [name of copyright owner]\r
+\r
+ Licensed under the Apache License, Version 2.0 (the "License");\r
+ you may not use this file except in compliance with the License.\r
+ You may obtain a copy of the License at\r
+\r
+ http://www.apache.org/licenses/LICENSE-2.0\r
+\r
+ Unless required by applicable law or agreed to in writing, software\r
+ distributed under the License is distributed on an "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+ See the License for the specific language governing permissions and\r
+ limitations under the License.\r
+\r
+\r
+\r
--- /dev/null
+Copyright (c) Samsung Electronics Co., Ltd. All rights reserved.
+Except as noted, this software is licensed under the Apache License, Version 2.0.
+Please, see the LICENSE.APLv2 file for Apache License terms and conditions.
--- /dev/null
+# inference-engine-tf
+This is a TensorFlow-based implementation of inference-engine-interface
\ No newline at end of file
--- /dev/null
+<manifest>
+ <request>
+ <domain name="_" />
+ </request>
+</manifest>
--- /dev/null
+Name: inference-engine-tf
+Summary: Tensorflow based implementation of inference-engine-interface
+Version: 0.0.1
+Release: 1
+Group: Multimedia/Libraries
+License: Apache-2.0
+Source0: %{name}-%{version}.tar.gz
+Requires(post): /sbin/ldconfig
+Requires(postun): /sbin/ldconfig
+BuildRequires: cmake
+BuildRequires: python
+BuildRequires: pkgconfig(dlog)
+BuildRequires: pkgconfig(inference-engine-interface-vision)
+BuildRequires: pkgconfig(inference-engine-interface-common)
+BuildRequires: pkgconfig(opencv) >= 3.4.1
+BuildRequires: pkgconfig(protobuf) >= 3.6.1
+BuildRequires: pkgconfig(tensorflow)
+
+%description
+Tensorflow based implementation of inference-engine-interface
+
+
+%prep
+%setup -q
+
+%build
+%if 0%{?sec_build_binary_debug_enable}
+export CFLAGS="$CFLAGS -DTIZEN_DEBUG_ENABLE"
+export CXXFLAGS="$CXXFLAGS -DTIZEN_DEBUG_ENABLE"
+export FFLAGS="$FFLAGS -DTIZEN_DEBUG_ENABLE"
+%endif
+
+%cmake .
+
+make %{?jobs:-j%jobs}
+
+%install
+rm -rf %{buildroot}
+
+%make_install
+
+%post -p /sbin/ldconfig
+%postun -p /sbin/ldconfig
+
+%files
+%manifest %{name}.manifest
+%license LICENSE.APLv2
+%defattr(-,root,root,-)
+%{_libdir}/*.so
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <inference_engine_error.h>
+#include "inference_engine_tf_private.h"
+
+#include <fstream>
+#include <iostream>
+#include <unistd.h>
+#include <time.h>
+
+namespace InferenceEngineImpl {
+namespace TFImpl {
+
+// Constructs the TensorFlow engine wrapper, recording the model config
+// file (protoFile), the frozen-graph weight file, and an optional
+// class-label list file. No TensorFlow resources are created here —
+// graph loading and session creation happen in Load().
+InferenceTF::InferenceTF(std::string protoFile, std::string weightFile,
+    std::string userFile) :
+    mConfigFile(protoFile),
+    mWeightFile(weightFile),
+    mUserFile(userFile)
+{
+    LOGE("ENTER");
+    LOGI("InferenceEngineImpl::TFImpl");
+    LOGI("using vision");
+    LOGI("using common");
+    LOGE("LEAVE");
+}
+
+// Destructor. Intentionally empty: the TF session is presumably released
+// by its owning smart pointer (see session.reset() in Load()) — confirm
+// against the member declaration in the private header.
+InferenceTF::~InferenceTF()
+{
+    ;
+}
+
+// Reads the user-supplied label file (one class name per line) and
+// appends every non-empty line via SetUserListName().
+// Returns INFERENCE_ENGINE_ERROR_INVALID_PATH when the file cannot be
+// opened, INFERENCE_ENGINE_ERROR_NONE otherwise.
+int InferenceTF::SetUserFile()
+{
+    std::ifstream fp(mUserFile.c_str());
+    if (!fp.is_open()) {
+        return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+    }
+
+    // Loop on getline() success instead of "while (!fp.eof())": testing
+    // eof() before reading is the classic anti-pattern that processes the
+    // result of a failed final read.
+    std::string userListName;
+    while (std::getline(fp, userListName)) {
+        if (userListName.length())
+            SetUserListName(userListName);
+    }
+
+    fp.close();
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// The generic input-parameter setter is not supported by this backend;
+// use the explicit setters below instead.
+int InferenceTF::SetInputTensorParam()
+{
+    LOGE("Not supported");
+    return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+}
+
+// Records the input geometry: width/height in pixels, tensor dimension
+// count (dim) and channel count (ch). Consumed later by Load() when the
+// input tensor is allocated and by setInput() when resizing frames.
+int InferenceTF::SetInputTensorParamInput(int width, int height, int dim, int ch)
+{
+    mCh = ch;
+    mDim = dim;
+    mInputSize = cv::Size(width, height);
+
+    LOGE("InputSize is %d x %d\n", mInputSize.width, mInputSize.height);
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Stores the normalization constants applied in setInput():
+// pixel' = (pixel - mean) / deviation.
+int InferenceTF::SetInputTensorParamNorm(double deviation, double mean)
+{
+    mDeviation = deviation;
+    mMean = mean;
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// The input node name is auto-detected in Load(); the argument is
+// accepted for interface parity but ignored here.
+int InferenceTF::SetInputTensorParamNode(std::string node)
+{
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Generic output-parameter setter: not supported by this backend.
+int InferenceTF::SetOutputTensorParam()
+{
+    return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+}
+
+// Minimum confidence used to filter detections in GetInferenceResult().
+int InferenceTF::SetOutputTensorParamThresHold(double threshold)
+{
+    mThreshold = threshold;
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Number of top-K results to report for classification.
+int InferenceTF::SetOutputTensorParamNumbers(int number)
+{
+    mOutputNumbers = number;
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// The output type is ignored by this backend; accepted for interface parity.
+int InferenceTF::SetOutputTensorParamType(int type)
+{
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Output node names are derived from the graph in Load()/Run(); the
+// argument is accepted for interface parity but ignored here.
+int InferenceTF::SetOutPutTensorParamNodes(std::string node)
+{
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Selects the inference device. This backend runs on CPU only: the CPU
+// and GPU cases are accepted silently, any other value logs an error, and
+// execution always proceeds in CPU mode. Always returns NONE.
+int InferenceTF::SetTargetDevice(inference_target_type_e type)
+{
+    /* CPU mode only */
+    switch (type) {
+    case INFERENCE_TARGET_CPU:
+        break;
+    case INFERENCE_TARGET_GPU:
+        break;
+    case INFERENCE_TARGET_NONE:
+    default:
+        LOGE("Not supported device type [%d], Set CPU mode", (int)type);
+    }
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Loads the frozen GraphDef from mWeightFile and creates a TF session,
+// then optionally reads the label list (mUserFile). The input layer is
+// auto-detected by scanning node names for "image_tensor" or "input"
+// (falls back to node 0 if neither is found); the LAST graph node is
+// assumed to be the output layer — TODO confirm this holds for all
+// supported models. Finally allocates the input tensor as {1, H, W, C}
+// using the input node's declared dtype.
+// Returns INVALID_PATH when a file is missing or unparsable and
+// INVALID_OPERATION when session creation fails.
+int InferenceTF::Load()
+{
+    int ret = INFERENCE_ENGINE_ERROR_NONE;
+
+    // access() returns non-zero when the weight file does not exist.
+    if (access(mWeightFile.c_str(), F_OK)) {
+        LOGE("weightFilePath in [%s] ", mWeightFile.c_str());
+        return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+    }
+
+    tensorflow::Status load_graph_status =
+                ReadBinaryProto(tensorflow::Env::Default(), mWeightFile, &mGraphDef);
+    if (!load_graph_status.ok()) {
+        return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+    }
+    session.reset(tensorflow::NewSession(tensorflow::SessionOptions()));
+    tensorflow::Status session_create_status = session->Create(mGraphDef);
+    if (!session_create_status.ok()) {
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    // The label file is optional: only validated/loaded when a path was given.
+    size_t userFileLength = mUserFile.length();
+    if (userFileLength > 0 && access(mUserFile.c_str(), F_OK)) {
+        LOGE("userFilePath in [%s] ", mUserFile.c_str());
+        return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+    }
+
+    // NOTE(review): a label-read failure is logged but not returned; the
+    // engine continues with an empty label list.
+    ret = (userFileLength > 0) ? SetUserFile() : INFERENCE_ENGINE_ERROR_NONE;
+    if (ret != INFERENCE_ENGINE_ERROR_NONE)
+        LOGE("Fail to read categoryList");
+
+    int maxNodes = mGraphDef.node_size();
+    //mInputLayer = mGraphDef.node(0).name();
+    int inputNode = 0;
+    for (int idx = 0; idx < maxNodes; ++idx) {
+        mInputLayer = mGraphDef.node(idx).name();
+        if (mInputLayer.compare("image_tensor") == 0 || mInputLayer.compare("input") == 0) {
+            inputNode = idx;
+            break;
+        }
+    }
+    LOGE("Input layer: %d", inputNode);
+    mOutputLayer = mGraphDef.node(maxNodes - 1).name();
+
+    /* for (int k = 0; k < maxNodes; ++k) {
+        std::string layerstr = mGraphDef.node(k).name();
+        LOGE("node[%3d]: %s", k, layerstr.c_str());
+    }
+    */
+
+    // Read the input node's declared dtype and size the input tensor
+    // accordingly (batch of 1, NHWC layout).
+    auto inputAttr = mGraphDef.node(inputNode).attr();
+    tensorflow::AttrValue attrType = inputAttr["dtype"];
+    mInputAttrType = attrType.type();
+
+    tensorflow::Tensor inputTensor(mInputAttrType, tensorflow::TensorShape({ 1, mInputSize.height, mInputSize.width, mCh }));
+    mInputBlob = inputTensor;
+
+/*
+    if (mInputAttrType == tensorflow::DT_UINT8) {
+        mMatType = CV_8UC3;
+        mInputData = mInputBlob.flat<uint8_t>().data();
+        LOGE("InputType is DT_UINT8");
+    }
+    else if (mInputAttrType == tensorflow::DT_FLOAT) {
+        mMatType = CV_32FC3;
+        mInputData = mInputBlob.flat<float>().data();
+        LOGE("InputType is DT_FLOAT");
+    }
+    else {
+        LOGE("NOT supported");
+    }
+
+    mInputBuffer = cv::Mat(mInputSize.height, mInputSize.width, mMatType, mInputData);
+    */
+    return ret;
+}
+
+// Binds mInputData to the raw buffer backing the input tensor
+// (mInputBlob) according to its dtype. Only DT_UINT8 and DT_FLOAT are
+// supported; anything else returns NOT_SUPPORTED_FORMAT.
+int InferenceTF::CreateInputLayerPassage()
+{
+    if (mInputAttrType == tensorflow::DT_UINT8) {
+        mInputData = mInputBlob.flat<uint8_t>().data();
+        LOGE("InputType is DT_UINT8"); // fixed log typo: was "DT_UNIT8"
+    }
+    else if (mInputAttrType == tensorflow::DT_FLOAT) {
+        mInputData = mInputBlob.flat<float>().data();
+        LOGE("InputType is DT_FLOAT");
+    }
+    else {
+        LOGE("NOT supported");
+        return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+    }
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// No-op overload kept for interface parity; see the typed overload below.
+int InferenceTF::PrepareInputLayerPassage()
+{
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// For image input, wraps the tensor buffer (mInputData, set by
+// CreateInputLayerPassage()) in a cv::Mat header so setInput() can write
+// pixels straight into the tensor. GENERAL input needs no preparation.
+// Returns NOT_SUPPORTED_FORMAT for unknown input types or dtypes.
+int InferenceTF::PrepareInputLayerPassage(inference_input_type_e type)
+{
+    switch (type) {
+    case INFERENCE_INPUT_GENERAL:
+        break;
+    case INFERENCE_INPUT_IMAGE:
+    {
+        if (mInputAttrType == tensorflow::DT_UINT8)
+            mMatType = CV_8UC3;
+        else if (mInputAttrType == tensorflow::DT_FLOAT)
+            mMatType = CV_32FC3;
+        else
+            return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+
+        // mInputBuffer shares memory with mInputBlob; no copy is made here.
+        mInputBuffer = cv::Mat(mInputSize.height, mInputSize.width, mMatType, mInputData);
+    }
+        break;
+    default:
+        LOGE("Not supported");
+        return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED_FORMAT;
+    }
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Preprocesses one frame into the input tensor: optional BGR->GRAY
+// conversion (when the model expects 1 channel), resize to mInputSize,
+// conversion to float, mean subtraction and division by mDeviation, then
+// conversion into mInputBuffer (which aliases the tensor memory — see
+// PrepareInputLayerPassage). The source size is remembered for scaling
+// detection boxes back in GetInferenceResult().
+int InferenceTF::setInput(cv::Mat cvImg)
+{
+    mSourceSize = cvImg.size();
+    int width = mInputSize.width;
+    int height = mInputSize.height;
+
+    //PreProcess();
+    // Channel conversion: only 3->1 is handled; other mismatches pass through.
+    cv::Mat sample;
+    if (cvImg.channels() == 3 && mCh == 1)
+        cv::cvtColor(cvImg, sample, cv::COLOR_BGR2GRAY);
+    else
+        sample = cvImg;
+
+    // size
+    cv::Mat sampleResized;
+    if (sample.size() != cv::Size(width, height))
+        cv::resize(sample, sampleResized, cv::Size(width, height));
+    else
+        sampleResized = sample;
+
+    // type
+    cv::Mat sampleFloat;
+    if (mCh == 3)
+        sampleResized.convertTo(sampleFloat, CV_32FC3);
+    else
+        sampleResized.convertTo(sampleFloat, CV_32FC1);
+
+    // normalize: (pixel - mean) / deviation, per channel
+    cv::Mat sampleNormalized;
+    cv::Mat meanMat;
+    if (mCh == 3)
+        meanMat = cv::Mat(sampleFloat.size(), CV_32FC3, cv::Scalar((float)mMean, (float)mMean, (float)mMean));
+    else
+        meanMat = cv::Mat(sampleFloat.size(), CV_32FC1, cv::Scalar((float)mMean));
+
+    cv::subtract(sampleFloat, meanMat, sampleNormalized);
+
+    sampleNormalized /= (float)mDeviation;
+
+    // Final convertTo writes into the tensor-backed buffer in mMatType.
+    sampleNormalized.convertTo(mInputBuffer, mMatType);
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Runs inference on one image. The output fetch list is chosen by
+// pattern-matching mOutputLayer against known model families:
+// InceptionV3 classification, TF object-detection API graphs
+// (detection_classes / num_detections), and SSD "mbox" graphs.
+// NOTE(review): the setInput() return value is captured but the session
+// is run even if it was non-NONE — consider early-returning on failure.
+int InferenceTF::Run(cv::Mat tensor)
+{
+    int ret = setInput(tensor);
+    std::vector<std::string> output_layers;
+    if (mOutputLayer.compare("InceptionV3/Predictions/Reshape_1") == 0) {
+        output_layers.push_back(mOutputLayer);
+    }
+    else if (mOutputLayer.compare("detection_classes") == 0 ||
+            mOutputLayer.compare("num_detections") == 0) {
+        output_layers = { "detection_boxes:0", "detection_scores:0", "detection_classes:0", "num_detections:0" };
+    }
+    else if (mOutputLayer.compare("mbox_conf_flatten") == 0) {
+        output_layers = { "mbox_loc:0", "mbox_conf:0", "mbox_conf_reshape:0", "mbox_conf_softmax:0", "mbox_conf_flatten:0" };
+    }
+    else {
+        LOGE("Not supported output layer[%s]", mOutputLayer.c_str());
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    tensorflow::Status status = session->Run({ { mInputLayer, mInputBlob } }, output_layers, {}, &mOutputBlob);
+
+    if (!status.ok()) {
+        LOGE("%s", status.ToString().c_str());
+        LOGE("%s", status.error_message().c_str());
+        ret = INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    return ret;
+}
+
+// Runs inference on a raw float vector: copies the values into the input
+// tensor buffer and fetches mOutputLayer only.
+// NOTE(review): mInputData is assumed to point at a float buffer of at
+// least tensor.size() elements (i.e. the model input is DT_FLOAT and the
+// caller sized the vector correctly) — there is no bounds check here.
+int InferenceTF::Run(std::vector<float> tensor)
+{
+    int dataIdx = 0;
+    float * inputData = static_cast<float*>(mInputData);
+    for( std::vector<float>::iterator iter = tensor.begin();
+        iter != tensor.end(); ++iter) {
+        inputData[dataIdx] = *iter;
+        dataIdx++;
+    }
+
+    std::vector<std::string> output_layers;
+    output_layers.push_back(mOutputLayer);
+
+    tensorflow::Status status = session->Run({ { mInputLayer, mInputBlob } }, output_layers, {}, &mOutputBlob);
+
+    if (!status.ok()) {
+        LOGE("%s", status.ToString().c_str());
+        LOGE("%s", status.error_message().c_str());
+        return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+    }
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Extracts top-K classification results: builds a tiny TopK graph over
+// the first output tensor, runs it in a throw-away session, and fills
+// indices/confidences/names.
+// NOTE(review): the _session->Run status is ignored, and classIdx is used
+// to index mUserListName without a bounds check — an index beyond the
+// label list would be undefined behavior; confirm label-file coverage.
+int InferenceTF::GetInferenceResult(ImageClassificationResults& results)
+{
+    int outputSize = mOutputBlob.size();
+    LOGE("mOutputBlobSize: %d", outputSize);
+    for (int k = 0; k < outputSize; ++k) {
+        int tmpDim = mOutputBlob[k].dims();
+        LOGE("Dim: %d: %d", k, tmpDim);
+        for (int kk = 0; kk < tmpDim; ++kk) {
+            LOGE("Dim_size: %d: %d", kk, (int)mOutputBlob[k].shape().dim_size(kk));
+        }
+    }
+
+    tensorflow::Scope root = tensorflow::Scope::NewRootScope();
+
+    tensorflow::string output_name = "top_k";
+
+    tensorflow::ops::TopK(root.WithOpName(output_name), mOutputBlob[0], mOutputNumbers);
+
+    // This runs the GraphDef network definition that we've just constructed, and
+    // returns the results in the output tensors.
+    tensorflow::GraphDef graph;
+    root.ToGraphDef(&graph);
+
+    std::unique_ptr<tensorflow::Session> _session(tensorflow::NewSession(tensorflow::SessionOptions()));
+    _session->Create(graph);
+
+    // The TopK node returns two outputs, the scores and their original indices,
+    // so we have to append :0 and :1 to specify them both.
+    std::vector<tensorflow::Tensor> out_tensors;
+    _session->Run({}, { output_name + ":0", output_name + ":1" }, {}, &out_tensors);
+
+    tensorflow::Tensor scores = out_tensors[0];
+    tensorflow::Tensor indices = out_tensors[1];
+
+    tensorflow::TTypes<float>::Flat scores_flat = scores.flat<float>();
+    tensorflow::TTypes<int32_t>::Flat indices_flat = indices.flat<int32_t>();
+
+    int classIdx = -1;
+    for (int idx = 0; idx < mOutputNumbers; ++idx) {
+        LOGE("idx:%d", idx);
+        classIdx = indices_flat(idx);
+        LOGI("classIdx: %d", classIdx);
+        LOGI("classProb: %f", scores_flat(idx));
+
+        results.indices.push_back(classIdx);
+        results.confidences.push_back(scores_flat(idx));
+        results.names.push_back(mUserListName[classIdx]);
+    }
+
+    results.number_of_classes = mOutputNumbers;
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Converts TF object-detection outputs into ObjectDetectionResults,
+// keeping detections with score above mThreshold and scaling the
+// normalized [ymin, xmin, ymax, xmax] boxes back to the source image.
+// Output tensor order (set by Run()): 0 = boxes, 1 = scores,
+// 2 = classes, 3 = num_detections.
+// NOTE(review): mUserListName[objectClass] is unchecked; a class id
+// beyond the label list is undefined behavior — confirm label coverage.
+int InferenceTF::GetInferenceResult(ObjectDetectionResults& results)
+{
+    int maxNodes = mGraphDef.node_size();
+    auto outputAttr = mGraphDef.node(maxNodes - 1).attr();
+
+    float confidence;
+    size_t objectClass;
+    int left, top, right, bottom;
+    cv::Rect loc;
+
+    tensorflow::AttrValue attrType = outputAttr["dtype"];
+
+    // %zu for size_t values — %d is undefined behavior on LP64 targets.
+    LOGE("sizeof mOutputBlob: %zu", mOutputBlob.size());
+    for (size_t k = 0; k < mOutputBlob.size(); ++k) {
+        int k_dims = mOutputBlob[k].shape().dims();
+        LOGE("%zu: dims %d", k, k_dims);
+        for (int kk = 0; kk < k_dims; ++kk) {
+            LOGE("dim[%d]: shape %d", kk, (int)mOutputBlob[k].shape().dim_size(kk));
+        }
+    }
+
+    tensorflow::TTypes<float>::Flat scores = mOutputBlob[1].flat<float>();
+    tensorflow::TTypes<float>::Flat classes = mOutputBlob[2].flat<float>();
+    tensorflow::TTypes<float>::Flat detections = mOutputBlob[3].flat<float>();
+    auto boxes = mOutputBlob[0].flat_outer_dims<float, 3>();
+
+    int number_of_detections = detections(0);
+
+    int number_of_objects = 0;
+    for (int idx = 0; idx < number_of_detections; ++idx) {
+        confidence = scores(idx);
+        if (confidence >(float)mThreshold) {
+            objectClass = (size_t)classes(idx);
+
+            // boxes are normalized [y1, x1, y2, x2]; rescale to source pixels.
+            left = static_cast<int>(boxes(0, idx, 1) * mSourceSize.width);
+            top = static_cast<int>(boxes(0, idx, 0) * mSourceSize.height);
+            right = static_cast<int>(boxes(0, idx, 3) * mSourceSize.width);
+            bottom = static_cast<int>(boxes(0, idx, 2) * mSourceSize.height);
+
+            loc.x = left;
+            loc.y = top;
+            loc.width = right - left + 1;
+            loc.height = bottom - top + 1;
+
+            LOGE("objectClass: %zu", objectClass);
+            LOGE("confidence:%f", confidence);
+            LOGE("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
+
+            results.indices.push_back(objectClass);
+            results.confidences.push_back(confidence);
+            results.names.push_back(mUserListName[objectClass]);
+            results.locations.push_back(loc);
+            number_of_objects++;
+        }
+    }
+
+    results.number_of_objects = number_of_objects;
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Converts TF object-detection outputs into FaceDetectionResults: same
+// tensor layout as the object-detection variant (0 = boxes, 1 = scores,
+// 2 = classes, 3 = num_detections), but only confidences and locations
+// are reported.
+int InferenceTF::GetInferenceResult(FaceDetectionResults& results)
+{
+    int maxNodes = mGraphDef.node_size();
+    auto outputAttr = mGraphDef.node(maxNodes-1).attr();
+
+    float confidence;
+    int left, top, right, bottom;
+    cv::Rect loc;
+
+    // Diagnostic only: log the declared dtype of the final graph node.
+    tensorflow::AttrValue attrType = outputAttr["dtype"];
+    tensorflow::DataType outputAttrType= attrType.type();
+    if (outputAttrType == tensorflow::DT_UINT8) {
+        LOGE("OutputAttrType is uint8");
+    } else if (outputAttrType == tensorflow::DT_FLOAT) {
+        LOGE("OutputAttrType is float");
+    } else {
+        LOGE("OutputAttrType is %d", outputAttrType);
+    }
+
+    // %zu for size_t values — %d is undefined behavior on LP64 targets.
+    LOGE("sizeof mOutputBlob: %zu", mOutputBlob.size());
+    for (size_t k = 0; k < mOutputBlob.size(); ++k) {
+        int k_dims = mOutputBlob[k].shape().dims();
+        LOGE("%zu: dims %d", k, k_dims);
+        for (int kk = 0; kk < k_dims; ++kk) {
+            LOGE("dim[%d]: shape %d", kk, (int)mOutputBlob[k].shape().dim_size(kk));
+        }
+    }
+
+    tensorflow::TTypes<float>::Flat scores = mOutputBlob[1].flat<float>();
+    tensorflow::TTypes<float>::Flat classes = mOutputBlob[2].flat<float>();
+    tensorflow::TTypes<float>::Flat detections = mOutputBlob[3].flat<float>();
+    auto boxes = mOutputBlob[0].flat_outer_dims<float, 3>();
+
+    int number_of_detections = detections(0);
+
+    int number_of_faces = 0;
+    for (int idx =0; idx < number_of_detections; ++idx) {
+        confidence = scores(idx);
+        if (confidence > (float)mThreshold) {
+
+            // boxes are normalized [y1, x1, y2, x2]; rescale to source pixels.
+            left = static_cast<int>(boxes(0,idx,1) * mSourceSize.width);
+            top = static_cast<int>(boxes(0,idx,0) * mSourceSize.height);
+            right = static_cast<int>(boxes(0,idx,3) * mSourceSize.width);
+            bottom = static_cast<int>(boxes(0,idx,2) * mSourceSize.height);
+
+            loc.x = left;
+            loc.y = top;
+            loc.width = right - left + 1;
+            loc.height = bottom - top + 1;
+
+            LOGE("objectClass: %zu", (size_t)classes(idx));
+            LOGE("confidence:%f", confidence);
+            LOGE("left:%d, top:%d, right:%d, bottom:%d", left, top, right, bottom);
+
+            results.confidences.push_back(confidence);
+            results.locations.push_back(loc);
+            number_of_faces++;
+        }
+    }
+
+    results.number_of_faces = number_of_faces;
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Facial landmark detection is not implemented by this backend.
+int InferenceTF::GetInferenceResult(FacialLandMarkDetectionResults& results)
+{
+    return INFERENCE_ENGINE_ERROR_NOT_SUPPORTED;
+}
+
+// Raw-output accessor: for each output tensor, reports its dimension
+// sizes in dimInfo and a pointer to its flat float data in results.
+// NOTE(review): the returned pointers alias mOutputBlob's storage and are
+// only valid until the next Run() replaces those tensors.
+int InferenceTF::GetInferenceResult(std::vector<std::vector<int>>& dimInfo, std::vector<float*>& results)
+{
+    dimInfo.clear();
+    results.clear();
+
+    std::vector<int> tmpDimInfo;
+    int tmpDims = 0;
+    for (int d = 0; d < mOutputBlob.size(); ++d) {
+        tmpDimInfo.clear();
+        tmpDims = mOutputBlob[d].shape().dims();
+
+        for (int dim = 0; dim < tmpDims; ++dim)
+            tmpDimInfo.push_back((int)mOutputBlob[d].shape().dim_size(dim));
+
+        dimInfo.push_back(tmpDimInfo);
+        results.push_back(mOutputBlob[d].flat<float>().data()); // 1-D mOutputBlob[1].flat<float>();
+    }
+
+    return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+// Returns the configured top-K result count (SetOutputTensorParamNumbers).
+int InferenceTF::GetNumberOfOutputs()
+{
+    return mOutputNumbers;
+}
+
+// Appends one class label to the label list used when naming results.
+void InferenceTF::SetUserListName(std::string userlist)
+{
+    mUserListName.push_back(userlist);
+}
+
+extern "C"
+{
+// Plugin entry points with C linkage — presumably resolved by name (e.g.
+// via dlsym) by the inference-engine interface layer after loading this
+// backend as a shared library; verify against the loader code.
+//
+// NOTE(review): the Destroy functions delete through interface pointers,
+// which is only safe if IInferenceEngineVision / IInferenceEngineCommon
+// declare virtual destructors — confirm in the interface headers.
+class IInferenceEngineVision* EngineVisionInit(std::string protoFile, std::string weightFile,
+ std::string userFile)
+{
+ InferenceTF *engine = new InferenceTF(protoFile, weightFile, userFile);
+ return engine;
+}
+
+void EngineVisionDestroy(class IInferenceEngineVision *engine)
+{
+ delete engine;
+}
+
+// Same concrete InferenceTF instance type as EngineVisionInit(), returned
+// through the common (non-vision) interface.
+class IInferenceEngineCommon* EngineCommonInit(std::string protoFile, std::string weightFile,
+ std::string userFile)
+{
+ InferenceTF *engine = new InferenceTF(protoFile, weightFile, userFile);
+ return engine;
+}
+
+void EngineCommonDestroy(class IInferenceEngineCommon *engine)
+{
+ delete engine;
+}
+}
+} /* TFImpl */
+} /* InferenceEngineImpl */
--- /dev/null
+/**
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_IMPL_TF_H__
+#define __INFERENCE_ENGINE_IMPL_TF_H__
+
+#include <inference_engine_common.h>
+#include <inference_engine_vision.h>
+
+#include "tensorflow/cc/ops/const_op.h"
+#include "tensorflow/cc/ops/image_ops.h"
+#include "tensorflow/cc/ops/standard_ops.h"
+#include "tensorflow/core/framework/graph.pb.h"
+#include "tensorflow/core/framework/tensor.h"
+#include "tensorflow/core/framework/node_def.pb.h"
+#include "tensorflow/core/framework/attr_value_util.h"
+#include "tensorflow/core/graph/default_device.h"
+#include "tensorflow/core/graph/graph_def_builder.h"
+#include "tensorflow/core/lib/core/errors.h"
+#include "tensorflow/core/lib/core/stringpiece.h"
+#include "tensorflow/core/lib/core/threadpool.h"
+#include "tensorflow/core/lib/io/path.h"
+#include "tensorflow/core/lib/strings/str_util.h"
+#include "tensorflow/core/lib/strings/stringprintf.h"
+#include "tensorflow/core/platform/env.h"
+#include "tensorflow/core/platform/init_main.h"
+#include "tensorflow/core/platform/logging.h"
+#include "tensorflow/core/platform/types.h"
+#include "tensorflow/core/public/session.h"
+#include "tensorflow/core/util/command_line_flags.h"
+
+#include <opencv2/core.hpp>
+#include <opencv2/imgproc.hpp>
+
+#include <memory>
+#include <dlog.h>
+
+/**
+* @file inference_engine_tf_private.h
+* @brief This file contains the InferenceTF class, which
+* provides TensorFlow-based inference functionality
+*/
+
+#ifdef LOG_TAG
+#undef LOG_TAG
+#endif
+
+#define LOG_TAG "INFERENCE_ENGINE_TF"
+
+using namespace InferenceEngineInterface::Vision;
+using namespace InferenceEngineInterface::Common;
+
+namespace InferenceEngineImpl {
+namespace TFImpl {
+
+/**
+ * @brief TensorFlow implementation of the inference-engine backend.
+ *        Holds a tensorflow::Session built from a GraphDef and implements
+ *        both the vision and common inference interfaces.
+ *
+ * NOTE(review): most virtuals below are marked 'override' but the
+ * GetInferenceResult() overloads are not — confirm they override interface
+ * methods and mark them consistently.
+ */
+class InferenceTF : public IInferenceEngineVision, public IInferenceEngineCommon {
+public:
+ InferenceTF(std::string protoFile,
+ std::string weightFile,
+ std::string userFile);
+
+ // NOTE(review): instances are deleted through interface pointers in the
+ // EngineVisionDestroy()/EngineCommonDestroy() factories; the interfaces
+ // must declare a virtual destructor for that to be safe — verify.
+ ~InferenceTF();
+
+ // Input Tensor Params
+
+ int SetInputTensorParam() override;
+
+ // Name of the graph node that receives the input tensor.
+ int SetInputTensorParamNode(std::string node = "input") override;
+
+ // Input geometry: width/height plus dimension and channel count.
+ int SetInputTensorParamInput(int width, int height, int dim, int ch) override;
+
+ // Normalization parameters (stored in mDeviation / mMean).
+ int SetInputTensorParamNorm(double deviation = 1.0, double mean = 0.0) override;
+
+ // Output Tensor Params
+ int SetOutputTensorParam() override;
+
+ // Confidence threshold used to filter detection results (mThreshold).
+ int SetOutputTensorParamThresHold(double threshold) override;
+
+ // Number of output tensors to expose (mOutputNumbers).
+ int SetOutputTensorParamNumbers(int number) override;
+
+ int SetOutputTensorParamType(int type) override;
+
+ // NOTE(review): "OutPut" casing is inconsistent with the other setters,
+ // but the name is fixed by the interface and cannot be changed here.
+ int SetOutPutTensorParamNodes(std::string node) override;
+
+ int SetTargetDevice(inference_target_type_e type) override;
+
+ int Load() override;
+
+ int CreateInputLayerPassage() override;
+
+ int PrepareInputLayerPassage() override;
+
+ int PrepareInputLayerPassage(inference_input_type_e type) override;
+
+ // Run inference on an OpenCV image / a raw float vector.
+ int Run(cv::Mat tensor) override;
+
+ int Run(std::vector<float> tensor) override;
+
+ int GetInferenceResult(ImageClassificationResults& results);
+
+ int GetInferenceResult(ObjectDetectionResults& results);
+
+ int GetInferenceResult(FaceDetectionResults& results);
+
+ // Not supported by this backend; returns
+ // INFERENCE_ENGINE_ERROR_NOT_SUPPORTED.
+ int GetInferenceResult(FacialLandMarkDetectionResults& results);
+
+ // Raw access: per-output-tensor shape vectors and float data pointers.
+ int GetInferenceResult(std::vector<std::vector<int>>& dimInfo, std::vector<float*>& results);
+
+ int GetNumberOfOutputs() override;
+
+ void SetUserListName(std::string userList) override;
+
+public:
+ int SetUserFile();
+ int setInput(cv::Mat cvImg);
+
+private:
+ tensorflow::Tensor mInputBlob; /**< Input tensor fed to the session */
+ std::vector<tensorflow::Tensor> mOutputBlob; /**< Output tensors of the last run */
+ tensorflow::GraphDef mGraphDef; /**< Loaded graph definition */
+ std::unique_ptr<tensorflow::Session> session; /**< TF session executing mGraphDef */
+ std::string mInputLayer; /**< Input layer name */
+ std::string mOutputLayer; /**< Output layer name */
+ tensorflow::DataType mInputAttrType; /**< Tensor data type */
+ int mMatType; /**< OpenCV Mat type corresponding to mInputAttrType */
+
+ void *mInputData; /**< Raw input data pointer — usage not visible here; verify against setInput() */
+ cv::Mat mInputBuffer; /**< Input staging buffer — presumably wraps mInputBlob's memory; verify */
+
+ int mCh; /**< Input channel count (see SetInputTensorParamInput) */
+ int mDim; /**< Input dimension (see SetInputTensorParamInput) */
+ cv::Size mInputSize; /**< input tensor size */
+
+ double mDeviation; /**< Normalization deviation (see SetInputTensorParamNorm) */
+ double mMean; /**< Normalization mean (see SetInputTensorParamNorm) */
+ double mThreshold; /**< Detection confidence threshold */
+ int mOutputNumbers; /**< Number of output tensors */
+ cv::Size mSourceSize; /**< input image's size */
+
+ std::string mConfigFile; /**< Model config file path — presumably the ctor's protoFile; verify */
+ std::string mWeightFile; /**< Model weight file path */
+ std::string mUserFile; /**< User file path — presumably a label list; verify */
+ std::vector<std::string> mUserListName; /**< Entries added via SetUserListName() */
+};
+
+} /* TFImpl */
+} /* InferenceEngineImpl */
+
+#endif /* __INFERENCE_ENGINE_IMPL_TF_H__ */
\ No newline at end of file