INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/${fw_name_common}.pc DESTINATION ${LIB_INSTALL_DIR}/pkgconfig)
ADD_SUBDIRECTORY(${PROJECT_SOURCE_DIR}/test)
+ADD_SUBDIRECTORY(${PROJECT_SOURCE_DIR}/tools)
IF(UNIX)
install -m 755 test/bin/inference_engine_tc %{buildroot}%{_bindir}
install -m 755 start_profiler.sh %{buildroot}%{_bindir}
install -m 666 test/res/*.bin %{buildroot}/opt/usr/images
-install -m 755 test/bin/inference_engine_cltuner %{buildroot}%{_bindir}
+install -m 755 tools/bin/inference_engine_cltuner %{buildroot}%{_bindir}
%post -p /sbin/ldconfig
%postun -p /sbin/ldconfig
set(INFERENCE_ENGINE_TEST_CASE inference_engine_tc)
set(INFERENCE_ENGINE_PROFILER inference_engine_profiler)
-set(INFERENCE_ENGINE_CLTUNER inference_engine_cltuner)
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
find_package(GTest REQUIRED)
set(GTEST_LIBRARY gtest)
-pkg_check_modules(${INFERENCE_ENGINE_CLTUNER} REQUIRED glib-2.0)
-FOREACH(flag ${${INFERENCE_ENGINE_CLTUNER}_CFLAGS})
- SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
-ENDFOREACH(flag)
-
if(NOT SKIP_WARNINGS)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CFLAGS} -Wall -Wextra -Werror")
endif()
${GTEST_MAIN_LIBRARY}
${INFERENCE_ENGINE_INTERFACE_LIB_NAME}
dl
- )
-
-add_executable(${INFERENCE_ENGINE_CLTUNER}
- ${INFER_GTEST_INC_LIST}
- ${PROJECT_SOURCE_DIR}/inference_engine_cltuner.cpp
- ${PROJECT_SOURCE_DIR}/inference_engine_test_common.cpp
- )
-
-target_link_libraries(${INFERENCE_ENGINE_CLTUNER} ${GTEST_LIBRARY}
- ${GTEST_MAIN_LIBRARY}
- ${INFERENCE_ENGINE_INTERFACE_LIB_NAME}
- ${${INFERENCE_ENGINE_CLTUNER}_LDFLAGS}
- glib-2.0
- dl
)
\ No newline at end of file
+++ /dev/null
-/**
- * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include <iostream>
-#include <glib.h>
-#include <glib/gprintf.h>
-#include <argp.h>
-#include <string.h>
-#include <tuple>
-#include <map>
-#include <fcntl.h>
-#include <unistd.h>
-#include <queue>
-#include <algorithm>
-
-#include <ctype.h>
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "inference_engine_private_type.h"
-#include "inference_engine_type.h"
-#include "inference_engine_error.h"
-#include "inference_engine_common_impl.h"
-#include "inference_engine_test_common.h"
-
-extern "C"
-{
-#include <dlog.h>
-
-#ifdef LOG_TAG
-#undef LOG_TAG
-#endif
-
-#define LOG_TAG "INFERENCE_ENGINE_CLTUNER"
-}
-#define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))
-using namespace InferenceEngineInterface::Common;
-
-static gboolean process();
-GMainLoop *g_loop;
-inference_engine_cltuner cltuner;
-inference_engine_tensor_info input_tensor_info;
-inference_engine_tensor_info output_tensor_info;
-int target_device;
-size_t height;
-size_t width;
-size_t ch;
-int tensor_type;
-int tensor_shape;
-std::vector<std::string> image_paths;
-std::vector<std::string> input_layers;
-std::vector<std::string> output_layers;
-std::vector<std::string> model_paths;
-std::vector<std::string> models;
-inference_engine_layer_property input_property;
-inference_engine_layer_property output_property;
-
-int menu_idx=0;
-
-static void show_menu(const char *title, int idx){
- g_print("*********************************************\n");
- g_print("* %38s *\n", title);
- g_print("*-------------------------------------------*\n");
- if (idx == 0) {
- g_print("* %2i. %34s *\n", 0, "INFERENCE_ENGINE_CLTUNER_READ");
- g_print("* %2i. %34s *\n", 1, "INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE");
- g_print("* %2i. %34s *\n", 2, "INFERENCE_ENGINE_CLTUNER_NORMAL");
- g_print("* %2i. %34s *\n", 3, "INFERENCE_ENGINE_CLTUNER_RAPID");
- g_print("* %2c. %34s *\n", 'q', "Exit");
- } else if (idx == 1) {
- g_print("* %2i. %34s *\n", 1, "ic_tflite_model.tflite");
- g_print("* %2i. %34s *\n", 2, "ic_tflite_q_model.tflite");
- g_print("* %2i. %34s *\n", 3, "od_tflite_model.tflite");
- g_print("* %2i. %34s *\n", 4, "fd_tflite_model1.tflite");
- g_print("* %2i. %34s *\n", 5, "ped_tflite_model.tflite");
- g_print("* %2i. %34s *\n", 6, "posenet1_lite_224.tflite");
- g_print("* %2i. %34s *\n", 7, "posenet2_lite_224.tflite");
- g_print("* %2c. %34s *\n", 'q', "Exit");
- }
- g_print("*********************************************\n\n");
-}
-
-static gboolean __interpret(char *cmd)
-{
- g_strsplit(cmd, " ", 0);
- if(strncmp(cmd, "", 1) != 0) {
- if (strncmp(cmd, "q", 1) == 0) {
- g_main_loop_quit(g_loop);
- } else {
- if (menu_idx == 0) {
- cltuner.tuning_mode = (inference_engine_cltuner_mode_e)atoi(cmd);
- g_print("tuning_mode is %d\n",cltuner.tuning_mode);
- menu_idx = 1;
- show_menu("Select Model", menu_idx);
- } else if (menu_idx == 1) {
- int idx = atoi(cmd);
- switch (idx) {
- case 1 :
- g_print("ic_tflite_model.tflite is selected\n");
- height=224;
- width=224;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/image_classification.bin");
- input_layers.push_back("input_2");
- output_layers.push_back("dense_3/Softmax");
- model_paths.push_back("/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite");
- break;
- case 2 :
- g_print("ic_tflite_q_model.tflite is selected\n");
- height=224;
- width=224;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/image_classification_q.bin");
- input_layers.push_back("input");
- output_layers.push_back("MobilenetV1/Predictions/Reshape_1");
- model_paths.push_back("/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite");
- break;
- case 3 :
- g_print("od_tflite_model.tflite is selected\n");
- height=300;
- width=300;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/object_detection.bin");
- input_layers.push_back("normalized_input_image_tensor");
- output_layers.push_back("TFLite_Detection_PostProcess");
- output_layers.push_back("TFLite_Detection_PostProcess:1");
- output_layers.push_back("TFLite_Detection_PostProcess:2");
- output_layers.push_back("TFLite_Detection_PostProcess:3");
- model_paths.push_back("/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite");
- break;
- case 4 :
- g_print("fd_tflite_model1.tflite is selected\n");
- height=300;
- width=300;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/face_detection.bin");
- input_layers.push_back("normalized_input_image_tensor");
- output_layers.push_back("TFLite_Detection_PostProcess");
- output_layers.push_back("TFLite_Detection_PostProcess:1");
- output_layers.push_back("TFLite_Detection_PostProcess:2");
- output_layers.push_back("TFLite_Detection_PostProcess:3");
- model_paths.push_back("/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite");
- break;
- case 5 :
- g_print("ped_tflite_model.tflite is selected\n");
- height=192;
- width=192;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/pose_estimation.bin");
- input_layers.push_back("image");
- output_layers.push_back("Convolutional_Pose_Machine/stage_5_out");
- model_paths.push_back("/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite");
- break;
- case 6 :
- g_print("posenet1_lite_224.tflite is selected\n");
- height=224;
- width=224;
- ch=3;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/hand.bin");
- input_layers.push_back("input");
- output_layers.push_back("mobilenetv2/boundingbox");
- output_layers.push_back("mobilenetv2/heatmap");
- model_paths.push_back("/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite");
- break;
- case 7 :
- g_print("posenet2_lite_224.tflite is selected\n");
- height=56;
- width=56;
- ch=21;
- tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
- tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
- image_paths.push_back("/opt/usr/images/hand.bin");
- input_layers.push_back("input");
- output_layers.push_back("mobilenetv2/coord_refine");
- output_layers.push_back("mobilenetv2/gesture");
- model_paths.push_back("/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite");
- break;
- default :
- break;
- }
-
- inference_engine_tensor_info _input_tensor_info = {
- { 1, ch, height, width },
- (inference_tensor_shape_type_e)tensor_shape,
- static_cast<inference_tensor_data_type_e>(tensor_type),
- static_cast<size_t>(1 * ch * height * width)
- };
-
- inference_engine_tensor_info _output_tensor_info = {
- std::vector<size_t>{1},
- (inference_tensor_shape_type_e)tensor_shape,
- (inference_tensor_data_type_e)tensor_type,
- 1
- };
- input_tensor_info = _input_tensor_info;
- output_tensor_info = _output_tensor_info;
- if (!process()) {
- g_print("Error is occurred while doing process.\n ");
- return FALSE;
- }
-
- } else {
- g_print("unknown menu_idx [%d]", menu_idx);
- }
- }
- } else {
- g_print("Please input command.\n");
- }
- return TRUE;
-}
-
-void CheckResult(){
- std::string tune_file = model_paths[0];
- tune_file.append(".tune");
- int fd = open(tune_file.c_str(), O_RDONLY);
- if (fd == -1) {
- g_print("tune file generation failed!!\n");
- return;
- }
- off_t fsize;
- fsize = lseek(fd, 0, SEEK_END);
- g_print("************TUNE FILE GENERATED**************\n");
- g_print("Location \n[%s] \nSize \n[%lld]\n", tune_file.c_str(), fsize);
- g_print("*-------------------------------------------*\n\n\n");
- close(fd);
-
- show_menu("Select Tuning Mode", menu_idx);
-}
-static gboolean __input(GIOChannel *channel,
- GIOCondition cond,
- gpointer data)
-{
- char buf[200];
- gsize read;
- GError *error = NULL;
- if (data != nullptr) {
- g_print("data: %p \n",data);
- return FALSE;
- }
- if (cond == G_IO_ERR) {
- g_print("G_IO_ERR is occurred. \n");
- return FALSE;
- }
-
- g_io_channel_read_chars(channel, buf, 200, &read, &error);
- buf[read] = '\0';
- g_strstrip(buf);
- if (!__interpret(buf)) {
- g_print("Error is occurred while doing __interpret.\n ");
- return FALSE;
- }
- return TRUE;
-}
-
-static gboolean process(){
- InferenceEngineCommon *mBackend;
- inference_engine_config config = {
- .backend_name = "armnn",
- .backend_type = INFERENCE_BACKEND_ARMNN,
- .target_devices = INFERENCE_TARGET_GPU
- };
-
- int ret = 0;
- mBackend = new InferenceEngineCommon();
- ret = mBackend->LoadConfigFile();
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("LoadConfigFile(); failed");
- return FALSE;
- }
- ret = mBackend->BindBackend(&config);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("BindBackend failed");
- return FALSE;
- }
- inference_engine_capacity capacity;
- ret = mBackend->GetBackendCapacity(&capacity);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("GetBackendCapacity failed");
- return FALSE;
- }
-
- mBackend->SetCLTuner(&cltuner);
-
- ret = mBackend->SetTargetDevices(config.target_devices);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("SetTargetDevices failed");
- return FALSE;
- }
-
- int model_type = GetModelInfo(model_paths, models);
- if (model_type <= INFERENCE_MODEL_NONE) {
- LOGE("GetModelInfo failed");
- return FALSE;
- }
-
- for (auto& input : input_layers) {
- input_property.layers.insert(std::make_pair(input, input_tensor_info));
- }
-
- ret = mBackend->SetInputLayerProperty(input_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("SetInputLayerProperty failed");
- return FALSE;
- }
-
-
- for (auto& layer : output_layers) {
- output_property.layers.insert(std::make_pair(layer, output_tensor_info));
- }
-
- ret = mBackend->SetOutputLayerProperty(output_property);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("SetOutputLayerProperty failed");
- return FALSE;
- }
-
- ret = mBackend->Load(models, (inference_model_format_e) model_type);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Load failed");
- return FALSE;
- }
-
- IETensorBuffer inputs, outputs;
- ret = PrepareTensorBuffers(mBackend, inputs, outputs);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("PrepareTensorBuffers failed");
- return FALSE;
- }
- int imageIndex = 0;
- for (auto& input : inputs) {
- CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
- }
-
- ret = mBackend->Run(inputs, outputs);
- if (ret != INFERENCE_ENGINE_ERROR_NONE) {
- LOGE("Run failed");
- return FALSE;
- }
-
- CleanupTensorBuffers(inputs, outputs);
- CheckResult();
- mBackend->UnbindBackend();
- models.clear();
-
- return TRUE;
-}
-
-int main ()
-{
- int ret = 0;
- GIOChannel *stdin_channel;
- stdin_channel = g_io_channel_unix_new(0);
- g_io_channel_set_flags(stdin_channel, G_IO_FLAG_NONBLOCK, NULL);
- g_io_add_watch(stdin_channel, G_IO_IN, (GIOFunc)__input, NULL);
-
- cltuner.active = true;
- cltuner.update = true;
-
-
- show_menu("Select Tuning Mode", menu_idx);
- g_loop = g_main_loop_new(NULL, FALSE);
- g_main_loop_run(g_loop);
-
- return ret;
-}
--- /dev/null
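+# Build script for the standalone inference_engine_cltuner tool.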
+project(inference_engine_test)
+
+set(INFERENCE_ENGINE_CLTUNER inference_engine_cltuner)
+
+set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
+
+
+find_package(GTest REQUIRED)
+set(GTEST_LIBRARY gtest)
+
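+# Resolve glib-2.0 via pkg-config; its CFLAGS are appended to EXTRA_CFLAGS below
+# and its LDFLAGS are used when linking the tool.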
+pkg_check_modules(${INFERENCE_ENGINE_CLTUNER} REQUIRED glib-2.0)
+FOREACH(flag ${${INFERENCE_ENGINE_CLTUNER}_CFLAGS})
+ SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
+ENDFOREACH(flag)
+
+if(NOT SKIP_WARNINGS)
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CFLAGS} -Wall -Wextra -Werror")
+endif()
+SET(INFERENCE_ENGINE_INTERFACE_LIB_NAME "inference-engine-interface-common")
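+# Place the built binary under tools/bin/ so the spec %install step picks it up from there.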
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tools/bin/)
+
+include_directories(${PROJECT_SOURCE_DIR})
+include_directories(${CMAKE_BINARY_DIR}/test/src)
+include_directories(${CMAKE_BINARY_DIR}/include)
+include_directories(/usr/include/gtest)
+include_directories(${${INFERENCE_ENGINE_CLTUNER}_INCLUDE_DIRS})
+MESSAGE(STATUS "CMAKE_BINARY_DIR: ${CMAKE_BINARY_DIR}")
+
+file(GLOB INFER_GTEST_INC_LIST "${PROJECT_SOURCE_DIR}/*.h")
+
+add_executable(${INFERENCE_ENGINE_CLTUNER}
+ ${INFER_GTEST_INC_LIST}
+ ${PROJECT_SOURCE_DIR}/inference_engine_cltuner.cpp
+ ${CMAKE_BINARY_DIR}/test/src/inference_engine_test_common.cpp
+ )
+
+target_link_libraries(${INFERENCE_ENGINE_CLTUNER} ${GTEST_LIBRARY}
+ ${GTEST_MAIN_LIBRARY}
+ ${INFERENCE_ENGINE_INTERFACE_LIB_NAME}
+ ${${INFERENCE_ENGINE_CLTUNER}_LDFLAGS}
+ glib-2.0
+ dl
+ )
\ No newline at end of file
--- /dev/null
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include <iostream>
+#include <glib.h>
+#include <glib/gprintf.h>
+#include <argp.h>
+#include <string.h>
+#include <tuple>
+#include <map>
+#include <fcntl.h>
+#include <unistd.h>
+#include <queue>
+#include <algorithm>
+
+#include <ctype.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "inference_engine_private_type.h"
+#include "inference_engine_type.h"
+#include "inference_engine_error.h"
+#include "inference_engine_common_impl.h"
+#include "inference_engine_test_common.h"
+
+extern "C"
+{
+#include <dlog.h>
+
+#ifdef LOG_TAG
+#undef LOG_TAG
+#endif
+
+#define LOG_TAG "INFERENCE_ENGINE_CLTUNER"
+}
+#define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))
+using namespace InferenceEngineInterface::Common;
+
+static gboolean process();
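+
+/* State shared between the interactive menu handler and the tuning run. */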
+GMainLoop *g_loop;
+inference_engine_cltuner cltuner;
+inference_engine_tensor_info input_tensor_info;
+inference_engine_tensor_info output_tensor_info;
+int target_device;
+size_t height;
+size_t width;
+size_t ch;
+int tensor_type;
+int tensor_shape;
+std::vector<std::string> image_paths;
+std::vector<std::string> input_layers;
+std::vector<std::string> output_layers;
+std::vector<std::string> model_paths;
+std::vector<std::string> models;
+inference_engine_layer_property input_property;
+inference_engine_layer_property output_property;
+
+int menu_idx=0;
+
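+/* Print the tuning-mode menu (idx == 0) or the model-selection menu (idx == 1). */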
+static void show_menu(const char *title, int idx){
+ g_print("*********************************************\n");
+ g_print("* %38s *\n", title);
+ g_print("*-------------------------------------------*\n");
+ if (idx == 0) {
+ g_print("* %2i. %34s *\n", 0, "INFERENCE_ENGINE_CLTUNER_READ");
+ g_print("* %2i. %34s *\n", 1, "INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE");
+ g_print("* %2i. %34s *\n", 2, "INFERENCE_ENGINE_CLTUNER_NORMAL");
+ g_print("* %2i. %34s *\n", 3, "INFERENCE_ENGINE_CLTUNER_RAPID");
+ g_print("* %2c. %34s *\n", 'q', "Exit");
+ } else if (idx == 1) {
+ g_print("* %2i. %34s *\n", 1, "ic_tflite_model.tflite");
+ g_print("* %2i. %34s *\n", 2, "ic_tflite_q_model.tflite");
+ g_print("* %2i. %34s *\n", 3, "od_tflite_model.tflite");
+ g_print("* %2i. %34s *\n", 4, "fd_tflite_model1.tflite");
+ g_print("* %2i. %34s *\n", 5, "ped_tflite_model.tflite");
+ g_print("* %2i. %34s *\n", 6, "posenet1_lite_224.tflite");
+ g_print("* %2i. %34s *\n", 7, "posenet2_lite_224.tflite");
+ g_print("* %2c. %34s *\n", 'q', "Exit");
+ }
+ g_print("*********************************************\n\n");
+}
+
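+/* Handle one line of user input: pick the tuning mode first, then the model, then run the tuning pass. */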
+static gboolean __interpret(char *cmd)
+{
+ if(strncmp(cmd, "", 1) != 0) {
+ if (strncmp(cmd, "q", 1) == 0) {
+ g_main_loop_quit(g_loop);
+ } else {
+ if (menu_idx == 0) {
+ cltuner.tuning_mode = (inference_engine_cltuner_mode_e)atoi(cmd);
+ g_print("tuning_mode is %d\n",cltuner.tuning_mode);
+ menu_idx = 1;
+ show_menu("Select Model", menu_idx);
+ } else if (menu_idx == 1) {
+ int idx = atoi(cmd);
+ switch (idx) {
+ case 1 :
+ g_print("ic_tflite_model.tflite is selected\n");
+ height=224;
+ width=224;
+ ch=3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/image_classification.bin");
+ input_layers.push_back("input_2");
+ output_layers.push_back("dense_3/Softmax");
+ model_paths.push_back("/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite");
+ break;
+ case 2 :
+ g_print("ic_tflite_q_model.tflite is selected\n");
+ height=224;
+ width=224;
+ ch=3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/image_classification_q.bin");
+ input_layers.push_back("input");
+ output_layers.push_back("MobilenetV1/Predictions/Reshape_1");
+ model_paths.push_back("/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite");
+ break;
+ case 3 :
+ g_print("od_tflite_model.tflite is selected\n");
+ height=300;
+ width=300;
+ ch=3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/object_detection.bin");
+ input_layers.push_back("normalized_input_image_tensor");
+ output_layers.push_back("TFLite_Detection_PostProcess");
+ output_layers.push_back("TFLite_Detection_PostProcess:1");
+ output_layers.push_back("TFLite_Detection_PostProcess:2");
+ output_layers.push_back("TFLite_Detection_PostProcess:3");
+ model_paths.push_back("/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite");
+ break;
+ case 4 :
+ g_print("fd_tflite_model1.tflite is selected\n");
+ height=300;
+ width=300;
+ ch=3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/face_detection.bin");
+ input_layers.push_back("normalized_input_image_tensor");
+ output_layers.push_back("TFLite_Detection_PostProcess");
+ output_layers.push_back("TFLite_Detection_PostProcess:1");
+ output_layers.push_back("TFLite_Detection_PostProcess:2");
+ output_layers.push_back("TFLite_Detection_PostProcess:3");
+ model_paths.push_back("/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite");
+ break;
+ case 5 :
+ g_print("ped_tflite_model.tflite is selected\n");
+ height=192;
+ width=192;
+ ch=3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/pose_estimation.bin");
+ input_layers.push_back("image");
+ output_layers.push_back("Convolutional_Pose_Machine/stage_5_out");
+ model_paths.push_back("/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite");
+ break;
+ case 6 :
+ g_print("posenet1_lite_224.tflite is selected\n");
+ height=224;
+ width=224;
+ ch=3;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/hand.bin");
+ input_layers.push_back("input");
+ output_layers.push_back("mobilenetv2/boundingbox");
+ output_layers.push_back("mobilenetv2/heatmap");
+ model_paths.push_back("/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite");
+ break;
+ case 7 :
+ g_print("posenet2_lite_224.tflite is selected\n");
+ height=56;
+ width=56;
+ ch=21;
+ tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+ tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
+ image_paths.push_back("/opt/usr/images/hand.bin");
+ input_layers.push_back("input");
+ output_layers.push_back("mobilenetv2/coord_refine");
+ output_layers.push_back("mobilenetv2/gesture");
+ model_paths.push_back("/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite");
+ break;
+ default :
+ break;
+ }
+
+ inference_engine_tensor_info _input_tensor_info = {
+ { 1, ch, height, width },
+ (inference_tensor_shape_type_e)tensor_shape,
+ static_cast<inference_tensor_data_type_e>(tensor_type),
+ static_cast<size_t>(1 * ch * height * width)
+ };
+
+ inference_engine_tensor_info _output_tensor_info = {
+ std::vector<size_t>{1},
+ (inference_tensor_shape_type_e)tensor_shape,
+ (inference_tensor_data_type_e)tensor_type,
+ 1
+ };
+ input_tensor_info = _input_tensor_info;
+ output_tensor_info = _output_tensor_info;
+ if (!process()) {
+					g_print("An error occurred while running process().\n");
+ return FALSE;
+ }
+
+ } else {
+				g_print("Unknown menu_idx [%d]\n", menu_idx);
+ }
+ }
+ } else {
+ g_print("Please input command.\n");
+ }
+ return TRUE;
+}
+
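+/* Verify that <model_path>.tune was generated, report its location and size, and show the menu again. */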
+void CheckResult(){
+ std::string tune_file = model_paths[0];
+ tune_file.append(".tune");
+ int fd = open(tune_file.c_str(), O_RDONLY);
+ if (fd == -1) {
+ g_print("tune file generation failed!!\n");
+ return;
+ }
+ off_t fsize;
+ fsize = lseek(fd, 0, SEEK_END);
+ g_print("************TUNE FILE GENERATED**************\n");
+	g_print("Location \n[%s] \nSize \n[%lld]\n", tune_file.c_str(), (long long) fsize);
+ g_print("*-------------------------------------------*\n\n\n");
+ close(fd);
+
+ show_menu("Select Tuning Mode", menu_idx);
+}
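+
+/* GIOChannel watch callback: read a line from stdin and pass it to __interpret(). */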
+static gboolean __input(GIOChannel *channel,
+ GIOCondition cond,
+ gpointer data)
+{
+ char buf[200];
+ gsize read;
+ GError *error = NULL;
+ if (data != nullptr) {
+ g_print("data: %p \n",data);
+ return FALSE;
+ }
+ if (cond == G_IO_ERR) {
+		g_print("G_IO_ERR occurred.\n");
+ return FALSE;
+ }
+
+	g_io_channel_read_chars(channel, buf, sizeof(buf) - 1, &read, &error);
+ buf[read] = '\0';
+ g_strstrip(buf);
+ if (!__interpret(buf)) {
+		g_print("An error occurred while running __interpret().\n");
+ return FALSE;
+ }
+ return TRUE;
+}
+
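+/*
+ * Bind the ARMNN backend on the GPU with the CL tuner enabled, load the
+ * selected model, run a single inference so the tune file is generated,
+ * and then verify the result.
+ */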
+static gboolean process(){
+ InferenceEngineCommon *mBackend;
+ inference_engine_config config = {
+ .backend_name = "armnn",
+ .backend_type = INFERENCE_BACKEND_ARMNN,
+ .target_devices = INFERENCE_TARGET_GPU
+ };
+
+ int ret = 0;
+ mBackend = new InferenceEngineCommon();
+ ret = mBackend->LoadConfigFile();
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+		LOGE("LoadConfigFile() failed");
+ return FALSE;
+ }
+ ret = mBackend->BindBackend(&config);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("BindBackend failed");
+ return FALSE;
+ }
+ inference_engine_capacity capacity;
+ ret = mBackend->GetBackendCapacity(&capacity);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("GetBackendCapacity failed");
+ return FALSE;
+ }
+
+ mBackend->SetCLTuner(&cltuner);
+
+ ret = mBackend->SetTargetDevices(config.target_devices);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("SetTargetDevices failed");
+ return FALSE;
+ }
+
+ int model_type = GetModelInfo(model_paths, models);
+ if (model_type <= INFERENCE_MODEL_NONE) {
+ LOGE("GetModelInfo failed");
+ return FALSE;
+ }
+
+ for (auto& input : input_layers) {
+ input_property.layers.insert(std::make_pair(input, input_tensor_info));
+ }
+
+ ret = mBackend->SetInputLayerProperty(input_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("SetInputLayerProperty failed");
+ return FALSE;
+ }
+
+
+ for (auto& layer : output_layers) {
+ output_property.layers.insert(std::make_pair(layer, output_tensor_info));
+ }
+
+ ret = mBackend->SetOutputLayerProperty(output_property);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("SetOutputLayerProperty failed");
+ return FALSE;
+ }
+
+ ret = mBackend->Load(models, (inference_model_format_e) model_type);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Load failed");
+ return FALSE;
+ }
+
+ IETensorBuffer inputs, outputs;
+ ret = PrepareTensorBuffers(mBackend, inputs, outputs);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("PrepareTensorBuffers failed");
+ return FALSE;
+ }
+ int imageIndex = 0;
+ for (auto& input : inputs) {
+ CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
+ }
+
+ ret = mBackend->Run(inputs, outputs);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("Run failed");
+ return FALSE;
+ }
+
+ CleanupTensorBuffers(inputs, outputs);
+ CheckResult();
+ mBackend->UnbindBackend();
+ models.clear();
+
+ return TRUE;
+}
+
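+/* Enable the CL tuner, watch stdin for menu input, and run the GLib main loop. */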
+int main ()
+{
+ int ret = 0;
+ GIOChannel *stdin_channel;
+ stdin_channel = g_io_channel_unix_new(0);
+ g_io_channel_set_flags(stdin_channel, G_IO_FLAG_NONBLOCK, NULL);
+ g_io_add_watch(stdin_channel, G_IO_IN, (GIOFunc)__input, NULL);
+
+ cltuner.active = true;
+ cltuner.update = true;
+
+
+ show_menu("Select Tuning Mode", menu_idx);
+ g_loop = g_main_loop_new(NULL, FALSE);
+ g_main_loop_run(g_loop);
+
+ return ret;
+}