From c82182e83ec0bc3f9d3acb5cd84fd9f43ccb18b5 Mon Sep 17 00:00:00 2001
From: Hyunsoo Park
Date: Tue, 25 May 2021 20:21:06 +0900
Subject: [PATCH] Extract model information

Usage:
  inference_engine_cltuner [OPTION...] MODE MODELPATH
  e.g.) inference_engine_cltuner 1 /usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite

- The JSON file must be in the same location as the model file.
- The interactive loop is removed; the tool now runs once and exits.

JSON file example:
[
    {
        "image_paths" : "/opt/usr/images/image_classification.bin",
        "tensor_width" : 224,
        "tensor_height" : 224,
        "tensor_channels" : 3,
        "tensor_type" : 2,
        "tensor_shape" : 0,
        "input_layers" : [ "input_2" ],
        "output_layers" : [ "dense_3/Softmax" ],
        "model_paths" : "/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite"
    }
]

Change-Id: Icd33dda311acc1684e8eea4a96a30fc295ff9737
Signed-off-by: Hyunsoo Park
---
 packaging/inference-engine-interface.spec |   1 +
 tools/CMakeLists.txt                      |   2 +-
 tools/inference_engine_cltuner.cpp        | 308 +++++++++-------------
 3 files changed, 121 insertions(+), 190 deletions(-)

diff --git a/packaging/inference-engine-interface.spec b/packaging/inference-engine-interface.spec
index 1eeb9f8..57c971e 100644
--- a/packaging/inference-engine-interface.spec
+++ b/packaging/inference-engine-interface.spec
@@ -11,6 +11,7 @@ BuildRequires: pkgconfig(libtzplatform-config)
 BuildRequires: pkgconfig(python)
 BuildRequires: pkgconfig(iniparser)
 BuildRequires: pkgconfig(glib-2.0)
+BuildRequires: pkgconfig(json-glib-1.0)
 BuildRequires: gtest-devel
 
 %description
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 251690e..512ca32 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -8,7 +8,7 @@ set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
 find_package(GTest REQUIRED)
 set(GTEST_LIBRARY gtest)
 
-pkg_check_modules(${INFERENCE_ENGINE_CLTUNER} REQUIRED glib-2.0)
+pkg_check_modules(${INFERENCE_ENGINE_CLTUNER} REQUIRED glib-2.0 json-glib-1.0)
 FOREACH(flag ${${INFERENCE_ENGINE_CLTUNER}_CFLAGS})
 	SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
 ENDFOREACH(flag)
diff --git a/tools/inference_engine_cltuner.cpp b/tools/inference_engine_cltuner.cpp
index 4c52942..14bc722 100644
--- a/tools/inference_engine_cltuner.cpp
+++ b/tools/inference_engine_cltuner.cpp
@@ -18,6 +18,8 @@
 #include 
 #include 
 #include 
+#include <json-glib/json-glib.h>
+
 #include 
 #include 
 #include 
@@ -44,7 +46,7 @@ extern "C"
 #ifdef LOG_TAG
 #undef LOG_TAG
 #endif
-
+#define MAX_STR 256
 #define LOG_TAG "INFERENCE_ENGINE_CLTUNER"
 }
 #define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))
@@ -61,6 +63,8 @@ size_t width;
 size_t ch;
 int tensor_type;
 int tensor_shape;
+std::string model_path;
+std::string json_path;
 std::vector<std::string> image_paths;
 std::vector<std::string> input_layers;
 std::vector<std::string> output_layers;
@@ -71,166 +75,105 @@ inference_engine_layer_property output_property;
 
 int menu_idx=0;
 
-static void show_menu(const char *title, int idx){
+static void show_menu(const char *title){
 	g_print("*********************************************\n");
-	g_print("* %38s *\n", title);
+	g_print("* %-38s *\n", title);
 	g_print("*-------------------------------------------*\n");
-	if (idx == 0) {
-		g_print("* %2i. %34s *\n", 0, "INFERENCE_ENGINE_CLTUNER_READ");
-		g_print("* %2i. %34s *\n", 1, "INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE");
-		g_print("* %2i. %34s *\n", 2, "INFERENCE_ENGINE_CLTUNER_NORMAL");
-		g_print("* %2i. %34s *\n", 3, "INFERENCE_ENGINE_CLTUNER_RAPID");
-		g_print("* %2c. %34s *\n", 'q', "Exit");
-	} else if (idx == 1) {
-		g_print("* %2i. %34s *\n", 1, "ic_tflite_model.tflite");
-		g_print("* %2i. %34s *\n", 2, "ic_tflite_q_model.tflite");
-		g_print("* %2i. %34s *\n", 3, "od_tflite_model.tflite");
-		g_print("* %2i. %34s *\n", 4, "fd_tflite_model1.tflite");
-		g_print("* %2i. %34s *\n", 5, "ped_tflite_model.tflite");
-		g_print("* %2i. %34s *\n", 6, "posenet1_lite_224.tflite");
-		g_print("* %2i. %34s *\n", 7, "posenet2_lite_224.tflite");
-		g_print("* %2c. %34s *\n", 'q', "Exit");
-	}
+	g_print("* %-38s *\n", "Input Tuning mode and Model file");
+	g_print("* %-38s *\n", "ex)1 ic_tflite_model.tflite");
+	g_print("*-------------------------------------------*\n");
+	g_print("* %-38s *\n", "[MODE LIST]");
+	g_print("* %2i. %34s *\n", 1, "INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE");
+	g_print("* %2i. %34s *\n", 2, "INFERENCE_ENGINE_CLTUNER_NORMAL");
+	g_print("* %2i. %34s *\n", 3, "INFERENCE_ENGINE_CLTUNER_RAPID");
+	g_print("*-------------------------------------------*\n");
+	g_print("* %2c. %34s *\n", 'q', "Exit");
 	g_print("*********************************************\n\n");
 }
 
-static gboolean __interpret(char *cmd)
+static gboolean __interpret(char *cmd, char *cmd2)
 {
-	g_strsplit(cmd, " ", 0);
 	if(strncmp(cmd, "", 1) != 0) {
 		if (strncmp(cmd, "q", 1) == 0) {
-			g_main_loop_quit(g_loop);
+			return FALSE;
 		} else {
-			if (menu_idx == 0) {
-				cltuner.tuning_mode = (inference_engine_cltuner_mode_e)atoi(cmd);
-				g_print("tuning_mode is %d\n",cltuner.tuning_mode);
-				menu_idx = 1;
-				show_menu("Select Model", menu_idx);
-			} else if (menu_idx == 1) {
-				int idx = atoi(cmd);
-				switch (idx) {
-				case 1 :
-					g_print("ic_tflite_model.tflite is selected\n");
-					height=224;
-					width=224;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/image_classification.bin");
-					input_layers.push_back("input_2");
-					output_layers.push_back("dense_3/Softmax");
-					model_paths.push_back("/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite");
-					break;
-				case 2 :
-					g_print("ic_tflite_q_model.tflite is selected\n");
-					height=224;
-					width=224;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/image_classification_q.bin");
-					input_layers.push_back("input");
-					output_layers.push_back("MobilenetV1/Predictions/Reshape_1");
-					model_paths.push_back("/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite");
-					break;
-				case 3 :
-					g_print("od_tflite_model.tflite is selected\n");
-					height=300;
-					width=300;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/object_detection.bin");
-					input_layers.push_back("normalized_input_image_tensor");
-					output_layers.push_back("TFLite_Detection_PostProcess");
-					output_layers.push_back("TFLite_Detection_PostProcess:1");
-					output_layers.push_back("TFLite_Detection_PostProcess:2");
-					output_layers.push_back("TFLite_Detection_PostProcess:3");
-					model_paths.push_back("/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite");
-					break;
-				case 4 :
-					g_print("fd_tflite_model1.tflite is selected\n");
-					height=300;
-					width=300;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/face_detection.bin");
-					input_layers.push_back("normalized_input_image_tensor");
-					output_layers.push_back("TFLite_Detection_PostProcess");
-					output_layers.push_back("TFLite_Detection_PostProcess:1");
-					output_layers.push_back("TFLite_Detection_PostProcess:2");
-					output_layers.push_back("TFLite_Detection_PostProcess:3");
-					model_paths.push_back("/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite");
-					break;
-				case 5 :
-					g_print("ped_tflite_model.tflite is selected\n");
-					height=192;
-					width=192;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/pose_estimation.bin");
-					input_layers.push_back("image");
-					output_layers.push_back("Convolutional_Pose_Machine/stage_5_out");
-					model_paths.push_back("/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite");
-					break;
-				case 6 :
-					g_print("posenet1_lite_224.tflite is selected\n");
-					height=224;
-					width=224;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/hand.bin");
-					input_layers.push_back("input");
-					output_layers.push_back("mobilenetv2/boundingbox");
-					output_layers.push_back("mobilenetv2/heatmap");
-					model_paths.push_back("/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite");
-					break;
-				case 7 :
-					g_print("posenet2_lite_224.tflite is selected\n");
-					height=56;
-					width=56;
-					ch=21;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/hand.bin");
-					input_layers.push_back("input");
-					output_layers.push_back("mobilenetv2/coord_refine");
-					output_layers.push_back("mobilenetv2/gesture");
-					model_paths.push_back("/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite");
-					break;
-				default :
-					break;
+			char** value;
+			JsonParser *parser;
+			GError *error = NULL;
+			cltuner.tuning_mode = (inference_engine_cltuner_mode_e)atoi(cmd);
+			model_path = cmd2;
+			value = g_strsplit(cmd2, ".", 0);
+			json_path.append(value[0]);
+			json_path.append(".json");
+
+			LOGI("tuning_mode : [%d]\n", cltuner.tuning_mode);
+			LOGI("model_path : [%s]\n", model_path.c_str());
+			LOGI("jsonfile path [%s] \n", json_path.c_str());
+			g_strfreev(value);
+
+			parser = json_parser_new();
+			json_parser_load_from_file(parser, json_path.c_str(), &error);
+			if (error) {
+				LOGE("Unable to parse file '%s': %s\n", json_path.c_str(), error->message);
+				g_error_free(error);
+				g_object_unref(parser);
+				return FALSE;
+			}
+			JsonNode *root = json_parser_get_root(parser);
+			JsonArray *attr_array = json_node_get_array(root);
+
+			const guint attr_num = json_array_get_length(attr_array);
+
+			for (guint attrIdx = 0; attrIdx < attr_num; ++attrIdx) {
+				JsonNode *attr_node = json_array_get_element(attr_array, attrIdx);
+
+				JsonObject *attr_obj = json_node_get_object(attr_node);
+
+				image_paths.push_back(json_object_get_string_member(attr_obj, "image_paths"));
+				width = (int)json_object_get_int_member(attr_obj, "tensor_width");
+				height = (int)json_object_get_int_member(attr_obj, "tensor_height");
+				ch = (int)json_object_get_int_member(attr_obj, "tensor_channels");
+				tensor_type = (inference_tensor_data_type_e)json_object_get_int_member(attr_obj, "tensor_type");
+				tensor_shape = (inference_tensor_shape_type_e)json_object_get_int_member(attr_obj, "tensor_shape");
+				model_paths.push_back(json_object_get_string_member(attr_obj, "model_paths"));
+				JsonArray *t_layers = json_object_get_array_member(attr_obj, "input_layers");
+				guint layers_size = json_array_get_length(t_layers);
+				LOGI("input layers size : %d \n", layers_size);
+				guint idx = 0;
+				for (idx = 0; idx < layers_size; idx++)
+					input_layers.push_back(json_array_get_string_element(t_layers, idx));
 
-				inference_engine_tensor_info _input_tensor_info = {
-					std::vector<size_t>{1, ch, height, width},
-					(inference_tensor_shape_type_e)tensor_shape,
-					static_cast<inference_tensor_data_type_e>(tensor_type),
-					static_cast<size_t>(1 * ch * height * width)
-				};
-
-				inference_engine_tensor_info _output_tensor_info = {
-					std::vector<size_t>{1},
-					(inference_tensor_shape_type_e)tensor_shape,
-					(inference_tensor_data_type_e)tensor_type,
-					1
-				};
-				input_tensor_info = _input_tensor_info;
-				output_tensor_info = _output_tensor_info;
-				if (!process()) {
-					g_print("Error is occurred while doing process.\n ");
-					return FALSE;
+				t_layers = json_object_get_array_member(attr_obj, "output_layers");
+				layers_size = json_array_get_length(t_layers);
+				LOGI("output layers size : %d \n", layers_size);
+				for (idx = 0; idx < layers_size; idx++)
+					output_layers.push_back(json_array_get_string_element(t_layers, idx));
+
+				inference_engine_tensor_info _input_tensor_info = {
+					std::vector<size_t>{1, ch, height, width},
+					(inference_tensor_shape_type_e)tensor_shape,
+					static_cast<inference_tensor_data_type_e>(tensor_type),
+					static_cast<size_t>(1 * ch * height * width)
+				};
+
+				inference_engine_tensor_info _output_tensor_info = {
+					std::vector<size_t>{1},
+					(inference_tensor_shape_type_e)tensor_shape,
+					(inference_tensor_data_type_e)tensor_type,
+					1
+				};
+				input_tensor_info = _input_tensor_info;
+				output_tensor_info = _output_tensor_info;
+				if (!process()) {
+					LOGE("Error occurred while running process().\n");
+					return FALSE;
 				}
 			}
-		}
-	} else {
-		g_print("Please input command.\n");
 	}
 	return TRUE;
 }
@@ -246,36 +189,9 @@ void CheckResult(){
 	off_t fsize;
 	fsize = lseek(fd, 0, SEEK_END);
 	g_print("************TUNE FILE GENERATED**************\n");
-	g_print("Location \n[%s] \nSize \n[%lld]\n", tune_file.c_str(), fsize);
+	g_print("Location \n[%s] \nSize \n[%lld]\n", tune_file.c_str(), (long long)fsize);
 	g_print("*-------------------------------------------*\n\n\n");
 	close(fd);
-
-	show_menu("Select Tuning Mode", menu_idx);
-}
-static gboolean __input(GIOChannel *channel,
-		GIOCondition cond,
-		gpointer data)
-{
-	char buf[200];
-	gsize read;
-	GError *error = NULL;
-	if (data != nullptr) {
-		g_print("data: %p \n",data);
-		return FALSE;
-	}
-	if (cond == G_IO_ERR) {
-		g_print("G_IO_ERR is occurred. \n");
-		return FALSE;
-	}
-
-	g_io_channel_read_chars(channel, buf, 200, &read, &error);
-	buf[read] = '\0';
-	g_strstrip(buf);
-	if (!__interpret(buf)) {
-		g_print("Error is occurred while doing __interpret.\n ");
-		return FALSE;
-	}
-	return TRUE;
 }
 
 static gboolean process(){
@@ -304,8 +220,15 @@ static gboolean process(){
 		LOGE("GetBackendCapacity failed");
 		return FALSE;
 	}
-
-	mBackend->SetCLTuner(&cltuner);
+	if (capacity.cltuner_supported) {
+		cltuner.active = true;
+		cltuner.update = true;
+		ret = mBackend->SetCLTuner(&cltuner);
+		if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+			LOGE("SetCLTuner failed");
+			return FALSE;
+		}
+	}
 
 	ret = mBackend->SetTargetDevices(config.target_devices);
 	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
@@ -348,6 +271,7 @@ static gboolean process(){
 	IETensorBuffer inputs, outputs;
 	ret = PrepareTensorBuffers(mBackend, inputs, outputs);
+
 	if (ret != INFERENCE_ENGINE_ERROR_NONE) {
 		LOGE("PrepareTensorBuffers failed");
 		return FALSE;
@@ -364,7 +288,6 @@ static gboolean process(){
 	}
 
 	CleanupTensorBuffers(inputs, outputs);
-	CheckResult();
 	mBackend->UnbindBackend();
 	models.clear();
@@ -374,18 +297,25 @@
 int main ()
 {
 	int ret = 0;
-	GIOChannel *stdin_channel;
-	stdin_channel = g_io_channel_unix_new(0);
-	g_io_channel_set_flags(stdin_channel, G_IO_FLAG_NONBLOCK, NULL);
-	g_io_add_watch(stdin_channel, G_IO_IN, (GIOFunc)__input, NULL);
-
-	cltuner.active = true;
-	cltuner.update = true;
-
-
-	show_menu("Select Tuning Mode", menu_idx);
-	g_loop = g_main_loop_new(NULL, FALSE);
-	g_main_loop_run(g_loop);
+	char mode[MAX_STR] = {0};
+	char file_path[MAX_STR] = {0};
+
+	show_menu("CLtuner Generator");
+	if (scanf("%255s %255s", mode, file_path) != 2) {
+		g_print("Failed to read MODE and MODELPATH.\n");
+		return -1;
+	}
+	int _mode = atoi(mode);
+	if (_mode < 1 || _mode > 3) {
+		g_print("Check the tuning mode. It must be between EXHAUSTIVE(1) and RAPID(3).\n");
+		return -1;
+	}
+	char** value = g_strsplit(file_path, ".", 0);
+	if (value[0] == NULL || value[1] == NULL) {
+		g_print("Check the file path. Please write the full path, e.g. /root/model.tflite\n");
+		g_strfreev(value);
+		return -1;
+	}
+	g_strfreev(value);
+
+	if (!__interpret(mode, file_path))
+		return -1;
+	CheckResult();
 	return ret;
 }
-- 
2.34.1