#include <iostream>
#include <glib.h>
#include <glib/gprintf.h>
+#include <json-glib/json-glib.h>
+
#include <argp.h>
#include <string.h>
#include <tuple>
#ifdef LOG_TAG
#undef LOG_TAG
#endif
-
+#define MAX_STR 256
#define LOG_TAG "INFERENCE_ENGINE_CLTUNER"
}
#define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))
size_t ch;
int tensor_type;
int tensor_shape;
+std::string model_path;
+std::string json_path;
std::vector<std::string> image_paths;
std::vector<std::string> input_layers;
std::vector<std::string> output_layers;
int menu_idx=0;
-static void show_menu(const char *title, int idx){
+/* Print the one-shot CLTuner generator banner: the expected input form
+ * ("<mode> <model file>"), the tuning-mode list (1=EXHAUSTIVE, 2=NORMAL,
+ * 3=RAPID) and the quit key.  Replaces the old two-step menu that was
+ * driven by `idx`; `%-38s` left-aligns text inside the box. */
+static void show_menu(const char *title){
 	g_print("*********************************************\n");
-	g_print("* %38s *\n", title);
+	g_print("* %-38s *\n", title);
 	g_print("*-------------------------------------------*\n");
-	if (idx == 0) {
-		g_print("* %2i. %34s *\n", 0, "INFERENCE_ENGINE_CLTUNER_READ");
-		g_print("* %2i. %34s *\n", 1, "INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE");
-		g_print("* %2i. %34s *\n", 2, "INFERENCE_ENGINE_CLTUNER_NORMAL");
-		g_print("* %2i. %34s *\n", 3, "INFERENCE_ENGINE_CLTUNER_RAPID");
-		g_print("* %2c. %34s *\n", 'q', "Exit");
-	} else if (idx == 1) {
-		g_print("* %2i. %34s *\n", 1, "ic_tflite_model.tflite");
-		g_print("* %2i. %34s *\n", 2, "ic_tflite_q_model.tflite");
-		g_print("* %2i. %34s *\n", 3, "od_tflite_model.tflite");
-		g_print("* %2i. %34s *\n", 4, "fd_tflite_model1.tflite");
-		g_print("* %2i. %34s *\n", 5, "ped_tflite_model.tflite");
-		g_print("* %2i. %34s *\n", 6, "posenet1_lite_224.tflite");
-		g_print("* %2i. %34s *\n", 7, "posenet2_lite_224.tflite");
-		g_print("* %2c. %34s *\n", 'q', "Exit");
-	}
+	g_print("* %-38s *\n", "Input Tuning mode and Model file");
+	g_print("* %-38s *\n", "ex)1 ic_tflite_model.tflite");
+	g_print("*-------------------------------------------*\n");
+	g_print("* %-38s *\n", "[MODE LIST]");
+	g_print("* %2i. %34s *\n", 1, "INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE");
+	g_print("* %2i. %34s *\n", 2, "INFERENCE_ENGINE_CLTUNER_NORMAL");
+	g_print("* %2i. %34s *\n", 3, "INFERENCE_ENGINE_CLTUNER_RAPID");
+	g_print("*-------------------------------------------*\n");
+	g_print("* %2c. %34s *\n", 'q', "Exit");
 	g_print("*********************************************\n\n");
 }
-static gboolean __interpret(char *cmd)
+/* Parse "<mode>" and "<model file>" from the command line, load the model's
+ * companion "<model>.json" descriptor (image paths, tensor geometry, layer
+ * names) into the file-scope globals, then run process().
+ * Returns FALSE on "q", on a JSON parse/shape error, or when process()
+ * fails; TRUE otherwise.
+ * NOTE(review): assumes the descriptor sits next to the model with a .json
+ * suffix — confirm against the packaging layout. */
+static gboolean __interpret(char *cmd, char *cmd2)
 {
-	g_strsplit(cmd, " ", 0);
 	if(strncmp(cmd, "", 1) != 0) {
 		if (strncmp(cmd, "q", 1) == 0) {
-			g_main_loop_quit(g_loop);
+			return FALSE;
 		} else {
-			if (menu_idx == 0) {
-				cltuner.tuning_mode = (inference_engine_cltuner_mode_e)atoi(cmd);
-				g_print("tuning_mode is %d\n",cltuner.tuning_mode);
-				menu_idx = 1;
-				show_menu("Select Model", menu_idx);
-			} else if (menu_idx == 1) {
-				int idx = atoi(cmd);
-				switch (idx) {
-				case 1 :
-					g_print("ic_tflite_model.tflite is selected\n");
-					height=224;
-					width=224;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/image_classification.bin");
-					input_layers.push_back("input_2");
-					output_layers.push_back("dense_3/Softmax");
-					model_paths.push_back("/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite");
-					break;
-				case 2 :
-					g_print("ic_tflite_q_model.tflite is selected\n");
-					height=224;
-					width=224;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/image_classification_q.bin");
-					input_layers.push_back("input");
-					output_layers.push_back("MobilenetV1/Predictions/Reshape_1");
-					model_paths.push_back("/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite");
-					break;
-				case 3 :
-					g_print("od_tflite_model.tflite is selected\n");
-					height=300;
-					width=300;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/object_detection.bin");
-					input_layers.push_back("normalized_input_image_tensor");
-					output_layers.push_back("TFLite_Detection_PostProcess");
-					output_layers.push_back("TFLite_Detection_PostProcess:1");
-					output_layers.push_back("TFLite_Detection_PostProcess:2");
-					output_layers.push_back("TFLite_Detection_PostProcess:3");
-					model_paths.push_back("/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite");
-					break;
-				case 4 :
-					g_print("fd_tflite_model1.tflite is selected\n");
-					height=300;
-					width=300;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/face_detection.bin");
-					input_layers.push_back("normalized_input_image_tensor");
-					output_layers.push_back("TFLite_Detection_PostProcess");
-					output_layers.push_back("TFLite_Detection_PostProcess:1");
-					output_layers.push_back("TFLite_Detection_PostProcess:2");
-					output_layers.push_back("TFLite_Detection_PostProcess:3");
-					model_paths.push_back("/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite");
-					break;
-				case 5 :
-					g_print("ped_tflite_model.tflite is selected\n");
-					height=192;
-					width=192;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/pose_estimation.bin");
-					input_layers.push_back("image");
-					output_layers.push_back("Convolutional_Pose_Machine/stage_5_out");
-					model_paths.push_back("/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite");
-					break;
-				case 6 :
-					g_print("posenet1_lite_224.tflite is selected\n");
-					height=224;
-					width=224;
-					ch=3;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/hand.bin");
-					input_layers.push_back("input");
-					output_layers.push_back("mobilenetv2/boundingbox");
-					output_layers.push_back("mobilenetv2/heatmap");
-					model_paths.push_back("/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite");
-					break;
-				case 7 :
-					g_print("posenet2_lite_224.tflite is selected\n");
-					height=56;
-					width=56;
-					ch=21;
-					tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-					tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-					image_paths.push_back("/opt/usr/images/hand.bin");
-					input_layers.push_back("input");
-					output_layers.push_back("mobilenetv2/coord_refine");
-					output_layers.push_back("mobilenetv2/gesture");
-					model_paths.push_back("/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite");
-					break;
-				default :
-					break;
+			char** value;
+			JsonParser *parser;
+			GError *error = NULL;
+			cltuner.tuning_mode = (inference_engine_cltuner_mode_e)atoi(cmd);
+			model_path = cmd2;
+			/* Derive "<model>.json" from everything before the first '.'. */
+			value = g_strsplit(cmd2, ".", 0);
+			json_path.append(value[0]);
+			json_path.append(".json");
+
+			LOGI("tuning_mode : [%d]\n",cltuner.tuning_mode);
+			LOGI("model_path : [%s]\n", model_path.c_str());
+			LOGI("jsonfile path [%s] \n",json_path.c_str());
+			g_strfreev(value);
+
+			parser = json_parser_new();
+			json_parser_load_from_file(parser, json_path.c_str(), &error);
+			if (error) {
+				LOGE("Unable to parse file '%s': %s\n", json_path.c_str(), error->message);
+				g_error_free(error);
+				g_object_unref(parser);
+				return FALSE;
+			}
+			JsonNode *root = json_parser_get_root(parser);
+			JsonArray *attr_array = json_node_get_array(root);
+			/* Guard against a well-formed file whose root is not an array;
+			 * json_array_get_length() on NULL would crash otherwise. */
+			if (attr_array == NULL) {
+				LOGE("Root of '%s' is not a JSON array\n", json_path.c_str());
+				g_object_unref(parser);
+				return FALSE;
+			}
+
+			const guint attr_num = json_array_get_length(attr_array);
+
+			for (guint attrIdx = 0; attrIdx < attr_num; ++attrIdx) {
+				JsonNode *attr_node = json_array_get_element(attr_array, attrIdx);
+
+				JsonObject *attr_obj = json_node_get_object(attr_node);
+
+				image_paths.push_back(json_object_get_string_member(attr_obj, "image_paths"));
+				width = (int)json_object_get_int_member(attr_obj, "tensor_width");
+				height = (int)json_object_get_int_member(attr_obj, "tensor_height");
+				ch = (int)json_object_get_int_member(attr_obj, "tensor_channels");
+				tensor_type = (inference_tensor_data_type_e)json_object_get_int_member(attr_obj, "tensor_type");
+				tensor_shape = (inference_tensor_shape_type_e)json_object_get_int_member(attr_obj, "tensor_shape");
+				model_paths.push_back(json_object_get_string_member(attr_obj, "model_paths"));
+				JsonArray *t_layers = json_object_get_array_member(attr_obj,"input_layers");
+				guint layers_size = json_array_get_length(t_layers);
+				LOGI("input layers size : %d \n", layers_size);
+				guint idx = 0;
+				for(idx=0; idx<layers_size; ++idx){
+					LOGI("input layer name %s \n", json_array_get_string_element(t_layers, idx));
+					input_layers.push_back(json_array_get_string_element(t_layers, idx));
 				}
-				inference_engine_tensor_info _input_tensor_info = {
-					{ 1, ch, height, width },
-					(inference_tensor_shape_type_e)tensor_shape,
-					static_cast<inference_tensor_data_type_e>(tensor_type),
-					static_cast<size_t>(1 * ch * height * width)
-				};
-
-				inference_engine_tensor_info _output_tensor_info = {
-					std::vector<size_t>{1},
-					(inference_tensor_shape_type_e)tensor_shape,
-					(inference_tensor_data_type_e)tensor_type,
-					1
-				};
-				input_tensor_info = _input_tensor_info;
-				output_tensor_info = _output_tensor_info;
-				if (!process()) {
-					g_print("Error is occurred while doing process.\n ");
-					return FALSE;
+				t_layers = json_object_get_array_member(attr_obj,"output_layers");
+				layers_size = json_array_get_length(t_layers);
+				LOGI("output layers size : %d \n", layers_size);
+				for(idx = 0; idx<layers_size; ++idx){
+					LOGI("output layer name %s \n", json_array_get_string_element(t_layers, idx));
+					output_layers.push_back(json_array_get_string_element(t_layers, idx));
 				}
+			}
+			/* Every value has been copied out of the JSON tree (std::string
+			 * copies the borrowed C strings), so the parser — which owns the
+			 * whole tree — can be released now.  Without this the GObject
+			 * leaks on the success path. */
+			g_object_unref(parser);
-			} else {
-				g_print("unknown menu_idx [%d]", menu_idx);
+			inference_engine_tensor_info _input_tensor_info = {
+				{ 1, ch, height, width },
+				(inference_tensor_shape_type_e)tensor_shape,
+				static_cast<inference_tensor_data_type_e>(tensor_type),
+				static_cast<size_t>(1 * ch * height * width)
+			};
+
+			inference_engine_tensor_info _output_tensor_info = {
+				std::vector<size_t>{1},
+				(inference_tensor_shape_type_e)tensor_shape,
+				(inference_tensor_data_type_e)tensor_type,
+				1
+			};
+			input_tensor_info = _input_tensor_info;
+			output_tensor_info = _output_tensor_info;
+			if (!process()) {
+				LOGE("Error is occurred while doing process.\n ");
+				return FALSE;
 			}
 		}
-	} else {
-		g_print("Please input command.\n");
 	}
 	return TRUE;
 }
off_t fsize;
fsize = lseek(fd, 0, SEEK_END);
g_print("************TUNE FILE GENERATED**************\n");
- g_print("Location \n[%s] \nSize \n[%lld]\n", tune_file.c_str(), fsize);
+ g_print("Location \n[%s] \nSize \n[%lld]\n", tune_file.c_str(), (long long)fsize);
g_print("*-------------------------------------------*\n\n\n");
close(fd);
-
- show_menu("Select Tuning Mode", menu_idx);
-}
-static gboolean __input(GIOChannel *channel,
- GIOCondition cond,
- gpointer data)
-{
- char buf[200];
- gsize read;
- GError *error = NULL;
- if (data != nullptr) {
- g_print("data: %p \n",data);
- return FALSE;
- }
- if (cond == G_IO_ERR) {
- g_print("G_IO_ERR is occurred. \n");
- return FALSE;
- }
-
- g_io_channel_read_chars(channel, buf, 200, &read, &error);
- buf[read] = '\0';
- g_strstrip(buf);
- if (!__interpret(buf)) {
- g_print("Error is occurred while doing __interpret.\n ");
- return FALSE;
- }
- return TRUE;
}
static gboolean process(){
LOGE("GetBackendCapacity failed");
return FALSE;
}
-
- mBackend->SetCLTuner(&cltuner);
+ if (capacity.cltuner_supported) {
+ cltuner.active = true;
+ cltuner.update = true;
+ ret = mBackend->SetCLTuner(&cltuner);
+ if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+ LOGE("SetCLTuner failed");
+ return FALSE;
+ }
+ }
ret = mBackend->SetTargetDevices(config.target_devices);
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
IETensorBuffer inputs, outputs;
ret = PrepareTensorBuffers(mBackend, inputs, outputs);
+
if (ret != INFERENCE_ENGINE_ERROR_NONE) {
LOGE("PrepareTensorBuffers failed");
return FALSE;
}
CleanupTensorBuffers(inputs, outputs);
- CheckResult();
mBackend->UnbindBackend();
models.clear();
int main ()
{
int ret = 0;
- GIOChannel *stdin_channel;
- stdin_channel = g_io_channel_unix_new(0);
- g_io_channel_set_flags(stdin_channel, G_IO_FLAG_NONBLOCK, NULL);
- g_io_add_watch(stdin_channel, G_IO_IN, (GIOFunc)__input, NULL);
-
- cltuner.active = true;
- cltuner.update = true;
-
-
- show_menu("Select Tuning Mode", menu_idx);
- g_loop = g_main_loop_new(NULL, FALSE);
- g_main_loop_run(g_loop);
+ char mode[MAX_STR]={0};
+ char file_path[MAX_STR]={0};
+
+ show_menu("CLtuner Generator");
+ ret = scanf("%s %s", mode, file_path);
+ int _mode = atoi(mode);
+ if (_mode < 0 || _mode > 3 ) {
+ g_print("Check tuning mode. It could be out of between RAPID and EXHAUST mode.(1~3)\n");
+ return -1;
+ }
+ char** value;
+ value = g_strsplit(file_path, ".", 0);
+ g_r
+ if (value[0] == NULL || value[1] == NULL){
+ g_print("Check filepath. Please write full path. i.g /root/model.tflite\n");
+ return -1;
+ }
+ __interpret(mode,file_path); // validation check.
+ CheckResult();
return ret;
}