[CLtuner] Refactoring CLtuner to use JSON parser 66/265266/12
author Hyunsoo Park <hance.park@samsung.com>
Tue, 5 Oct 2021 04:09:19 +0000 (13:09 +0900)
committer Hyunsoo Park <hance.park@samsung.com>
Thu, 4 Nov 2021 03:54:05 +0000 (12:54 +0900)
Change-Id: I79fb610fe4d09c54d3bd6f0898c9fc0ed46e8edf
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>
13 files changed:
packaging/inference-engine-interface.spec
tools/CMakeLists.txt
tools/include/InputMetadata.h [new file with mode: 0644]
tools/include/Metadata.h [new file with mode: 0644]
tools/include/OutputMetadata.h [new file with mode: 0644]
tools/include/OutputMetadataTypes.h [new file with mode: 0644]
tools/include/inference_engine_cltuner.h [new file with mode: 0644]
tools/inference_engine_cltuner.cpp [deleted file]
tools/src/CMakeLists.txt [new file with mode: 0644]
tools/src/InputMetadata.cpp [new file with mode: 0644]
tools/src/Metadata.cpp [new file with mode: 0644]
tools/src/OutputMetadata.cpp [new file with mode: 0755]
tools/src/inference_engine_cltuner.cpp [new file with mode: 0644]

index d94b291dd3cffd0bad8f368d363f02279174fba2..4ec1b4f7b74c76330d4bcc570538f8c77becd4e5 100644 (file)
@@ -1,6 +1,6 @@
 Name:        inference-engine-interface
 Summary:     Interface of inference engines
-Version:     0.0.2
+Version:     0.0.3
 Release:     14
 Group:       Multimedia/Framework
 License:     Apache-2.0
@@ -11,6 +11,7 @@ BuildRequires: pkgconfig(libtzplatform-config)
 BuildRequires: pkgconfig(python)
 BuildRequires: pkgconfig(iniparser)
 BuildRequires: pkgconfig(glib-2.0)
+BuildRequires: pkgconfig(json-glib-1.0)
 BuildRequires: gtest-devel
 
 %description
index 251690ec8e55ffa7654d192e0861b92a8f683eb9..a228f8fa1e417971d472a0b95578033d2dd9d891 100644 (file)
@@ -1,43 +1,10 @@
-project(inference_engine_test)
+cmake_minimum_required(VERSION 2.6)
 
-set(INFERENCE_ENGINE_CLTUNER inference_engine_cltuner)
-
-set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
-
-
-find_package(GTest REQUIRED)
-set(GTEST_LIBRARY gtest)
-
-pkg_check_modules(${INFERENCE_ENGINE_CLTUNER} REQUIRED glib-2.0)
-FOREACH(flag ${${INFERENCE_ENGINE_CLTUNER}_CFLAGS})
-    SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
-ENDFOREACH(flag)
+SET(INFERENCE_ENGINE_INTERFACE_LIB_NAME "inference-engine-interface-common")
 
 if(NOT SKIP_WARNINGS)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CFLAGS} -Wall -Wextra -Werror")
 endif()
-SET(INFERENCE_ENGINE_INTERFACE_LIB_NAME "inference-engine-interface-common")
-set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tools/bin/)
-
-include_directories(${PROJECT_SOURCE_DIR})
-include_directories(${CMAKE_BINARY_DIR}/test/src)
-include_directories(${CMAKE_BINARY_DIR}/include)
-include_directories(/usr/include/gtest)
-include_directories(${GLIB_INCLUDE_DIRS})
-MESSAGE(STATUS ${CMAKE_BINARY_DIR} "is cmake_binary_dir")
-
-file(GLOB INFER_GTEST_INC_LIST "${PROJECT_SOURCE_DIR}/*.h")
-
-add_executable(${INFERENCE_ENGINE_CLTUNER}
-               ${INFER_GTEST_INC_LIST}
-               ${PROJECT_SOURCE_DIR}/inference_engine_cltuner.cpp
-               ${CMAKE_BINARY_DIR}/test/src/inference_engine_test_common.cpp
-               )
 
-target_link_libraries(${INFERENCE_ENGINE_CLTUNER} ${GTEST_LIBRARY}
-                                      ${GTEST_MAIN_LIBRARY}
-                                      ${INFERENCE_ENGINE_INTERFACE_LIB_NAME}
-                                      ${${INFERENCE_ENGINE_CLTUNER}_LDFLAGS}
-                                      glib-2.0
-                                      dl
-                                      )
\ No newline at end of file
+add_subdirectory(include)
+add_subdirectory(src)
diff --git a/tools/include/InputMetadata.h b/tools/include/InputMetadata.h
new file mode 100644 (file)
index 0000000..05efb7b
--- /dev/null
@@ -0,0 +1,133 @@
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_INPUTMETADATA_H__
+#define __INFERENCE_ENGINE_INPUTMETADATA_H__
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <inference_engine_type.h>
+#include <inference_engine_cltuner.h>
+#include <json-glib/json-glib.h>
+
+/**
+ * @file InputMetadata.h
+ * @brief This file contains the input metadata class definition which
+ *        provides input metadata of a model.
+ */
+
+namespace InferenceEngineInterface
+{
+namespace Cltuner
+{
+       class Options
+       {
+       public:
+               class Normalization
+               {
+               public:
+                       bool use;
+                       std::vector<double> mean;
+                       std::vector<double> std;
+
+                       Normalization() : use(false) {}
+                       ~Normalization() = default;
+               };
+
+               class Quantization
+               {
+               public:
+                       bool use;
+                       std::vector<double> scale;
+                       std::vector<double> zeropoint;
+
+                       Quantization() : use(false) {};
+                       ~Quantization() = default;
+               };
+
+               Normalization normalization;
+               Quantization  quantization;
+
+               Options() = default;
+               ~Options() = default;
+       };
+
+       class LayerInfo
+       {
+       public:
+
+               std::string name;
+               std::vector<int> dims;
+               inference_colorspace_e colorSpace;
+               inference_tensor_data_type_e dataType;
+               inference_tensor_shape_type_e shapeType; // TODO: define mv_inference_shape_type_e
+
+               LayerInfo() = default;
+               ~LayerInfo() = default;
+
+               int GetWidth() const;
+               int GetHeight() const;
+               int GetChannel() const;
+       };
+
+       class InputMetadata
+       {
+       public:
+               bool parsed;
+               std::map<std::string, LayerInfo> layer;
+               std::map<std::string, Options> option;
+
+               /**
+                * @brief   Creates an InputMetadata class instance.
+                *
+                * @since_tizen 6.5
+                */
+               InputMetadata();
+
+               /**
+                * @brief   Destroys an InputMetadata class instance including
+                *          all its resources.
+                *
+                * @since_tizen 6.5
+                */
+               ~InputMetadata() = default;
+
+               /**
+                * @brief Parses an InputMetadata
+                *
+                * @since_tizen 6.5
+                */
+               int Parse(JsonObject *root);
+
+       private:
+               std::map<std::string, inference_tensor_shape_type_e> mSupportedShapeType;
+               std::map<std::string, inference_tensor_data_type_e> mSupportedDataType;
+               std::map<std::string, inference_colorspace_e> mSupportedColorSpace;
+
+               template <typename T>
+               static T GetSupportedType(JsonObject* root, std::string typeName,
+                                                               std::map<std::string, T>& supportedTypes);
+               int GetTensorInfo(JsonObject* root);
+               int GetPreProcess(JsonObject* root);
+
+       };
+
+} /* Cltuner */
+} /* InferenceEngineInterface */
+
+#endif /* __INFERENCE_ENGINE_INPUTMETADATA_H__ */
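A hypothetical metadata file fragment that InputMetadata::Parse() is written to consume is sketched below. The key names (inputmetadata, tensor_info, preprocess, normalization, quantization) come from the parsing code in tools/src/InputMetadata.cpp later in this change; the layer name and concrete values are illustrative only. A "quantization" array with "scale"/"zeropoint" arrays is accepted in the same way as "normalization".

	{
	    "inputmetadata" : {
	        "tensor_info" : [
	            {
	                "name" : "input_2",
	                "shape_type" : "NHWC",
	                "shape_dims" : [ 1, 224, 224, 3 ],
	                "data_type" : "FLOAT32",
	                "color_space" : "RGB888"
	            }
	        ],
	        "preprocess" : [
	            {
	                "normalization" : [
	                    { "mean" : [ 127.5, 127.5, 127.5 ], "std" : [ 127.5, 127.5, 127.5 ] }
	                ]
	            }
	        ]
	    }
	}
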
diff --git a/tools/include/Metadata.h b/tools/include/Metadata.h
new file mode 100644 (file)
index 0000000..a911046
--- /dev/null
@@ -0,0 +1,87 @@
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_METADATA_H__
+#define __INFERENCE_ENGINE_METADATA_H__
+
+#include <string>
+#include <map>
+
+#include "InputMetadata.h"
+#include "OutputMetadata.h"
+#include <json-glib/json-glib.h>
+
+/**
+ * @file Metadata.h
+ * @brief This file contains the metadata class definition which
+ *        provides metadata of a model.
+ */
+
+namespace InferenceEngineInterface
+{
+namespace Cltuner
+{
+       class Metadata
+       {
+       public:
+               /**
+                * @brief   Creates a Metadata class instance.
+                *
+                * @since_tizen 6.5
+                */
+               Metadata() = default;
+
+               /**
+                * @brief   Destroys a Metadata class instance including
+                *          all its resources.
+                *
+                * @since_tizen 6.5
+                */
+               ~Metadata() = default;
+
+               /**
+                * @brief Initializes a Metadata class
+                *
+                * @since_tizen 6.5
+                */
+               int Init(const std::string& filename);
+
+               /**
+                * @brief Parses a meta file and sets values to InputMetadata
+                *        and OutputMetadata
+                *
+                * @since_tizen 6.5
+                */
+               int Parse();
+
+               InputMetadata& GetInputMeta();
+               OutputMetadata& GetOutputMeta();
+
+       private:
+               int ParseInputMeta(JsonObject *object);
+               int ParseOutputMeta(JsonObject *object);
+
+       private:
+               std::string mMetafile;
+
+               InputMetadata mInputMeta;
+               OutputMetadata mOutputMeta;
+       };
+
+} /* Cltuner */
+} /* InferenceEngineInterface */
+
+#endif /* __INFERENCE_ENGINE_METADATA_H__ */
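A minimal usage sketch for this class follows; the meta file path is an assumption, and the error codes mirror those used elsewhere in this change.

	#include "Metadata.h"

	using namespace InferenceEngineInterface::Cltuner;

	int LoadMeta()
	{
		Metadata metadata;

		// Point the parser at the model's JSON meta file (path illustrative).
		int ret = metadata.Init("/usr/share/capi-media-vision/models/IC/tflite/ic_meta.json");
		if (ret != INFERENCE_ENGINE_ERROR_NONE)
			return ret;

		// Parse both the "inputmetadata" and "outputmetadata" sections.
		ret = metadata.Parse();
		if (ret != INFERENCE_ENGINE_ERROR_NONE)
			return ret;

		// Parsed per-layer results are then available through the accessors.
		InputMetadata& inputMeta = metadata.GetInputMeta();
		return inputMeta.parsed ? INFERENCE_ENGINE_ERROR_NONE
		                        : INFERENCE_ENGINE_ERROR_INVALID_DATA;
	}
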
diff --git a/tools/include/OutputMetadata.h b/tools/include/OutputMetadata.h
new file mode 100644 (file)
index 0000000..bf379fd
--- /dev/null
@@ -0,0 +1,129 @@
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_OUTPUTMETADATA_H__
+#define __INFERENCE_ENGINE_OUTPUTMETADATA_H__
+
+#include <string>
+#include <vector>
+#include <map>
+#include <memory>
+
+#include <inference_engine_type.h>
+#include <inference_engine_cltuner.h>
+#include <json-glib/json-glib.h>
+#include "OutputMetadataTypes.h"
+
+/**
+ * @file OutputMetadata.h
+ * @brief This file contains the output metadata class definition which
+ *        provides output metadata of a model.
+ */
+
+namespace InferenceEngineInterface
+{
+namespace Cltuner
+{
+       class DimInfo
+       {
+       private:
+               std::vector<int> dims;
+       public:
+               std::vector<int> GetValidIndexAll() const;
+               void SetValidIndex(int index);
+       };
+
+       class DeQuantization
+       {
+       private:
+               double scale;
+               double zeropoint;
+       public:
+               DeQuantization(double s, double z) : scale(s), zeropoint(z) {};
+               ~DeQuantization() = default;
+
+               double GetScale() { return scale; }
+               double GetZeroPoint() { return zeropoint; }
+       };
+
+       class ScoreInfo
+       {
+       private:
+               std::string name;
+               DimInfo dimInfo;
+               double threshold;
+               inference_score_type_e type;
+               int topNumber;
+               std::shared_ptr<DeQuantization> deQuantization;
+
+               std::map<std::string, inference_score_type_e> supportedScoreTypes;
+
+       public:
+               ScoreInfo();
+               ~ScoreInfo() = default;
+
+               std::string GetName() { return name; }
+               DimInfo GetDimInfo() { return dimInfo; }
+               double GetThresHold() { return threshold; }
+               inference_score_type_e GetType() { return type; }
+               int GetTopNumber() { return topNumber; }
+               std::shared_ptr<DeQuantization> GetDeQuant() { return deQuantization; }
+
+               int ParseScore(JsonObject *root);
+       };
+
+       class OutputMetadata
+       {
+       private:
+               bool parsed;
+               ScoreInfo score;
+
+               int ParseScore(JsonObject *root);
+
+       public:
+               static std::map<std::string, inference_tensor_shape_type_e> supportedTensorShapes;
+               /**
+                * @brief   Creates an OutputMetadata class instance.
+                *
+                * @since_tizen 6.5
+                */
+               OutputMetadata();
+
+               /**
+                * @brief   Destroys an OutputMetadata class instance including
+                *          all its resources.
+                *
+                * @since_tizen 6.5
+                */
+               ~OutputMetadata() = default;
+
+               /** @brief Parses an OutputMetadata
+                *
+                * @since_tizen 6.5
+                */
+               int Parse(JsonObject *root);
+
+               bool IsParsed();
+               ScoreInfo& GetScore();
+               template <typename T>
+               static T GetSupportedType(JsonObject* root, std::string typeName,
+                                                               std::map<std::string, T>& supportedTypes);
+       };
+
+} /* Cltuner */
+} /* InferenceEngineInterface */
+
+#endif /* __INFERENCE_ENGINE_OUTPUTMETADATA_H__ */
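Correspondingly, a hypothetical "outputmetadata" fragment for the score parser: name, index, and top_number are keys read by ScoreInfo::ParseScore() in tools/src/OutputMetadata.cpp, and an index element set to 1 marks that dimension as valid; threshold and score_type are assumptions based on the ScoreInfo members above.

	{
	    "outputmetadata" : {
	        "score" : [
	            {
	                "name" : "dense_3/Softmax",
	                "index" : [ 0, 1 ],
	                "top_number" : 5,
	                "threshold" : 0.3,
	                "score_type" : "NORMAL"
	            }
	        ]
	    }
	}
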
diff --git a/tools/include/OutputMetadataTypes.h b/tools/include/OutputMetadataTypes.h
new file mode 100644 (file)
index 0000000..c5379d0
--- /dev/null
@@ -0,0 +1,81 @@
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __INFERENCE_ENGINE_OUTPUTMETADATA_TYPES_H__
+#define __INFERENCE_ENGINE_OUTPUTMETADATA_TYPES_H__
+
+/**
+ * @file OutputMetadataTypes.h
+ * @brief This file contains supported output metadata types.
+ */
+
+namespace InferenceEngineInterface
+{
+namespace Cltuner
+{
+       // score
+       typedef enum {
+               INFERENCE_SCORE_TYPE_NORMAL,
+               INFERENCE_SCORE_TYPE_SIGMOID
+       } inference_score_type_e;
+
+       // box
+       typedef enum {
+               INFERENCE_BOX_TYPE_ORIGIN_LEFTTOP,
+               INFERENCE_BOX_TYPE_ORIGIN_CENTER
+       } inference_box_type_e;
+
+       typedef enum {
+               INFERENCE_BOX_COORDINATE_TYPE_RATIO,
+               INFERENCE_BOX_COORDINATE_TYPE_PIXEL
+       } inference_box_coordinate_type_e;
+
+       typedef enum {
+               INFERENCE_BOX_DECODING_TYPE_BYPASS,
+               INFERENCE_BOX_DECODING_TYPE_SSD_ANCHOR,
+       } inference_box_decoding_type_e;
+
+       typedef enum {
+               INFERENCE_BOX_NMS_TYPE_NONE = -1,
+               INFERENCE_BOX_NMS_TYPE_STANDARD
+       } inference_box_nms_type_e;
+
+       // landmark
+       typedef enum {
+               INFERENCE_LANDMARK_TYPE_2D_SINGLE,
+               INFERENCE_LANDMARK_TYPE_2D_MULTI,
+               INFERENCE_LANDMARK_TYPE_3D_SINGLE
+       } inference_landmark_type_e;
+
+       typedef enum {
+               INFERENCE_LANDMARK_COORDINATE_TYPE_RATIO,
+               INFERENCE_LANDMARK_COORDINATE_TYPE_PIXEL
+       } inference_landmark_coordinate_type_e;
+
+       typedef enum {
+               INFERENCE_LANDMARK_DECODING_TYPE_BYPASS,
+               INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP,
+               INFERENCE_LANDMARK_DECODING_TYPE_HEATMAP_REFINE
+       } inference_landmark_decoding_type_e;
+
+       typedef enum {
+               INFERENCE_DISPLACEMENT_TYPE_FORWARD,
+               INFERENCE_DISPLACEMENT_TYPE_BACKWARD
+       } inference_displacement_type_e;
+}
+}
+
+#endif /* __INFERENCE_ENGINE_OUTPUTMETADATA_TYPES_H__ */
\ No newline at end of file
diff --git a/tools/include/inference_engine_cltuner.h b/tools/include/inference_engine_cltuner.h
new file mode 100644 (file)
index 0000000..4e2cfe5
--- /dev/null
@@ -0,0 +1,97 @@
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef __INFERENCE_ENGINE_CLTUNER_H__
+#define __INFERENCE_ENGINE_CLTUNER_H__
+
+#include <string.h>
+#include <unistd.h>
+#include <dlog.h>
+
+#include "inference_engine_private_type.h"
+#include "inference_engine_type.h"
+#include "inference_engine_error.h"
+#include "inference_engine_common_impl.h"
+#include "inference_engine_test_common.h"
+
+
+namespace InferenceEngineInterface
+{
+namespace Cltuner
+{
+
+struct TensorInfo {
+    int width;
+    int height;
+    int dim;
+    int ch;
+};
+
+struct InferenceConfig {
+    /**
+    * @brief Default constructor for the @ref InferenceConfig
+    *
+    * @since_tizen 5.0
+    */
+
+    std::string mConfigFilePath; /**< Path of a model configuration file */
+
+    std::string mWeightFilePath; /**< Path of a model weight file */
+
+    std::string mUserFilePath; /**< Path of model user file */
+
+    TensorInfo mTensorInfo; /**< Tensor information */
+
+    inference_tensor_data_type_e mDataType; /**< Data type of an input tensor */
+
+    inference_backend_type_e mBackedType; /**< Backend type of model files */
+
+    int mTargetTypes; /**< Target type to run inference */
+
+    double mConfidenceThresHold; /**< Confidence threshold value */
+
+    double mMeanValue; /**< The mean value for normalization */
+
+    double mStdValue; /**< The scale factor value for normalization */
+
+    int mMaxOutputNumbers;
+
+    std::vector<std::string> mInputLayerNames; /**< The input layer names */
+    std::vector<std::string> mOutputLayerNames; /**< The output layer names */
+};
+
+typedef enum {
+       INFERENCE_COLORSPACE_INVALID, /**< The colorspace type is invalid */
+       INFERENCE_COLORSPACE_Y800,    /**< The colorspace type is Y800 */
+       INFERENCE_COLORSPACE_I420,    /**< The colorspace type is I420 */
+       INFERENCE_COLORSPACE_NV12,    /**< The colorspace type is NV12 */
+       INFERENCE_COLORSPACE_YV12,    /**< The colorspace type is YV12 */
+       INFERENCE_COLORSPACE_NV21,    /**< The colorspace type is NV21 */
+       INFERENCE_COLORSPACE_YUYV,    /**< The colorspace type is YUYV */
+       INFERENCE_COLORSPACE_UYVY,    /**< The colorspace type is UYVY */
+       INFERENCE_COLORSPACE_422P,    /**< The colorspace type is 422P */
+       INFERENCE_COLORSPACE_RGB565,  /**< The colorspace type is RGB565 */
+       INFERENCE_COLORSPACE_RGB888,  /**< The colorspace type is RGB888 */
+       INFERENCE_COLORSPACE_RGBA,    /**< The colorspace type is RGBA */
+} inference_colorspace_e;
+
+} /* Cltuner */
+} /* InferenceEngineInterface */
+
+#endif /* __INFERENCE_ENGINE_CLTUNER_H__ */
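With the metadata classes above, the tool no longer needs the hard-coded model tables of the deleted tools/inference_engine_cltuner.cpp below; a hypothetical glue helper (name assumed) sketches how an InferenceConfig could be filled from a parsed Metadata instance, using only members declared in this change:

	#include "Metadata.h"
	#include "inference_engine_cltuner.h"

	using namespace InferenceEngineInterface::Cltuner;

	// Hypothetical helper: copy the first parsed input layer into an InferenceConfig.
	static void FillConfigFromMeta(Metadata& meta, InferenceConfig& config)
	{
		InputMetadata& inputMeta = meta.GetInputMeta();
		if (!inputMeta.parsed || inputMeta.layer.empty())
			return;

		const LayerInfo& info = inputMeta.layer.begin()->second;
		config.mTensorInfo.width = info.GetWidth();
		config.mTensorInfo.height = info.GetHeight();
		config.mTensorInfo.ch = info.GetChannel();
		config.mDataType = info.dataType;
		config.mInputLayerNames.push_back(info.name);
	}
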
diff --git a/tools/inference_engine_cltuner.cpp b/tools/inference_engine_cltuner.cpp
deleted file mode 100644 (file)
index be99ada..0000000
+++ /dev/null
@@ -1,391 +0,0 @@
-/**
- * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include <iostream>
-#include <glib.h>
-#include <glib/gprintf.h>
-#include <argp.h>
-#include <string.h>
-#include <tuple>
-#include <map>
-#include <fcntl.h>
-#include <unistd.h>
-#include <queue>
-#include <algorithm>
-
-#include <ctype.h>
-#include <stdlib.h>
-#include <stdio.h>
-
-#include "inference_engine_private_type.h"
-#include "inference_engine_type.h"
-#include "inference_engine_error.h"
-#include "inference_engine_common_impl.h"
-#include "inference_engine_test_common.h"
-
-extern "C"
-{
-#include <dlog.h>
-
-#ifdef LOG_TAG
-#undef LOG_TAG
-#endif
-
-#define LOG_TAG "INFERENCE_ENGINE_CLTUNER"
-}
-#define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))
-using namespace InferenceEngineInterface::Common;
-
-static gboolean process();
-GMainLoop *g_loop;
-inference_engine_cltuner cltuner;
-inference_engine_tensor_info input_tensor_info;
-inference_engine_tensor_info output_tensor_info;
-int target_device;
-size_t height;
-size_t width;
-size_t ch;
-int tensor_type;
-int tensor_shape;
-std::vector<std::string> image_paths;
-std::vector<std::string> input_layers;
-std::vector<std::string> output_layers;
-std::vector<std::string> model_paths;
-std::vector<std::string> models;
-inference_engine_layer_property input_property;
-inference_engine_layer_property output_property;
-
-int menu_idx=0;
-
-static void show_menu(const char *title, int idx){
-       g_print("*********************************************\n");
-       g_print("* %38s *\n", title);
-       g_print("*-------------------------------------------*\n");
-       if (idx == 0) {
-               g_print("* %2i. %34s *\n", 0, "INFERENCE_ENGINE_CLTUNER_READ");
-               g_print("* %2i. %34s *\n", 1, "INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE");
-               g_print("* %2i. %34s *\n", 2, "INFERENCE_ENGINE_CLTUNER_NORMAL");
-               g_print("* %2i. %34s *\n", 3, "INFERENCE_ENGINE_CLTUNER_RAPID");
-               g_print("* %2c. %34s *\n", 'q', "Exit");
-       } else if (idx == 1) {
-               g_print("* %2i. %34s *\n", 1, "ic_tflite_model.tflite");
-               g_print("* %2i. %34s *\n", 2, "ic_tflite_q_model.tflite");
-               g_print("* %2i. %34s *\n", 3, "od_tflite_model.tflite");
-               g_print("* %2i. %34s *\n", 4, "fd_tflite_model1.tflite");
-               g_print("* %2i. %34s *\n", 5, "ped_tflite_model.tflite");
-               g_print("* %2i. %34s *\n", 6, "posenet1_lite_224.tflite");
-               g_print("* %2i. %34s *\n", 7, "posenet2_lite_224.tflite");
-               g_print("* %2c. %34s *\n", 'q', "Exit");
-       }
-       g_print("*********************************************\n\n");
-}
-
-static gboolean __interpret(char *cmd)
-{
-       g_strsplit(cmd, " ", 0);
-       if(strncmp(cmd, "", 1) != 0) {
-               if (strncmp(cmd, "q", 1) == 0) {
-                       g_main_loop_quit(g_loop);
-               } else {
-                       if (menu_idx == 0) {
-                               cltuner.tuning_mode = (inference_engine_cltuner_mode_e)atoi(cmd);
-                               g_print("tuning_mode is %d\n",cltuner.tuning_mode);
-                               menu_idx = 1;
-                               show_menu("Select Model", menu_idx);
-                       } else if (menu_idx == 1) {
-                               int idx = atoi(cmd);
-                               switch (idx) {
-                               case 1 :
-                                       g_print("ic_tflite_model.tflite is selected\n");
-                                       height=224;
-                                       width=224;
-                                       ch=3;
-                                       tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                                       tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-                                       image_paths.push_back("/opt/usr/images/image_classification.bin");
-                                       input_layers.push_back("input_2");
-                                       output_layers.push_back("dense_3/Softmax");
-                                       model_paths.push_back("/usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite");
-                                       break;
-                               case 2 :
-                                       g_print("ic_tflite_q_model.tflite is selected\n");
-                                       height=224;
-                                       width=224;
-                                       ch=3;
-                                       tensor_type = INFERENCE_TENSOR_DATA_TYPE_UINT8;
-                                       tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-                                       image_paths.push_back("/opt/usr/images/image_classification_q.bin");
-                                       input_layers.push_back("input");
-                                       output_layers.push_back("MobilenetV1/Predictions/Reshape_1");
-                                       model_paths.push_back("/usr/share/capi-media-vision/models/IC_Q/tflite/ic_tflite_q_model.tflite");
-                                       break;
-                               case 3 :
-                                       g_print("od_tflite_model.tflite is selected\n");
-                                       height=300;
-                                       width=300;
-                                       ch=3;
-                                       tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                                       tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-                                       image_paths.push_back("/opt/usr/images/object_detection.bin");
-                                       input_layers.push_back("normalized_input_image_tensor");
-                                       output_layers.push_back("TFLite_Detection_PostProcess");
-                                       output_layers.push_back("TFLite_Detection_PostProcess:1");
-                                       output_layers.push_back("TFLite_Detection_PostProcess:2");
-                                       output_layers.push_back("TFLite_Detection_PostProcess:3");
-                                       model_paths.push_back("/usr/share/capi-media-vision/models/OD/tflite/od_tflite_model.tflite");
-                                       break;
-                               case 4 :
-                                       g_print("fd_tflite_model1.tflite is selected\n");
-                                       height=300;
-                                       width=300;
-                                       ch=3;
-                                       tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                                       tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-                                       image_paths.push_back("/opt/usr/images/face_detection.bin");
-                                       input_layers.push_back("normalized_input_image_tensor");
-                                       output_layers.push_back("TFLite_Detection_PostProcess");
-                                       output_layers.push_back("TFLite_Detection_PostProcess:1");
-                                       output_layers.push_back("TFLite_Detection_PostProcess:2");
-                                       output_layers.push_back("TFLite_Detection_PostProcess:3");
-                                       model_paths.push_back("/usr/share/capi-media-vision/models/FD/tflite/fd_tflite_model1.tflite");
-                                       break;
-                               case 5 :
-                                       g_print("ped_tflite_model.tflite is selected\n");
-                                       height=192;
-                                       width=192;
-                                       ch=3;
-                                       tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                                       tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-                                       image_paths.push_back("/opt/usr/images/pose_estimation.bin");
-                                       input_layers.push_back("image");
-                                       output_layers.push_back("Convolutional_Pose_Machine/stage_5_out");
-                                       model_paths.push_back("/usr/share/capi-media-vision/models/PE/tflite/ped_tflite_model.tflite");
-                                       break;
-                               case 6 :
-                                       g_print("posenet1_lite_224.tflite is selected\n");
-                                       height=224;
-                                       width=224;
-                                       ch=3;
-                                       tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                                       tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-                                       image_paths.push_back("/opt/usr/images/hand.bin");
-                                       input_layers.push_back("input");
-                                       output_layers.push_back("mobilenetv2/boundingbox");
-                                       output_layers.push_back("mobilenetv2/heatmap");
-                                       model_paths.push_back("/usr/share/capi-media-vision/models/PE_1/tflite/posenet1_lite_224.tflite");
-                                       break;
-                               case 7 :
-                                       g_print("posenet2_lite_224.tflite is selected\n");
-                                       height=56;
-                                       width=56;
-                                       ch=21;
-                                       tensor_type = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
-                                       tensor_shape = INFERENCE_TENSOR_SHAPE_NCHW;
-                                       image_paths.push_back("/opt/usr/images/hand.bin");
-                                       input_layers.push_back("input");
-                                       output_layers.push_back("mobilenetv2/coord_refine");
-                                       output_layers.push_back("mobilenetv2/gesture");
-                                       model_paths.push_back("/usr/share/capi-media-vision/models/PE_2/tflite/posenet2_lite_224.tflite");
-                                       break;
-                               default :
-                                       break;
-                               }
-
-                               inference_engine_tensor_info _input_tensor_info = {
-                                       { 1, ch, height, width },
-                                       (inference_tensor_shape_type_e)tensor_shape,
-                                       static_cast<inference_tensor_data_type_e>(tensor_type),
-                                       static_cast<size_t>(1 * ch * height * width)
-                               };
-
-                               inference_engine_tensor_info _output_tensor_info = {
-                                       std::vector<size_t>{1},
-                                       (inference_tensor_shape_type_e)tensor_shape,
-                                       (inference_tensor_data_type_e)tensor_type,
-                                       1
-                               };
-                               input_tensor_info = _input_tensor_info;
-                               output_tensor_info = _output_tensor_info;
-                               if (!process()) {
-                                       g_print("Error is occurred while doing process.\n ");
-                                       return FALSE;
-                               }
-
-                       } else {
-                               g_print("unknown menu_idx [%d]", menu_idx);
-                       }
-               }
-       } else {
-               g_print("Please input command.\n");
-       }
-       return TRUE;
-}
-
-void CheckResult(){
-       std::string tune_file = model_paths[0];
-       tune_file.append(".tune");
-       int fd = open(tune_file.c_str(), O_RDONLY);
-       if (fd == -1) {
-               g_print("tune file generation failed!!\n");
-               return;
-       }
-       off_t fsize;
-       fsize = lseek(fd, 0, SEEK_END);
-       g_print("************TUNE FILE GENERATED**************\n");
-       g_print("Location \n[%s] \nSize \n[%jd]\n", tune_file.c_str(), static_cast<intmax_t>(fsize));
-       g_print("*-------------------------------------------*\n\n\n");
-       close(fd);
-
-       show_menu("Select Tuning Mode", menu_idx);
-}
-static gboolean __input(GIOChannel *channel,
-                                       GIOCondition  cond,
-                                       gpointer      data)
-{
-       char buf[200];
-       gsize read;
-       GError *error = NULL;
-       if (data != nullptr) {
-               g_print("data: %p \n",data);
-               return FALSE;
-       }
-       if (cond == G_IO_ERR) {
-               g_print("G_IO_ERR is occurred. \n");
-               return FALSE;
-       }
-
-       g_io_channel_read_chars(channel, buf, 200, &read, &error);
-       buf[read] = '\0';
-       g_strstrip(buf);
-       if (!__interpret(buf)) {
-               g_print("Error is occurred while doing __interpret.\n ");
-               return FALSE;
-       }
-       return TRUE;
-}
-
-static gboolean process(){
-       InferenceEngineCommon *mBackend;
-       inference_engine_config config = {
-               .backend_name = "armnn",
-               .backend_type = INFERENCE_BACKEND_ARMNN,
-               .target_devices = INFERENCE_TARGET_GPU
-       };
-
-       int ret = 0;
-       mBackend = new InferenceEngineCommon();
-       ret = mBackend->LoadConfigFile();
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               LOGE("LoadConfigFile(); failed");
-               return FALSE;
-       }
-       ret = mBackend->BindBackend(&config);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               LOGE("BindBackend failed");
-               return FALSE;
-       }
-       inference_engine_capacity capacity;
-       ret = mBackend->GetBackendCapacity(&capacity);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               LOGE("GetBackendCapacity failed");
-               return FALSE;
-       }
-
-       mBackend->SetCLTuner(&cltuner);
-
-       ret = mBackend->SetTargetDevices(config.target_devices);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               LOGE("SetTargetDevices failed");
-               return FALSE;
-       }
-
-       int model_type = GetModelInfo(model_paths, models);
-       if (model_type <= INFERENCE_MODEL_NONE) {
-               LOGE("GetModelInfo failed");
-               return FALSE;
-       }
-
-       for (auto& input : input_layers) {
-               input_property.layers.insert(std::make_pair(input, input_tensor_info));
-       }
-
-       ret = mBackend->SetInputLayerProperty(input_property);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               LOGE("SetInputLayerProperty failed");
-               return FALSE;
-       }
-
-
-       for (auto& layer : output_layers) {
-               output_property.layers.insert(std::make_pair(layer, output_tensor_info));
-       }
-
-       ret = mBackend->SetOutputLayerProperty(output_property);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               LOGE("SetOutputLayerProperty failed");
-               return FALSE;
-       }
-
-       ret = mBackend->Load(models, (inference_model_format_e) model_type);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               LOGE("Load failed");
-               return FALSE;
-       }
-
-       IETensorBuffer inputs, outputs;
-       ret = PrepareTensorBuffers(mBackend, inputs, outputs);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               LOGE("PrepareTensorBuffers failed");
-               return FALSE;
-       }
-       int imageIndex = 0;
-       for (auto& input : inputs) {
-               CopyFileToMemory(image_paths[imageIndex++].c_str(), input.second, input.second.size);
-       }
-
-       ret = mBackend->Run(inputs, outputs);
-       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
-               LOGE("Run failed");
-               return FALSE;
-       }
-
-       CleanupTensorBuffers(inputs, outputs);
-       CheckResult();
-       mBackend->UnbindBackend();
-       models.clear();
-
-       return TRUE;
-}
-
-int main ()
-{
-       int ret = 0;
-       GIOChannel *stdin_channel;
-       stdin_channel = g_io_channel_unix_new(0);
-       g_io_channel_set_flags(stdin_channel, G_IO_FLAG_NONBLOCK, NULL);
-       g_io_add_watch(stdin_channel, G_IO_IN, (GIOFunc)__input, NULL);
-
-       cltuner.active = true;
-       cltuner.update = true;
-
-
-       show_menu("Select Tuning Mode", menu_idx);
-       g_loop = g_main_loop_new(NULL, FALSE);
-       g_main_loop_run(g_loop);
-
-       return ret;
-}
diff --git a/tools/src/CMakeLists.txt b/tools/src/CMakeLists.txt
new file mode 100644 (file)
index 0000000..68becae
--- /dev/null
@@ -0,0 +1,43 @@
+project(inference_engine_test)
+
+set(INFERENCE_ENGINE_CLTUNER inference_engine_cltuner)
+
+set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS_DEBUG _DEBUG)
+
+find_package(GTest REQUIRED)
+set(GTEST_LIBRARY gtest)
+
+pkg_check_modules(${INFERENCE_ENGINE_CLTUNER} REQUIRED glib-2.0 json-glib-1.0)
+FOREACH(flag ${${INFERENCE_ENGINE_CLTUNER}_CFLAGS})
+    SET(EXTRA_CFLAGS "${EXTRA_CFLAGS} ${flag}")
+ENDFOREACH(flag)
+
+if(NOT SKIP_WARNINGS)
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${EXTRA_CFLAGS} -Wall -Wextra -Werror")
+endif()
+
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tools/bin/)
+
+include_directories(${PROJECT_SOURCE_DIR})
+include_directories(${CMAKE_BINARY_DIR}/test/src)
+include_directories(${CMAKE_BINARY_DIR}/include)
+include_directories(${CMAKE_BINARY_DIR}/tools/include)
+include_directories(/usr/include/gtest)
+include_directories(${GLIB_INCLUDE_DIRS})
+
+file(GLOB CLTUNER_INC_HEADER_LIST "${PROJECT_SOURCE_DIR}/*.h")
+file(GLOB CLTUNER_INC_FILE_LIST "${PROJECT_SOURCE_DIR}/*.cpp")
+
+add_executable(${INFERENCE_ENGINE_CLTUNER}
+               ${CLTUNER_INC_HEADER_LIST}
+               ${CLTUNER_INC_FILE_LIST}
+               ${CMAKE_BINARY_DIR}/test/src/inference_engine_test_common.cpp
+               )
+
+target_link_libraries(${INFERENCE_ENGINE_CLTUNER} ${GTEST_LIBRARY}
+                                      ${GTEST_MAIN_LIBRARY}
+                                      ${INFERENCE_ENGINE_INTERFACE_LIB_NAME}
+                                      ${${INFERENCE_ENGINE_CLTUNER}_LDFLAGS}
+                                      glib-2.0
+                                      dl
+                                      )
\ No newline at end of file
diff --git a/tools/src/InputMetadata.cpp b/tools/src/InputMetadata.cpp
new file mode 100644 (file)
index 0000000..d4c3c5d
--- /dev/null
@@ -0,0 +1,251 @@
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <fstream>
+#include <string>
+#include <queue>
+#include <algorithm>
+#include "InputMetadata.h"
+
+extern "C"
+{
+
+#ifdef LOG_TAG
+#undef LOG_TAG
+#endif
+#define MAX_STR 256
+#define LOG_TAG "INFERENCE_ENGINE_CLTUNER"
+}
+namespace InferenceEngineInterface
+{
+namespace Cltuner
+{
+       InputMetadata::InputMetadata() :
+                       parsed(false),
+                       layer(),
+                       option()
+       {
+               // shape_type
+               mSupportedShapeType.insert({"NCHW", INFERENCE_TENSOR_SHAPE_NCHW});
+               mSupportedShapeType.insert({"NHWC", INFERENCE_TENSOR_SHAPE_NHWC});
+
+               // data_type
+               mSupportedDataType.insert({"FLOAT32", INFERENCE_TENSOR_DATA_TYPE_FLOAT32});
+               mSupportedDataType.insert({"UINT8", INFERENCE_TENSOR_DATA_TYPE_UINT8});
+
+               // color_space
+               mSupportedColorSpace.insert({"RGB888", INFERENCE_COLORSPACE_RGB888});
+               mSupportedColorSpace.insert({"GRAY8", INFERENCE_COLORSPACE_Y800});
+       }
+
+       template <typename T>
+       T InputMetadata::GetSupportedType(JsonObject* root, std::string typeName,
+                                                                       std::map<std::string, T>& supportedTypes)
+       {
+               auto supportedType = supportedTypes.find(json_object_get_string_member(root, typeName.c_str()));
+               if (supportedType == supportedTypes.end()) {
+                       throw std::invalid_argument(typeName);
+               }
+
+               LOGI("%s: %d:%s", typeName.c_str(), supportedType->second, supportedType->first.c_str());
+
+               return supportedType->second;
+       }
+
+       int InputMetadata::GetTensorInfo(JsonObject *root)
+       {
+               LOGI("ENTER");
+
+               if (!json_object_has_member(root, "tensor_info")) {
+                       LOGE("No tensor_info inputmetadata");
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               // tensor_info
+               int ret = INFERENCE_ENGINE_ERROR_NONE;
+               JsonArray * rootArray = json_object_get_array_member(root, "tensor_info");
+               unsigned int elements = json_array_get_length(rootArray);
+
+               std::map<std::string, LayerInfo>().swap(layer);
+               // TODO: handle errors
+               // FIXME: LayerInfo.set()??
+               for (unsigned int elem = 0; elem < elements; ++elem) {
+                       LayerInfo info;
+                       JsonNode *pNode = json_array_get_element(rootArray, elem);
+                       JsonObject *pObject = json_node_get_object(pNode);
+
+                       info.name =
+                                       static_cast<const char*>(json_object_get_string_member(pObject,"name"));
+                       LOGI("layer: %s", info.name.c_str());
+
+                       try {
+                               info.shapeType = GetSupportedType(pObject, "shape_type", mSupportedShapeType);
+                               info.dataType = GetSupportedType(pObject, "data_type", mSupportedDataType);
+                               info.colorSpace = GetSupportedType(pObject, "color_space", mSupportedColorSpace);
+                       } catch (const std::exception& e) {
+                               LOGE("Invalid %s", e.what());
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       // dims
+                       JsonArray * array = json_object_get_array_member(pObject, "shape_dims");
+                       unsigned int elements2 = json_array_get_length(array);
+                       LOGI("shape dim: size[%u]", elements2);
+                       for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
+                               auto dim = static_cast<int>(json_array_get_int_element(array, elem2));
+                               info.dims.push_back(dim);
+                               LOGI("%d", dim);
+                       }
+
+                       layer.insert(std::make_pair(info.name, info));
+               }
+
+               LOGI("LEAVE");
+
+               return ret;
+       }
+
+       int InputMetadata::GetPreProcess(JsonObject *root)
+       {
+               LOGI("ENTER");
+
+               if (!json_object_has_member(root, "preprocess")) {
+                       LOGI("No preprocess inputmetadata");
+                       return INFERENCE_ENGINE_ERROR_NONE;
+               }
+
+               // preprocess
+               JsonArray * rootArray = json_object_get_array_member(root, "preprocess");
+               unsigned int elements = json_array_get_length(rootArray);
+
+               std::map<std::string, Options>().swap(option);
+               // TODO: the number of layers should match the number of preprocess elements.
+               auto iterLayer = layer.begin();
+               // TODO: handle errors
+               for (unsigned int elem = 0; elem < elements; ++elem, ++iterLayer) {
+                       Options opt;
+                       JsonNode *pNode = json_array_get_element(rootArray, elem);
+                       JsonObject *pObject = json_node_get_object(pNode);
+
+                       // normalization
+                       if (json_object_has_member(pObject, "normalization")) {
+                               JsonArray * array = json_object_get_array_member(pObject, "normalization");
+                               JsonNode *  node = json_array_get_element(array, 0);
+                               JsonObject * object = json_node_get_object(node);
+
+                               opt.normalization.use = true;
+                               LOGI("use normalization");
+
+                               JsonArray * arrayMean = json_object_get_array_member(object, "mean");
+                               JsonArray * arrayStd = json_object_get_array_member(object, "std");
+                               unsigned int elemMean = json_array_get_length(arrayMean);
+                               unsigned int elemStd = json_array_get_length(arrayStd);
+                               if (elemMean != elemStd) {
+                                       LOGE("Invalid mean and std values");
+                                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                               }
+
+                               for (unsigned int elem = 0; elem < elemMean; ++elem) {
+                                       auto m = static_cast<double>(json_array_get_double_element(arrayMean, elem));
+                                       auto s = static_cast<double>(json_array_get_double_element(arrayStd, elem));
+                                       opt.normalization.mean.push_back(m);
+                                       opt.normalization.std.push_back(s);
+                                       LOGI("%u: mean[%3.2f], std[%3.2f]", elem, m, s);
+                               }
+                       }
+
+                       if (json_object_has_member(pObject, "quantization")) {
+                               JsonArray * array = json_object_get_array_member(pObject, "quantization");
+                               JsonNode *  node = json_array_get_element(array, 0);
+                               JsonObject * object = json_node_get_object(node);
+
+                               opt.quantization.use = true;
+                               LOGI("use quantization");
+
+                               JsonArray * arrayScale = json_object_get_array_member(object, "scale");
+                               JsonArray * arrayZero = json_object_get_array_member(object, "zeropoint");
+                               unsigned int elemScale = json_array_get_length(arrayScale);
+                               unsigned int elemZero = json_array_get_length(arrayZero);
+                               if (elemScale != elemZero) {
+                                       LOGE("Invalid scale and zero values");
+                                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                               }
+
+                               for (unsigned int elem = 0; elem < elemScale; ++elem) {
+                                       auto s = static_cast<double>(json_array_get_double_element(arrayScale, elem));
+                                       auto z = static_cast<double>(json_array_get_double_element(arrayZero, elem));
+                                       opt.quantization.scale.push_back(s);
+                                       opt.quantization.zeropoint.push_back(z);
+                                       LOGI("%u: scale[%3.2f], zeropoint[%3.2f]", elem, s, z);
+                               }
+                       }
+                       option.insert(std::make_pair(iterLayer->first, opt));
+               }
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int InputMetadata::Parse(JsonObject *root)
+       {
+               LOGI("ENTER");
+
+               int ret = GetTensorInfo(root);
+               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                       LOGE("Fail to GetTensorInfo[%d]", ret);
+                       return ret;
+               }
+
+               ret = GetPreProcess(root);
+               if (ret != INFERENCE_ENGINE_ERROR_NONE)  {
+                       LOGE("Fail to GetPreProcess[%d]", ret);
+                       return ret;
+               }
+
+               parsed = true;
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int LayerInfo::GetWidth() const {
+               if (shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
+                       return dims[3];
+               } else { // INFERENCE_TENSOR_SHAPE_NHWC
+                       return dims[2];
+               }
+       }
+
+       int LayerInfo::GetHeight() const {
+               if (shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
+                       return dims[2];
+               } else { // INFERENCE_TENSOR_SHAPE_NHWC
+                       return dims[1];
+               }
+       }
+
+       int LayerInfo::GetChannel() const {
+               if (shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
+                       return dims[1];
+               } else { // INFERENCE_TENSOR_SHAPE_NHWC
+                       return dims[3];
+               }
+       }
+
+} /* Cltuner */
+} /* InferenceEngineInterface */
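A quick worked example of the shape accessors above (values illustrative): for an NCHW tensor the layout is { N, C, H, W }, so

	LayerInfo info;
	info.shapeType = INFERENCE_TENSOR_SHAPE_NCHW;
	info.dims = { 1, 3, 224, 224 }; // N, C, H, W
	// info.GetChannel() == 3, info.GetHeight() == 224, info.GetWidth() == 224
	// The same tensor declared as NHWC would carry dims { 1, 224, 224, 3 }.
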
diff --git a/tools/src/Metadata.cpp b/tools/src/Metadata.cpp
new file mode 100644 (file)
index 0000000..7105e31
--- /dev/null
@@ -0,0 +1,129 @@
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Metadata.h"
+
+#include <map>
+
+#include <unistd.h>
+#include <fstream>
+#include <string>
+#include <queue>
+#include <algorithm>
+
+extern "C"
+{
+
+#ifdef LOG_TAG
+#undef LOG_TAG
+#endif
+#define MAX_STR 256
+#define LOG_TAG "INFERENCE_ENGINE_CLTUNER"
+}
+namespace InferenceEngineInterface
+{
+namespace Cltuner
+{
+       int Metadata::Init(const std::string& filename)
+       {
+               LOGI("ENTER");
+
+               if (access(filename.c_str(), F_OK | R_OK)) {
+                       LOGE("meta file is in [%s] ", filename.c_str());
+                       return INFERENCE_ENGINE_ERROR_INVALID_PATH;
+               }
+
+               mMetafile = filename;
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int Metadata::Parse()
+       {
+               LOGI("ENTER");
+
+               if (mMetafile.empty()) {
+                       LOGE("meta file is empty");
+                       return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+               }
+
+               GError *error = NULL;
+               JsonNode *node = NULL;
+               JsonObject *object = NULL;
+               int ret = INFERENCE_ENGINE_ERROR_NONE;
+
+               JsonParser *parser = json_parser_new();
+               if (parser == NULL) {
+                       LOGE("Fail to create json parser");
+                       return INFERENCE_ENGINE_ERROR_OUT_OF_MEMORY;
+               }
+
+               gboolean jsonRet = json_parser_load_from_file(parser, mMetafile.c_str(), &error);
+               if (!jsonRet) {
+                       LOGE("Unable to parser file %s by %s", mMetafile.c_str(),
+                                                               error == NULL ? "Unknown" : error->message);
+                       g_error_free(error);
+                       ret =  INFERENCE_ENGINE_ERROR_INVALID_DATA;
+                       goto _ERROR_;
+               }
+
+               node = json_parser_get_root(parser);
+               if (JSON_NODE_TYPE(node) != JSON_NODE_OBJECT) {
+                       LOGE("Fail to json_parser_get_root. It's an incorrect markup");
+                       ret =  INFERENCE_ENGINE_ERROR_INVALID_DATA;
+                       goto _ERROR_;
+               }
+
+               object = json_node_get_object(node);
+               if (!object) {
+                       LOGE("Fail to json_node_get_object. object is NULL");
+                       ret =  INFERENCE_ENGINE_ERROR_INVALID_DATA;
+                       goto _ERROR_;
+               }
+
+               ret = mInputMeta.Parse(json_object_get_object_member(object, "inputmetadata"));
+               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                       LOGE("Fail to parse input Meta[%d]",ret);
+                       goto _ERROR_;
+               }
+
+               ret = mOutputMeta.Parse(json_object_get_object_member(object, "outputmetadata"));
+               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                       LOGE("Fail to parse output meta[%d]",ret);
+                       goto _ERROR_;
+               }
+
+       _ERROR_ :
+               g_object_unref(parser);
+               parser = NULL;
+               LOGI("LEAVE");
+
+               return ret;
+       }
+
+       InputMetadata& Metadata::GetInputMeta()
+       {
+               return mInputMeta;
+       }
+
+       OutputMetadata& Metadata::GetOutputMeta()
+       {
+               return mOutputMeta;
+       }
+} /* Cltuner */
+} /* InferenceEngineInterface */
diff --git a/tools/src/OutputMetadata.cpp b/tools/src/OutputMetadata.cpp
new file mode 100755 (executable)
index 0000000..5f0f0a2
--- /dev/null
@@ -0,0 +1,169 @@
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <fstream>
+#include <string>
+#include <queue>
+#include <algorithm>
+#include "OutputMetadata.h"
+
+namespace InferenceEngineInterface
+{
+namespace Cltuner
+{
+       std::map<std::string, inference_tensor_shape_type_e> OutputMetadata::supportedTensorShapes =
+               {{"NCHW", INFERENCE_TENSOR_SHAPE_NCHW}, {"NHWC", INFERENCE_TENSOR_SHAPE_NHWC}};
+
+       OutputMetadata::OutputMetadata() :
+                       parsed(false),
+                       score()
+       {
+       }
+
+       ScoreInfo::ScoreInfo() :
+                       name(),
+                       dimInfo(),
+                       threshold(0.0),
+                       type(INFERENCE_SCORE_TYPE_NORMAL),
+                       topNumber(1),
+                       deQuantization(nullptr)
+       {
+               // Score type
+               supportedScoreTypes.insert({"NORMAL", INFERENCE_SCORE_TYPE_NORMAL});
+               supportedScoreTypes.insert({"SIGMOID", INFERENCE_SCORE_TYPE_SIGMOID});
+       }
+
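+       /*
+        * GetSupportedType() looks up the string member named typeName in root
+        * and maps it through supportedTypes; an unsupported or missing value
+        * throws std::invalid_argument, which callers such as ParseScore()
+        * translate into INFERENCE_ENGINE_ERROR_INVALID_OPERATION.
+        */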
+       template <typename T>
+       T OutputMetadata::GetSupportedType(JsonObject* root, std::string typeName,
+                                                                       std::map<std::string, T>& supportedTypes)
+       {
+               // Guard against a missing member: passing NULL to find() is undefined.
+               if (!json_object_has_member(root, typeName.c_str()))
+                       throw std::invalid_argument(typeName);
+
+               auto supportedType = supportedTypes.find(json_object_get_string_member(root, typeName.c_str()));
+               if (supportedType == supportedTypes.end()) {
+                       throw std::invalid_argument(typeName);
+               }
+
+               LOGI("%s: %d:%s", typeName.c_str(), supportedType->second, supportedType->first.c_str());
+
+               return supportedType->second;
+       }
+
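+       /*
+        * Illustrative "score" entry, derived from the members read below.
+        * The values are examples only, not taken from a real model:
+        *
+        * "score" : [ {
+        *     "name"       : "output_layer",
+        *     "index"      : [0, 1],
+        *     "top_number" : 5,
+        *     "threshold"  : 0.3,
+        *     "score_type" : "NORMAL",
+        *     "dequantization" : [ { "scale" : 255.0, "zeropoint" : 0.0 } ]
+        * } ]
+        */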
+       int ScoreInfo::ParseScore(JsonObject *root)
+       {
+               LOGI("ENTER");
+
+               JsonArray *rootArray = json_object_get_array_member(root, "score");
+               unsigned int elements = json_array_get_length(rootArray);
+
+               for (unsigned int elem = 0; elem < elements; ++elem) {
+                       JsonNode *pNode = json_array_get_element(rootArray, elem);
+                       JsonObject *pObject = json_node_get_object(pNode);
+
+                       name = json_object_get_string_member(pObject, "name");
+                       LOGI("layer: %s", name.c_str());
+
+                       JsonArray *array = json_object_get_array_member(pObject, "index");
+                       unsigned int elements2 = json_array_get_length(array);
+                       LOGI("range dim: size[%u]", elements2);
+                       for (unsigned int elem2 = 0; elem2 < elements2; ++elem2) {
+                               if (static_cast<int>(json_array_get_int_element(array, elem2)) == 1)
+                                       dimInfo.SetValidIndex(elem2);
+                       }
+
+                       topNumber = static_cast<int>(json_object_get_int_member(pObject, "top_number"));
+                       LOGI("top number: %d", topNumber);
+
+                       threshold = static_cast<double>(json_object_get_double_member(pObject, "threshold"));
+                       LOGI("threshold: %1.3f", threshold);
+
+                       try {
+                               type = OutputMetadata::GetSupportedType(pObject, "score_type", supportedScoreTypes);
+                       } catch (const std::exception& e) {
+                               LOGE("Invalid %s", e.what());
+                               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+                       }
+
+                       if (json_object_has_member(pObject, "dequantization")) {
+                               array = json_object_get_array_member(pObject, "dequantization");
+                               JsonNode *node = json_array_get_element(array, 0);
+                               JsonObject *object = json_node_get_object(node);
+
+                               deQuantization = std::make_shared<DeQuantization>(
+                                       json_object_get_double_member(object, "scale"),
+                                       json_object_get_double_member(object, "zeropoint"));
+                       }
+               }
+
+               LOGI("LEAVE");
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       int OutputMetadata::ParseScore(JsonObject *root)
+       {
+               if (!json_object_has_member(root, "score")) {
+                       LOGI("No score outputmetadata");
+                       return INFERENCE_ENGINE_ERROR_NONE;
+               }
+
+               return score.ParseScore(root);
+       }
+
+       ScoreInfo& OutputMetadata::GetScore()
+       {
+               return score;
+       }
+
+       bool OutputMetadata::IsParsed()
+       {
+               return parsed;
+       }
+
+       int OutputMetadata::Parse(JsonObject *root)
+       {
+               LOGI("ENTER");
+
+               int ret = ParseScore(root);
+               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                       LOGE("Fail to ParseScore[%d]", ret);
+                       return ret;
+               }
+
+               parsed = true;
+
+               LOGI("LEAVE");
+
+               return INFERENCE_ENGINE_ERROR_NONE;
+       }
+
+       void DimInfo::SetValidIndex(int index)
+       {
+               LOGI("ENTER");
+
+               dims.push_back(index);
+
+               LOGI("LEAVE");
+       }
+
+       std::vector<int> DimInfo::GetValidIndexAll() const
+       {
+               LOGI("ENTER");
+
+               LOGI("LEAVE");
+               return dims;
+       }
+} /* Cltuner */
+} /* InferenceEngineInterface */
diff --git a/tools/src/inference_engine_cltuner.cpp b/tools/src/inference_engine_cltuner.cpp
new file mode 100644 (file)
index 0000000..b18108e
--- /dev/null
@@ -0,0 +1,542 @@
+/**
+ * Copyright (c) 2021 Samsung Electronics Co., Ltd All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <glib.h>
+#include <glib/gprintf.h>
+#include <json-glib/json-glib.h>
+#include <random>
+
+#include <string.h>
+#include <tuple>
+#include <map>
+#include <fcntl.h>
+#include <unistd.h>
+#include <queue>
+#include <algorithm>
+#include <fstream>
+#include <chrono>
+
+#include "inference_engine_cltuner.h"
+#include "Metadata.h"
+
+extern "C"
+{
+
+#ifdef LOG_TAG
+#undef LOG_TAG
+#endif
+#define MAX_STR 256
+#define LOG_TAG "INFERENCE_ENGINE_CLTUNER"
+}
+#define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))
+#define MAX_WIDTH 1024
+#define MAX_HEIGHT 1024
+#define MAX_CHANNELS 3
+#define MAX_INFERENCE_COUNT 10
+
+using namespace InferenceEngineInterface::Common;
+using namespace InferenceEngineInterface::Cltuner;
+
+InferenceEngineCommon *mBackend;
+inference_engine_cltuner cltuner;
+
+char* model_path;
+char* json_path;
+bool is_generated = false;
+float rand_tensor[MAX_WIDTH][MAX_HEIGHT][MAX_CHANNELS];
+std::vector<std::string> model_paths;
+std::vector<std::string> models;
+tensor_t tuned_tensor;
+tensor_t orig_tensor;
+Metadata mMetadata;
+InferenceConfig mConfig;
+
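+// ConfigureInputInfo() maps the parsed layer dimensions onto mConfig. As an
+// illustration, an NHWC shape of [1, 224, 224, 3] yields dim=1, height=224,
+// width=224 and ch=3, while the same tensor in NCHW order would be
+// [1, 3, 224, 224].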
+int ConfigureInputInfo()
+{
+       LOGI("ENTER");
+
+       const InputMetadata& inputMeta = mMetadata.GetInputMeta();
+
+       if (!inputMeta.parsed) {
+               LOGE("No metadata parsed.");
+               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+       }
+
+       auto& layerInfo = inputMeta.layer.begin()->second;
+
+       if (layerInfo.shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
+               mConfig.mTensorInfo.ch = layerInfo.dims[1];
+               mConfig.mTensorInfo.dim = layerInfo.dims[0];
+               mConfig.mTensorInfo.width = layerInfo.dims[3];
+               mConfig.mTensorInfo.height = layerInfo.dims[2];
+       } else if (layerInfo.shapeType == INFERENCE_TENSOR_SHAPE_NHWC) {
+               mConfig.mTensorInfo.ch = layerInfo.dims[3];
+               mConfig.mTensorInfo.dim = layerInfo.dims[0];
+               mConfig.mTensorInfo.width = layerInfo.dims[2];
+               mConfig.mTensorInfo.height = layerInfo.dims[1];
+       } else {
+               LOGE("Invalid shape type[%d]", layerInfo.shapeType);
+               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+       }
+
+       if (!inputMeta.option.empty()) {
+               auto& option = inputMeta.option.begin()->second;
+               if (option.normalization.use) {
+                       mConfig.mMeanValue = option.normalization.mean[0];
+                       mConfig.mStdValue = option.normalization.std[0];
+               }
+       }
+
+       // dataType 0 maps to FLOAT32; any other value is treated as UINT8.
+       if (layerInfo.dataType == 0)
+               mConfig.mDataType = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+       else
+               mConfig.mDataType = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+
+       mConfig.mInputLayerNames.clear();
+
+       for (auto& layer : inputMeta.layer)
+               mConfig.mInputLayerNames.push_back(layer.first);
+
+       inference_engine_layer_property property;
+
+       for (auto& name : mConfig.mInputLayerNames) {
+               inference_engine_tensor_info tensor_info;
+
+               tensor_info.data_type = mConfig.mDataType;
+               tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
+               tensor_info.shape.push_back(mConfig.mTensorInfo.dim);
+               tensor_info.shape.push_back(mConfig.mTensorInfo.ch);
+               tensor_info.shape.push_back(mConfig.mTensorInfo.height);
+               tensor_info.shape.push_back(mConfig.mTensorInfo.width);
+               tensor_info.size = 1;
+
+               for (auto& dim : tensor_info.shape)
+                       tensor_info.size *= dim;
+
+               property.layers.insert(std::make_pair(name, tensor_info));
+       }
+
+       int ret = mBackend->SetInputLayerProperty(property);
+
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to set input layer property");
+               return ret;
+       }
+
+       LOGI("LEAVE");
+       return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+int ConfigureOutputInfo()
+{
+       LOGI("ENTER");
+
+       OutputMetadata& outputMeta = mMetadata.GetOutputMeta();
+
+       if (!outputMeta.IsParsed()) {
+               LOGE("No metadata parsed.");
+               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+       }
+
+       mConfig.mOutputLayerNames.clear();
+       if (!outputMeta.GetScore().GetName().empty())
+               mConfig.mOutputLayerNames.push_back(outputMeta.GetScore().GetName());
+
+       inference_engine_layer_property property;
+       inference_engine_tensor_info tensor_info = { std::vector<size_t>{1},
+                                               INFERENCE_TENSOR_SHAPE_NCHW,
+                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
+                                               1};
+
+       for (auto& name : mConfig.mOutputLayerNames) {
+               LOGI("Configure %s layer as output", name.c_str());
+               property.layers.insert(std::make_pair(name, tensor_info));
+       }
+
+       int ret = mBackend->SetOutputLayerProperty(property);
+
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to set output layer property");
+               return ret;
+       }
+
+       LOGI("LEAVE");
+       return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+
+int ParseMetadata(std::string filePath)
+{
+       LOGI("ENTER");
+       LOGI("filePath : %s", filePath.c_str());
+
+       int ret = mMetadata.Init(filePath);
+
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to init metadata[%d]", ret);
+               return ret;
+       }
+
+       ret = mMetadata.Parse();
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Fail to parse metadata[%d]", ret);
+               return ret;
+       }
+
+       LOGI("LEAVE");
+       return INFERENCE_ENGINE_ERROR_NONE;
+}
+
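+// _FillOutputResult() copies each output layer's dimension info and buffer
+// into outputData. UINT8 buffers are dequantized to float in [0, 1] by
+// dividing by 255 so that printTensor() can compare predictions uniformly.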
+void _FillOutputResult(InferenceEngineCommon *engine,
+                                         IETensorBuffer &outputs,
+                                         tensor_t &outputData)
+{
+       inference_engine_layer_property property;
+
+       engine->GetOutputLayerProperty(property);
+
+       for (auto& layer : property.layers) {
+               const inference_engine_tensor_info& tensor_info = layer.second;
+               std::vector<int> tmpDimInfo;
+
+               for (auto& dim : tensor_info.shape) {
+                       LOGI("dim size %zu", dim);
+                       tmpDimInfo.push_back(dim);
+               }
+
+               outputData.dimInfo.push_back(tmpDimInfo);
+
+               if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
+                       auto *ori_buf = static_cast<unsigned char *>(outputs[layer.first].buffer);
+                       float *new_buf = new float[tensor_info.size];
+
+                       for (int j = 0; j < (int) tensor_info.size; j++) {
+                               new_buf[j] = (float) ori_buf[j] / 255.0f;
+                       }
+
+                       // Replace the original buffer with the dequantized one,
+                       // releasing the original unless the backend owns it.
+                       outputs[layer.first].buffer = new_buf;
+                       if (!outputs[layer.first].owner_is_backend) {
+                               delete[] ori_buf;
+                       }
+               }
+
+               LOGI("tensor_info.data_type : %d", tensor_info.data_type);
+               outputData.data.push_back(static_cast<void *>(outputs[layer.first].buffer));
+       }
+}
+
+static void printTensor(tensor_t &outputData) {
+       std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
+       std::vector<void *> inferResults(outputData.data.begin(),
+                                                                       outputData.data.end());
+       int count = inferDimInfo[0][1];
+       int idx = -1;
+       float value = 0.0f;
+       float *prediction = reinterpret_cast<float *>(inferResults[0]);
+
+       for (int i = 0; i < count; ++i) {
+               LOGI("prediction[%d] : %f", i, prediction[i]);
+               if (value < prediction[i]) {
+                       value = prediction[i];
+                       idx = i;
+               }
+       }
+
+       LOGI("Best prediction : prediction[%d] : %f", idx, value);
+}
+
+static void show_menu(const char *title) {
+       g_print("*******************************************\n");
+       g_print("*  %-38s *\n", title);
+       g_print("*-----------------------------------------*\n");
+       g_print("*  %-38s *\n", "Input Tuning mode and Model file");
+       g_print("*  %-38s *\n", "ex)1 /usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite");
+       g_print("*  %-38s *\n", "**caution**");
+       g_print("*  %-38s *\n", "'READ' mode should be executed");
+       g_print("*  %-38s *\n", "after generating a tune file.");
+       g_print("*-----------------------------------------*\n");
+       g_print("*  %-38s *\n", "[MODE LIST]");
+       g_print("* %2i. %-34s *\n", 0, "INFERENCE_ENGINE_CLTUNER_READ ");
+       g_print("* %2i. %-34s *\n", 1, "INFERENCE_ENGINE_CLTUNER_EXHAUSTIVE");
+       g_print("* %2i. %-34s *\n", 2, "INFERENCE_ENGINE_CLTUNER_NORMAL    ");
+       g_print("* %2i. %-34s *\n", 3, "INFERENCE_ENGINE_CLTUNER_RAPID     ");
+       g_print("*-----------------------------------------*\n");
+       g_print("* %2c. %-34s *\n", 'q', "Exit");
+       g_print("*******************************************\n\n");
+}
+
+int CheckTuneFile() {
+       std::string tune_file = model_paths[0];
+
+       tune_file.append(".tune");
+
+       int fd = open(tune_file.c_str(), O_RDONLY);
+
+       if (fd == -1) {
+               g_print("Tune file open failed!! (Tune file generation may have failed.)\n");
+               return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
+       }
+
+       off_t fsize;
+
+       fsize = lseek(fd, 0, SEEK_END);
+       g_print("************TUNE FILE GENERATED**************\n");
+       g_print("Location \n[%s] \nSize \n[%lld]\n", tune_file.c_str(), (long long)fsize);
+       g_print("*-------------------------------------------*\n\n\n");
+       close(fd);
+
+       return INFERENCE_ENGINE_ERROR_NONE;
+}
+
+void CopyRandomMatrixToMemory(inference_engine_tensor_buffer &buffer, unsigned int size) {
+       std::random_device rd;
+       std::mt19937 generator(rd());
+       std::uniform_real_distribution<> distribution(1.0, 255.0);
+
+       for (int i = 0; i < mConfig.mTensorInfo.height; i++) {
+               for (int j = 0; j < mConfig.mTensorInfo.width; j++) {
+                       for (int k = 0; k < mConfig.mTensorInfo.ch; k++) {
+                               rand_tensor[i][j][k] = distribution(generator);
+                       }
+               }
+       }
+
+       memcpy(buffer.buffer, rand_tensor, size);
+}
+
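+// process() runs one tuning/inference pass: it binds the armnn backend,
+// applies the CL tuner settings when the backend reports support, loads the
+// model, feeds a random input tensor, and runs MAX_INFERENCE_COUNT
+// inferences, reporting the average latency in READ mode.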
+static gboolean process(tensor_t& result_tensor, bool is_supported, bool is_actived, bool is_updated, inference_engine_cltuner_mode_e mode) {
+
+       inference_engine_config config = {
+               .backend_name = "armnn",
+               .backend_type = INFERENCE_BACKEND_ARMNN,
+               .target_devices = INFERENCE_TARGET_GPU
+       };
+
+       mBackend = new InferenceEngineCommon();
+
+       int ret = mBackend->EnableProfiler(true);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("EnableProfiler(); failed");
+               return FALSE;
+       }
+
+       ret = mBackend->LoadConfigFile();
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("LoadConfigFile(); failed");
+               return FALSE;
+       }
+
+       ret = mBackend->BindBackend(&config);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("BindBackend failed");
+               return FALSE;
+       }
+
+       inference_engine_capacity capacity;
+
+       ret = mBackend->GetBackendCapacity(&capacity);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("GetBackendCapacity failed");
+               return FALSE;
+       }
+
+       if (capacity.cltuner_supported && is_supported) {
+               LOGE("cltuner is set");
+               cltuner.active = is_actived;
+               cltuner.update = is_updated;
+               cltuner.tuning_mode = mode;
+
+               ret = mBackend->SetCLTuner(&cltuner);
+               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                       LOGE("SetCLTuner failed");
+                       return FALSE;
+               }
+       }
+
+       ret = mBackend->SetTargetDevices(config.target_devices);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("SetTargetDevices failed");
+               return FALSE;
+       }
+
+       int model_type = GetModelInfo(model_paths, models);
+
+       if (model_type <= INFERENCE_MODEL_NONE) {
+               LOGE("GetModelInfo failed");
+               return FALSE;
+       }
+
+       ret = ConfigureInputInfo();
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("ConfigureInputInfo failed");
+               return FALSE;
+       }
+
+       ret = ConfigureOutputInfo();
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("ConfigureOutputInfo failed");
+               return FALSE;
+       }
+
+       ret = mBackend->Load(models, (inference_model_format_e) model_type);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("Load failed");
+               return FALSE;
+       }
+
+       IETensorBuffer inputs, outputs;
+
+       ret = PrepareTensorBuffers(mBackend, inputs, outputs);
+       if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+               LOGE("PrepareTensorBuffers failed");
+               return FALSE;
+       }
+
+       for (auto& input : inputs) {
+               LOGI("input.second.size : [%zu]", input.second.size);
+               CopyRandomMatrixToMemory(input.second, input.second.size);
+       }
+
+       // steady_clock is monotonic, so the measurement is immune to wall-clock adjustments.
+       std::chrono::steady_clock::time_point StartTime = std::chrono::steady_clock::now();
+
+       for (int i = 0; i < MAX_INFERENCE_COUNT; i++) {
+               ret = mBackend->Run(inputs, outputs);
+               if (ret != INFERENCE_ENGINE_ERROR_NONE) {
+                       LOGE("Run failed");
+                       return FALSE;
+               }
+       }
+
+       std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - StartTime);
+
+       _FillOutputResult(mBackend, outputs, result_tensor);
+       CleanupTensorBuffers(inputs, outputs);
+       mBackend->UnbindBackend();
+       models.clear();
+
+       if (mode == INFERENCE_ENGINE_CLTUNER_READ) {
+               std::cout << "*****************************" << std::endl;
+
+               if (!is_actived)
+                       std::cout << "Average Inference Time without tune file" << std::endl;
+               else
+                       std::cout << "Average Inference Time with tune file" << std::endl;
+
+               std::cout << ms.count() / MAX_INFERENCE_COUNT << " ms (average over "
+                         << MAX_INFERENCE_COUNT << " runs)" << std::endl;
+               std::cout << "*****************************" << std::endl;
+       }
+
+       return TRUE;
+}
+
+static gboolean __interpret(char *cmd, char *cmd2)
+{
+       inference_engine_cltuner_mode_e tuning_mode;
+       int res = 0;
+
+       if (strncmp(cmd, "", 1) != 0) {
+               if (strncmp(cmd, "q", 1) == 0)
+                       return FALSE;
+
+               char** value;
+
+               tuning_mode = (inference_engine_cltuner_mode_e)atoi(cmd);
+               model_path = g_strdup(cmd2);
+               value = g_strsplit(cmd2, ".", 0);
+               json_path = g_strdup_printf("%s.json", value[0]);
+               model_paths.push_back(model_path);
+
+               LOGI("model_path : [%s]", model_path);
+               LOGI("json file path : [%s]", json_path);
+               g_strfreev(value);
+
+               res = ParseMetadata(std::string(json_path));
+               if (res != INFERENCE_ENGINE_ERROR_NONE) {
+                       LOGE("ParseMetadata failed");
+                       return FALSE;
+               }
+
+               if (tuning_mode == INFERENCE_ENGINE_CLTUNER_READ) {
+                       if (!process(orig_tensor, false, false, false, tuning_mode)) {
+                               LOGE("An error occurred while processing.");
+                               return FALSE;
+                       }
+
+                       printTensor(orig_tensor);
+
+                       if (!process(tuned_tensor, true, true, false, tuning_mode)) {
+                               LOGE("An error occurred while processing with the tune file.");
+                               return FALSE;
+                       }
+
+                       printTensor(tuned_tensor);
+               } else {
+                       if (!process(tuned_tensor, true, true, true, tuning_mode)) {
+                               LOGE("An error occurred while generating the tune file.");
+                               return FALSE;
+                       }
+
+                       res = CheckTuneFile();
+                       if (res != INFERENCE_ENGINE_ERROR_NONE) {
+                               LOGE("CheckTuneFile failed");
+                               return FALSE;
+                       }
+               }
+       }
+
+       return TRUE;
+}
+
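+// Example interactive session (illustrative paths): entering
+//   1 /usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite
+// runs EXHAUSTIVE tuning with ic_tflite_model.json as the metadata file and
+// writes ic_tflite_model.tflite.tune next to the model.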
+int main ()
+{
+       show_menu("CLtuner Generator");
+
+       char mode[MAX_STR];
+       // Bound the read to MAX_STR - 1 characters and check that a token was read.
+       int ret = scanf("%255s", mode);
+
+       if (ret != 1) {
+               g_print("wrong input.\n");
+               return -1;
+       }
+
+       if (strncmp(mode, "q", 1) == 0) {
+               g_print("exit!\n");
+               return 0;
+       }
+
+       char file_path[MAX_STR];
+
+       ret = scanf("%255s", file_path);
+       if (ret != 1) {
+               g_print("wrong input.\n");
+               return -1;
+       }
+
+       int _mode = atoi(mode);
+
+       if (_mode < 0 || _mode > 3) {
+               g_print("Invalid tuning mode. Choose a value between 0 (READ) and 3 (RAPID).\n");
+               return -1;
+       }
+
+       char **value = g_strsplit(file_path, ".", 0);
+
+       if (value[0] == NULL || value[1] == NULL) {
+               g_print("Check file path. Please write the full path, e.g. /root/model.tflite\n");
+               g_strfreev(value);
+               return -1;
+       }
+
+       g_strfreev(value);
+
+       __interpret(mode, file_path);
+
+       return 0;
+}
\ No newline at end of file