tools: clean up code and relocate global variables 68/266668/14
author Hyunsoo Park <hance.park@samsung.com>
Wed, 17 Nov 2021 04:57:22 +0000 (13:57 +0900)
committer Hyunsoo Park <hance.park@samsung.com>
Mon, 22 Nov 2021 05:28:49 +0000 (14:28 +0900)
[Version] 0.1.4-0

[Issue Type] Clean up

Change-Id: Ib2e8619b57948e1c9770a2b91ea10aa936da233f
Signed-off-by: Hyunsoo Park <hance.park@samsung.com>
include/inference_engine_private_type.h
packaging/inference-engine-interface.spec
tools/include/InputMetadata.h
tools/include/Metadata.h
tools/include/OutputMetadata.h
tools/include/inference_engine_cltuner.h
tools/src/inference_engine_cltuner.cpp

include/inference_engine_private_type.h
index 671c7497878d19b1c4f1e47f786fe4fc38baa4ad..dde57ddaeb9376c30669232cba91b4f76d96d6e9 100644 (file)
@@ -24,4 +24,19 @@ typedef enum {
        INFERENCE_BACKEND_NPU_MAX
 } inference_backend_npu_type_e;
 
+typedef enum {
+       INFERENCE_COLORSPACE_INVALID, /**< The colorspace type is invalid */
+       INFERENCE_COLORSPACE_Y800,    /**< The colorspace type is Y800 */
+       INFERENCE_COLORSPACE_I420,    /**< The colorspace type is I420 */
+       INFERENCE_COLORSPACE_NV12,    /**< The colorspace type is NV12 */
+       INFERENCE_COLORSPACE_YV12,    /**< The colorspace type is YV12 */
+       INFERENCE_COLORSPACE_NV21,    /**< The colorspace type is NV21 */
+       INFERENCE_COLORSPACE_YUYV,    /**< The colorspace type is YUYV */
+       INFERENCE_COLORSPACE_UYVY,    /**< The colorspace type is UYVY */
+       INFERENCE_COLORSPACE_422P,    /**< The colorspace type is 422P */
+       INFERENCE_COLORSPACE_RGB565,  /**< The colorspace type is RGB565 */
+       INFERENCE_COLORSPACE_RGB888,  /**< The colorspace type is RGB888 */
+       INFERENCE_COLORSPACE_RGBA,    /**< The colorspace type is RGBA */
+} inference_colorspace_e;
+
 #endif /* __INFERENCE_ENGINE_PRIVATE_TYPE_H__ */
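
The relocated enum is now visible to any component that includes inference_engine_private_type.h. As a minimal sketch (the helper name and channel mapping below are illustrative assumptions, not part of this change), a caller could branch on the colorspace values like this:

	/* Hypothetical helper, for illustration only. */
	static int get_channel_count(inference_colorspace_e colorspace)
	{
		switch (colorspace) {
		case INFERENCE_COLORSPACE_Y800:
			return 1; /* single-plane grayscale */
		case INFERENCE_COLORSPACE_RGB888:
			return 3;
		case INFERENCE_COLORSPACE_RGBA:
			return 4;
		default:
			return -1; /* INFERENCE_COLORSPACE_INVALID or unhandled format */
		}
	}

Planar formats such as I420 or NV12 would need their own handling, which the sketch deliberately leaves out.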

packaging/inference-engine-interface.spec
index 203e3808ebf1e3fbd1d2ef8a82cfc8f0df90e2aa..24384ce0d2ca387260196633966e4c34d1ffaa60 100644 (file)
@@ -1,6 +1,6 @@
 Name:        inference-engine-interface
 Summary:     Interface of inference engines
-Version:     0.1.3
+Version:     0.1.4
 Release:     0
 Group:       Multimedia/Framework
 License:     Apache-2.0

tools/include/InputMetadata.h
index 05efb7b42027a284c2cc62e5ffc630a2137af488..c894d340bb2686b3053719834868d2c011ad3810 100644 (file)
 #include <vector>
 #include <map>
 
-#include <inference_engine_type.h>
-#include <inference_engine_cltuner.h>
+#include <dlog.h>
+#include "inference_engine_private_type.h"
+#include "inference_engine_type.h"
+#include "inference_engine_error.h"
+
 #include <json-glib/json-glib.h>
 
 /**

tools/include/Metadata.h
index a91104617e2801105d0bba7303fe0019cace2633..3d11401a72045df8b4b3d895e9693208d3260049 100644 (file)
 #include <string>
 #include <map>
 
+
+#include <dlog.h>
+#include "inference_engine_private_type.h"
+#include "inference_engine_type.h"
+#include "inference_engine_error.h"
+
 #include "InputMetadata.h"
 #include "OutputMetadata.h"
 #include <json-glib/json-glib.h>

tools/include/OutputMetadata.h
index bf379fd60f135a4c3287712bfa43a7da111ed579..5d242aa44e4848fcedf1ab840457c6c5d05fb7fc 100644 (file)
 #include <map>
 #include <memory>
 
-#include <inference_engine_type.h>
-#include <inference_engine_cltuner.h>
+#include <dlog.h>
+#include "inference_engine_private_type.h"
+#include "inference_engine_type.h"
+#include "inference_engine_error.h"
+
 #include <json-glib/json-glib.h>
 #include "OutputMetadataTypes.h"
 

tools/include/inference_engine_cltuner.h
index 4e2cfe596c2e0e82c6856903da66a704c2076048..319d737c965027a19a703bf5fc0d40479df4db71 100644 (file)
@@ -28,7 +28,7 @@
 #include "inference_engine_common_impl.h"
 #include "inference_engine_test_common.h"
 
-
+#include "Metadata.h"
 namespace InferenceEngineInterface
 {
 namespace Cltuner
@@ -76,21 +76,6 @@ struct InferenceConfig {
     std::vector<std::string> mOutputLayerNames; /**< The output layer names */
 };
 
-typedef enum {
-       INFERENCE_COLORSPACE_INVALID, /**< The colorspace type is invalid */
-       INFERENCE_COLORSPACE_Y800,    /**< The colorspace type is Y800 */
-       INFERENCE_COLORSPACE_I420,    /**< The colorspace type is I420 */
-       INFERENCE_COLORSPACE_NV12,    /**< The colorspace type is NV12 */
-       INFERENCE_COLORSPACE_YV12,    /**< The colorspace type is YV12 */
-       INFERENCE_COLORSPACE_NV21,    /**< The colorspace type is NV21 */
-       INFERENCE_COLORSPACE_YUYV,    /**< The colorspace type is YUYV */
-       INFERENCE_COLORSPACE_UYVY,    /**< The colorspace type is UYVY */
-       INFERENCE_COLORSPACE_422P,    /**< The colorspace type is 422P */
-       INFERENCE_COLORSPACE_RGB565,  /**< The colorspace type is RGB565 */
-       INFERENCE_COLORSPACE_RGB888,  /**< The colorspace type is RGB888 */
-       INFERENCE_COLORSPACE_RGBA,    /**< The colorspace type is RGBA */
-} inference_colorspace_e;
-
 } /* Cltuner */
 } /* InferenceEngineInterface */
 

tools/src/inference_engine_cltuner.cpp
index 7fc22fbde3f0404976b0d114faee20a56181c9ff..475a676e227cf469ac0daef2543fd57723425248 100644 (file)
  * limitations under the License.
  */
 
-#include <iostream>
 #include <glib.h>
 #include <glib/gprintf.h>
+#include <iostream>
 #include <json-glib/json-glib.h>
 #include <random>
 
+#include <algorithm>
+#include <chrono>
+#include <fcntl.h>
+#include <fstream>
+#include <map>
+#include <queue>
 #include <string.h>
 #include <tuple>
-#include <map>
-#include <fcntl.h>
 #include <unistd.h>
-#include <queue>
-#include <algorithm>
-#include <fstream>
-#include <chrono>
 
 #include "inference_engine_cltuner.h"
-#include "Metadata.h"
 
 extern "C"
 {
-
 #ifdef LOG_TAG
 #undef LOG_TAG
 #endif
 #define MAX_STR 256
 #define LOG_TAG "INFERENCE_ENGINE_CLTUNER"
 }
-#define ARRAY_SIZE(x) (sizeof((x)) / sizeof((x)[0]))
-#define MAX_WIDTH 1024
-#define MAX_HEIGHT 1024
-#define MAX_CHANNELS 3
+
 #define MAX_INFERENCE_COUNT 10
 
 using namespace InferenceEngineInterface::Common;
 using namespace InferenceEngineInterface::Cltuner;
 
-InferenceEngineCommon *mBackend;
-inference_engine_cltuner cltuner;
-
-char* model_path;
-char* json_path;
-float rand_tensor[MAX_WIDTH][MAX_HEIGHT][MAX_CHANNELS];
-std::vector<std::string> model_paths;
-std::vector<std::string> models;
-tensor_t tuned_tensor;
-tensor_t orig_tensor;
-Metadata mMetadata;
-InferenceConfig mConfig;
-
-int ConfigureInputInfo()
+int ConfigureInputInfo(InferenceEngineCommon* backend, Metadata& metadata,
+                      InferenceConfig& tensorConfig)
 {
        LOGI("ENTER");
 
-       const InputMetadata& inputMeta = mMetadata.GetInputMeta();
+       const InputMetadata& inputMeta = metadata.GetInputMeta();
 
        if (!inputMeta.parsed) {
                LOGE("No meta data parsed.");
@@ -78,15 +61,15 @@ int ConfigureInputInfo()
        auto& layerInfo = inputMeta.layer.begin()->second;
 
        if (layerInfo.shapeType == INFERENCE_TENSOR_SHAPE_NCHW) {
-               mConfig.mTensorInfo.ch = layerInfo.dims[1];
-               mConfig.mTensorInfo.dim = layerInfo.dims[0];
-               mConfig.mTensorInfo.width = layerInfo.dims[3];
-               mConfig.mTensorInfo.height = layerInfo.dims[2];
+               tensorConfig.mTensorInfo.ch = layerInfo.dims[1];
+               tensorConfig.mTensorInfo.dim = layerInfo.dims[0];
+               tensorConfig.mTensorInfo.width = layerInfo.dims[3];
+               tensorConfig.mTensorInfo.height = layerInfo.dims[2];
        } else if (layerInfo.shapeType == INFERENCE_TENSOR_SHAPE_NHWC) {
-               mConfig.mTensorInfo.ch = layerInfo.dims[3];
-               mConfig.mTensorInfo.dim = layerInfo.dims[0];
-               mConfig.mTensorInfo.width = layerInfo.dims[2];
-               mConfig.mTensorInfo.height = layerInfo.dims[1];
+               tensorConfig.mTensorInfo.ch = layerInfo.dims[3];
+               tensorConfig.mTensorInfo.dim = layerInfo.dims[0];
+               tensorConfig.mTensorInfo.width = layerInfo.dims[2];
+               tensorConfig.mTensorInfo.height = layerInfo.dims[1];
        } else {
                LOGE("Invalid shape type[%d]", layerInfo.shapeType);
                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
@@ -95,32 +78,32 @@ int ConfigureInputInfo()
        if (!inputMeta.option.empty()) {
                auto& option = inputMeta.option.begin()->second;
                if (option.normalization.use) {
-                       mConfig.mMeanValue = option.normalization.mean[0];
-                       mConfig.mStdValue = option.normalization.std[0];
+                       tensorConfig.mMeanValue = option.normalization.mean[0];
+                       tensorConfig.mStdValue = option.normalization.std[0];
                }
        }
 
-       if(layerInfo.dataType == 0)
-               mConfig.mDataType = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
+       if (layerInfo.dataType == 0)
+               tensorConfig.mDataType = INFERENCE_TENSOR_DATA_TYPE_FLOAT32;
        else
-               mConfig.mDataType = INFERENCE_TENSOR_DATA_TYPE_UINT8;
+               tensorConfig.mDataType = INFERENCE_TENSOR_DATA_TYPE_UINT8;
 
-       mConfig.mInputLayerNames.clear();
+       tensorConfig.mInputLayerNames.clear();
 
        for (auto& layer : inputMeta.layer)
-               mConfig.mInputLayerNames.push_back(layer.first);
+               tensorConfig.mInputLayerNames.push_back(layer.first);
 
        inference_engine_layer_property property;
 
-       for (auto& name : mConfig.mInputLayerNames) {
+       for (auto& name : tensorConfig.mInputLayerNames) {
                inference_engine_tensor_info tensor_info;
 
-               tensor_info.data_type = mConfig.mDataType;
+               tensor_info.data_type = tensorConfig.mDataType;
                tensor_info.shape_type = INFERENCE_TENSOR_SHAPE_NCHW;
-               tensor_info.shape.push_back(mConfig.mTensorInfo.dim);
-               tensor_info.shape.push_back(mConfig.mTensorInfo.ch);
-               tensor_info.shape.push_back(mConfig.mTensorInfo.height);
-               tensor_info.shape.push_back(mConfig.mTensorInfo.width);
+               tensor_info.shape.push_back(tensorConfig.mTensorInfo.dim);
+               tensor_info.shape.push_back(tensorConfig.mTensorInfo.ch);
+               tensor_info.shape.push_back(tensorConfig.mTensorInfo.height);
+               tensor_info.shape.push_back(tensorConfig.mTensorInfo.width);
                tensor_info.size = 1;
 
                for (auto& dim : tensor_info.shape)
@@ -129,7 +112,7 @@ int ConfigureInputInfo()
                property.layers.insert(std::make_pair(name, tensor_info));
        }
 
-       int ret = mBackend->SetInputLayerProperty(property);
+       int ret = backend->SetInputLayerProperty(property);
 
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("Fail to set input layer property");
@@ -140,33 +123,34 @@ int ConfigureInputInfo()
        return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-int ConfigureOutputInfo()
+int ConfigureOutputInfo(InferenceEngineCommon* backend, Metadata& metadata,
+                       InferenceConfig& tensorConfig)
 {
        LOGI("ENTER");
 
-       OutputMetadata& outputMeta = mMetadata.GetOutputMeta();
+       OutputMetadata& outputMeta = metadata.GetOutputMeta();
 
        if (!outputMeta.IsParsed()) {
                LOGE("No meta data parsed.");
                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
        }
 
-       mConfig.mOutputLayerNames.clear();
+       tensorConfig.mOutputLayerNames.clear();
        if (!outputMeta.GetScore().GetName().empty())
-               mConfig.mOutputLayerNames.push_back(outputMeta.GetScore().GetName());
+               tensorConfig.mOutputLayerNames.push_back(
+                   outputMeta.GetScore().GetName());
 
        inference_engine_layer_property property;
-       inference_engine_tensor_info tensor_info = { std::vector<size_t>{1},
-                                               INFERENCE_TENSOR_SHAPE_NCHW,
-                                               INFERENCE_TENSOR_DATA_TYPE_FLOAT32,
-                                               1};
+       inference_engine_tensor_info tensor_info = {
+           std::vector<size_t>{1}, INFERENCE_TENSOR_SHAPE_NCHW,
+           INFERENCE_TENSOR_DATA_TYPE_FLOAT32, 1};
 
-       for (auto& name : mConfig.mOutputLayerNames) {
+       for (auto& name : tensorConfig.mOutputLayerNames) {
                LOGI("Configure %s layer as output", name.c_str());
                property.layers.insert(std::make_pair(name, tensor_info));
        }
 
-       int ret = mBackend->SetOutputLayerProperty(property);
+       int ret = backend->SetOutputLayerProperty(property);
 
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("Fail to set output layer property");
@@ -177,20 +161,19 @@ int ConfigureOutputInfo()
        return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-
-int ParseMetadata(std::string filePath)
+int ParseMetadata(Metadata& metadata, std::string filePath)
 {
        LOGI("ENTER");
        LOGI("filePath : %s", filePath.c_str());
 
-       int ret = mMetadata.Init(filePath);
+       int ret = metadata.Init(filePath);
 
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("Fail to init metadata[%d]", ret);
                return ret;
        }
 
-       ret = mMetadata.Parse();
+       ret = metadata.Parse();
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("Fail to parse metadata[%d]", ret);
                return ret;
@@ -200,9 +183,8 @@ int ParseMetadata(std::string filePath)
        return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-void _FillOutputResult(InferenceEngineCommon *engine,
-                                         IETensorBuffer &outputs,
-                                         tensor_t &outputData)
+void _FillOutputResult(InferenceEngineCommon* engine, IETensorBuffer& outputs,
+                      tensor_t& outputData)
 {
        inference_engine_layer_property property;
 
@@ -220,36 +202,40 @@ void _FillOutputResult(InferenceEngineCommon *engine,
                outputData.dimInfo.push_back(tmpDimInfo);
 
                if (tensor_info.data_type == INFERENCE_TENSOR_DATA_TYPE_UINT8) {
-                       auto *ori_buf = static_cast<unsigned char *>(outputs[layer.first].buffer);
-                       float *new_buf = new float[tensor_info.size];
+                       auto* ori_buf = static_cast<unsigned char*>(
+                           outputs[layer.first].buffer);
+                       float* new_buf = new float[tensor_info.size];
 
-                       for (int j = 0; j < (int) tensor_info.size; j++) {
-                               new_buf[j] = (float) ori_buf[j] / 255.0f;
+                       for (int j = 0; j < (int)tensor_info.size; j++) {
+                               new_buf[j] = (float)ori_buf[j] / 255.0f;
                        }
 
-                       // replace original buffer with new one, and release origin one.
+                       // replace original buffer with new one, and release
+                       // origin one.
                        outputs[layer.first].buffer = new_buf;
                        if (!outputs[layer.first].owner_is_backend) {
                                delete[] ori_buf;
                        }
                }
 
-               LOGI("tensor_info.data_type  %d", tensor_info.data_type );
-               outputData.data.push_back(static_cast<void *>(outputs[layer.first].buffer));
+               LOGI("tensor_info.data_type  %d", tensor_info.data_type);
+               outputData.data.push_back(
+                   static_cast<void*>(outputs[layer.first].buffer));
        }
 }
 
-static void printTensor(tensor_t &outputData) {
+static void printTensor(tensor_t& outputData)
+{
        std::vector<std::vector<int>> inferDimInfo(outputData.dimInfo);
-       std::vector<void *> inferResults(outputData.data.begin(),
-                                                                       outputData.data.end());
+       std::vector<void*> inferResults(outputData.data.begin(),
+                                       outputData.data.end());
        int count = inferDimInfo[0][1];
-       int idx= -1;
+       int idx = -1;
        float value = 0.0f;
-       float *prediction = reinterpret_cast<float *>(inferResults[0]);
+       float* prediction = reinterpret_cast<float*>(inferResults[0]);
 
        for (int i = 0; i < count; ++i) {
-               LOGI(" prediction[%d] %f",i, prediction[i]);
+               LOGI(" prediction[%d] %f", i, prediction[i]);
                if (value < prediction[i]) {
                        value = prediction[i];
                        idx = i;
@@ -259,12 +245,15 @@ static void printTensor(tensor_t &outputData) {
        LOGI("Best Prediction  : prediction[%d] : %f ", idx, value);
 }
 
-static void show_menu(const char *title) {
+static void show_menu(const char* title)
+{
        g_print("*******************************************\n");
        g_print("*  %-38s *\n", title);
        g_print("*-----------------------------------------*\n");
        g_print("*  %-38s *\n", "Input Tuning mode and Model file");
-       g_print("*  %-38s *\n", "ex)1 /usr/share/capi-media-vision/models/IC/tflite/ic_tflite_model.tflite");
+       g_print("*  %-38s *\n", "ex)1 "
+                               "/usr/share/capi-media-vision/models/IC/tflite/"
+                               "ic_tflite_model.tflite");
        g_print("*  %-38s *\n", "**caution**");
        g_print("*  %-38s *\n", "'READ' mode should be executed");
        g_print("*  %-38s *\n", "after generating tune file.");
@@ -279,7 +268,8 @@ static void show_menu(const char *title) {
        g_print("*******************************************\n\n");
 }
 
-int CheckTuneFile() {
+int CheckTuneFile(std::vector<std::string>& model_paths)
+{
        std::string tune_file = model_paths[0];
 
        tune_file.append(".tune");
@@ -287,7 +277,8 @@ int CheckTuneFile() {
        int fd = open(tune_file.c_str(), O_RDONLY);
 
        if (fd == -1) {
-               g_print("Tune file open failed!! (It could be genereation failure.)\n");
+               g_print("Tune file open failed!! (It could be genereation "
+                       "failure.)\n");
                return INFERENCE_ENGINE_ERROR_INVALID_OPERATION;
        }
 
@@ -295,52 +286,60 @@ int CheckTuneFile() {
 
        fsize = lseek(fd, 0, SEEK_END);
        g_print("************TUNE FILE GENERATED**************\n");
-       g_print("Location \n[%s] \nSize \n[%lld]\n", tune_file.c_str(), (long long)fsize);
+       g_print("Location \n[%s] \nSize \n[%lld]\n", tune_file.c_str(),
+               (long long)fsize);
        g_print("*-------------------------------------------*\n\n\n");
        close(fd);
 
        return INFERENCE_ENGINE_ERROR_NONE;
 }
 
-void CopyRandomMatrixToMemory(inference_engine_tensor_buffer &buffer, unsigned int size) {
+void CopyRandomMatrixToMemory(inference_engine_tensor_buffer& buffer, InferenceConfig tensorConfig)
+{
        std::random_device rd;
        std::mt19937 generator(rd());
        std::uniform_real_distribution<> distribution(1.0, 255.0);
 
-       for (int i=0; i<mConfig.mTensorInfo.height; i++) {
-               for (int j=0; j<mConfig.mTensorInfo.width; j++) {
-                       for (int k=0; k<mConfig.mTensorInfo.ch; k++) {
-                               rand_tensor[i][j][k] = distribution(generator);
+       int height = tensorConfig.mTensorInfo.height;
+       int width = tensorConfig.mTensorInfo.width;
+       int ch = tensorConfig.mTensorInfo.ch;
+       for (int h_offset = 0; h_offset < height; h_offset++)
+               for (int w_offset = 0; w_offset < width; w_offset++)
+                       for (int ch_offset = 0; ch_offset < ch; ch_offset++) {
+                               int offset = h_offset * width * ch + w_offset * ch + ch_offset;
+                               static_cast<float*>(buffer.buffer)[offset] = distribution(generator);
                        }
-               }
-       }
-
-       memcpy(buffer.buffer, rand_tensor, size);
 }
 
-static gboolean process(tensor_t& result_tensor, bool is_supported, bool is_actived, bool is_updated, inference_engine_cltuner_mode_e mode) {
-
-       inference_engine_config config = {
-               .backend_name = "armnn",
-               .backend_type = INFERENCE_BACKEND_ARMNN,
-               .target_devices = INFERENCE_TARGET_GPU
-       };
-
-       mBackend = new InferenceEngineCommon();
-
-       int ret = mBackend->EnableProfiler(true);
+static gboolean process(std::vector<std::string>& model_paths,
+                       tensor_t& result_tensor, Metadata& metadata,
+                       bool is_supported, bool is_actived, bool is_updated,
+                       inference_engine_cltuner_mode_e mode)
+{
+       InferenceEngineCommon* backend;
+       std::vector<std::string> models;
+       inference_engine_cltuner cltuner;
+       InferenceConfig tensorConfig;
+       inference_engine_config engineConfig = {
+           .backend_name = "armnn",
+           .backend_type = INFERENCE_BACKEND_ARMNN,
+           .target_devices = INFERENCE_TARGET_GPU};
+
+       backend = new InferenceEngineCommon();
+
+       int ret = backend->EnableProfiler(true);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("EnableProfiler(); failed");
                return FALSE;
        }
 
-       ret = mBackend->LoadConfigFile();
+       ret = backend->LoadConfigFile();
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("LoadConfigFile(); failed");
                return FALSE;
        }
 
-       ret = mBackend->BindBackend(&config);
+       ret = backend->BindBackend(&engineConfig);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("BindBackend failed");
                return FALSE;
@@ -348,7 +347,7 @@ static gboolean process(tensor_t& result_tensor, bool is_supported, bool is_acti
 
        inference_engine_capacity capacity;
 
-       ret = mBackend->GetBackendCapacity(&capacity);
+       ret = backend->GetBackendCapacity(&capacity);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("GetBackendCapacity failed");
                return FALSE;
@@ -360,14 +359,14 @@ static gboolean process(tensor_t& result_tensor, bool is_supported, bool is_acti
                cltuner.update = is_updated;
                cltuner.tuning_mode = mode;
 
-               ret = mBackend->SetCLTuner(&cltuner);
+               ret = backend->SetCLTuner(&cltuner);
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                        LOGE("SetCLTuner failed");
                        return FALSE;
                }
        }
 
-       ret = mBackend->SetTargetDevices(config.target_devices);
+       ret = backend->SetTargetDevices(engineConfig.target_devices);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("SetTargetDevices failed");
                return FALSE;
@@ -380,19 +379,19 @@ static gboolean process(tensor_t& result_tensor, bool is_supported, bool is_acti
                return FALSE;
        }
 
-       ret = ConfigureInputInfo();
+       ret = ConfigureInputInfo(backend, metadata, tensorConfig);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("ConfigureInputInfo failed");
                return FALSE;
        }
 
-       ret = ConfigureOutputInfo();
+       ret = ConfigureOutputInfo(backend, metadata, tensorConfig);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("ConfigureOutputInfo failed");
                return FALSE;
        }
 
-       ret = mBackend->Load(models, (inference_model_format_e) model_type);
+       ret = backend->Load(models, (inference_model_format_e)model_type);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("Load failed");
                return FALSE;
@@ -400,7 +399,7 @@ static gboolean process(tensor_t& result_tensor, bool is_supported, bool is_acti
 
        IETensorBuffer inputs, outputs;
 
-       ret = PrepareTensorBuffers(mBackend, inputs, outputs);
+       ret = PrepareTensorBuffers(backend, inputs, outputs);
        if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                LOGE("PrepareTensorBuffers failed");
                return FALSE;
@@ -408,45 +407,53 @@ static gboolean process(tensor_t& result_tensor, bool is_supported, bool is_acti
 
        for (auto& input : inputs) {
                LOGI("input.second.size :[%zu]", input.second.size);
-               CopyRandomMatrixToMemory(input.second, input.second.size);
+               CopyRandomMatrixToMemory(input.second, tensorConfig);
        }
 
-       std::chrono::system_clock::time_point StartTime = std::chrono::system_clock::now();
+       std::chrono::system_clock::time_point StartTime =
+           std::chrono::system_clock::now();
 
        for (int i = 0; i < MAX_INFERENCE_COUNT; i++) {
-               ret = mBackend->Run(inputs, outputs);
+               ret = backend->Run(inputs, outputs);
                if (ret != INFERENCE_ENGINE_ERROR_NONE) {
                        LOGE("Run failed");
                        return FALSE;
                }
        }
 
-       std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now() - StartTime);
+       std::chrono::milliseconds ms =
+           std::chrono::duration_cast<std::chrono::milliseconds>(
+               std::chrono::system_clock::now() - StartTime);
 
-       _FillOutputResult(mBackend, outputs, result_tensor);
+       _FillOutputResult(backend, outputs, result_tensor);
        CleanupTensorBuffers(inputs, outputs);
-       mBackend->UnbindBackend();
-       models.clear();
+       backend->UnbindBackend();
 
        if (mode == INFERENCE_ENGINE_CLTUNER_READ) {
                std::cout << "*****************************" << std::endl;
 
-               if ( is_actived == false)
-                       std::cout << "Inference Time "<< std::endl;
+               if (is_actived == false)
+                       std::cout << "Inference Time " << std::endl;
                else
-                       std::cout << "Average Inference Time with tune file" << std::endl;
+                       std::cout << "Average Inference Time with tune file"
+                                 << std::endl;
 
-               std::cout << ms.count()/10 << " ms (10 times average)" << std::endl;
+               std::cout << ms.count() / 10 << " ms (10 times average)"
+                         << std::endl;
                std::cout << "*****************************" << std::endl;
        }
 
        return TRUE;
 }
 
-static gboolean __interpret(char *cmd, char *cmd2)
+static gboolean __interpret(char* cmd, char* cmd2)
 {
+       std::vector<std::string> model_paths;
+       Metadata metadata;
        inference_engine_cltuner_mode_e tuning_mode;
        int res = 0;
+       char* model_path;
+       char* json_path;
 
        if (strncmp(cmd, "", 1) != 0) {
                if (strncmp(cmd, "q", 1) == 0)
@@ -459,38 +466,51 @@ static gboolean __interpret(char *cmd, char *cmd2)
                value = g_strsplit(cmd2, ".", 0);
                json_path = g_strdup_printf("%s.json", value[0]);
                model_paths.push_back(model_path);
+               g_free(model_path);
 
                LOGI("model_path : [%s]\n", model_path);
-               LOGI("jsonfile path [%s] \n",json_path);
+               LOGI("jsonfile path [%s] \n", json_path);
                g_strfreev(value);
 
-               res = ParseMetadata(std::string(json_path));
+               res = ParseMetadata(metadata, std::string(json_path));
+               g_free(json_path);
+
                if (res != INFERENCE_ENGINE_ERROR_NONE) {
                        LOGE("PrepareTensorBuffers failed");
                        return FALSE;
                }
 
                if (tuning_mode == INFERENCE_ENGINE_CLTUNER_READ) {
-                       if (!process(orig_tensor, false, false, false, tuning_mode)) {
-                               LOGE("Error is occurred while doing process. \n ");
+                       tensor_t orig_tensor;
+                       if (!process(model_paths, orig_tensor, metadata, false,
+                                    false, false, tuning_mode)) {
+                               LOGE("Error is occurred while doing process. "
+                                    "\n ");
                                return FALSE;
                        }
 
                        printTensor(orig_tensor);
 
-                       if (!process(tuned_tensor, true, true, false, tuning_mode)) {
-                               LOGE("Error is occurred while doing process with tune file. \n ");
+                       tensor_t tuned_tensor;
+                       if (!process(model_paths, tuned_tensor, metadata, true,
+                                    true, false, tuning_mode)) {
+                               LOGE("Error is occurred while doing process "
+                                    "with tune file. "
+                                    "\n ");
                                return FALSE;
                        }
 
                        printTensor(tuned_tensor);
                } else {
-                       if (!process(tuned_tensor, true, true, true, tuning_mode)) {
-                               LOGE("Error is occurred while generating tune file. \n ");
+                       tensor_t tuned_tensor;
+                       if (!process(model_paths, tuned_tensor, metadata, true,
+                                    true, true, tuning_mode)) {
+                               LOGE("Error is occurred while generating tune "
+                                    "file. \n ");
                                return FALSE;
                        }
 
-                       res = CheckTuneFile();
+                       res = CheckTuneFile(model_paths);
                        if (res != INFERENCE_ENGINE_ERROR_NONE) {
                                LOGE("CheckTuneFile failed");
                                return FALSE;
@@ -501,7 +521,7 @@ static gboolean __interpret(char *cmd, char *cmd2)
        return TRUE;
 }
 
-int main ()
+int main()
 {
        show_menu("CLtuner Generator");
 
@@ -515,27 +535,30 @@ int main ()
 
        char file_path[MAX_STR];
 
-       ret = scanf("%s",file_path);
-       if (ret == 0 ) {
+       ret = scanf("%s", file_path);
+       if (ret == 0) {
                g_print("wrong input.\n");
                return -1;
        }
 
        int _mode = atoi(mode);
 
-       if (_mode < 0 || _mode > 3 ) {
-               g_print("Check tuning mode. It could be out of between RAPID and EXHAUST mode.(1~3)\n");
+       if (_mode < 0 || _mode > 3) {
+               g_print(
+                   "Check tuning mode. It could be out of between RAPID and "
+                   "EXHAUST mode.(1~3)\n");
                return -1;
        }
 
-       char **value = g_strsplit(file_path, ".", 0);
+       char** value = g_strsplit(file_path, ".", 0);
 
        if (value[0] == NULL || value[1] == NULL) {
-               g_print("Check filepath. Please write full path. i.g /root/model.tflite\n");
+               g_print("Check filepath. Please write full path. i.g "
+                       "/root/model.tflite\n");
                return -1;
        }
 
-       __interpret(mode,file_path);
+       __interpret(mode, file_path);
 
        return 0;
 }