mv_machine_learning: code cleanup for refactoring 95/264195/2
author: Inki Dae <inki.dae@samsung.com>
Tue, 14 Sep 2021 06:55:36 +0000 (15:55 +0900)
committer: Inki Dae <inki.dae@samsung.com>
Wed, 15 Sep 2021 02:58:31 +0000 (11:58 +0900)
Cleaned up mv_inference_open.cpp module for code refactoring.
The cleanups are:
 - correct variable names that do not clarify what they mean.
 - change goto label names to be meaningful with respect to the resource release they perform.
 - drop unnecessary code.
 - move code so that relevant code could be grouped together.

This patch is just one step of what we have to do for code refactoring.

Change-Id: Ia744508e0318d6558a0f4901a90d7ac8a14634e2
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_machine_learning/mv_inference/inference/src/mv_inference_open.cpp

index da295ab..85ff93c 100644 (file)
@@ -75,6 +75,7 @@ static int check_mv_inference_engine_version(mv_engine_config_h engine_config,
 mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
+
        return pInfer->GetEngineConfig();
 }
 
@@ -125,9 +126,6 @@ int mv_inference_configure_model_open(mv_inference_h infer,
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-
        char *modelConfigFilePath = NULL;
        char *modelWeightFilePath = NULL;
        char *modelUserFilePath = NULL;
@@ -136,12 +134,12 @@ int mv_inference_configure_model_open(mv_inference_h infer,
        int backendType = 0;
        size_t userFileLength = 0;
 
-       ret = mv_engine_config_get_string_attribute(
+       int ret = mv_engine_config_get_string_attribute(
                        engine_config, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
                        &modelConfigFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model configuration file path");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_string_attribute(
@@ -149,7 +147,7 @@ int mv_inference_configure_model_open(mv_inference_h infer,
                        &modelWeightFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model weight file path");
-               goto _ERROR_;
+               goto release_model_config_file_path;
        }
 
        ret = mv_engine_config_get_string_attribute(
@@ -157,7 +155,7 @@ int mv_inference_configure_model_open(mv_inference_h infer,
                        &modelUserFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model user file path");
-               goto _ERROR_;
+               goto release_model_weight_file_path;
        }
 
        ret = mv_engine_config_get_string_attribute(
@@ -165,27 +163,27 @@ int mv_inference_configure_model_open(mv_inference_h infer,
                        &modelMetaFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model meta file path");
-               goto _ERROR_;
+               goto release_model_user_file_path;
        }
 
        ret = mv_engine_config_get_double_attribute(
                        engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &modelMeanValue);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model mean value");
-               goto _ERROR_;
+               goto release_model_meta_file_path;
        }
 
        ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference backend type");
-               goto _ERROR_;
+               goto release_model_meta_file_path;
        }
 
        if (access(modelWeightFilePath, F_OK)) {
                LOGE("weightFilePath in [%s] ", modelWeightFilePath);
                ret = MEDIA_VISION_ERROR_INVALID_PATH;
-               goto _ERROR_;
+               goto release_model_meta_file_path;
        }
 
        if ((backendType > MV_INFERENCE_BACKEND_NONE &&
@@ -198,15 +196,16 @@ int mv_inference_configure_model_open(mv_inference_h infer,
                if (access(modelConfigFilePath, F_OK)) {
                        LOGE("modelConfigFilePath in [%s] ", modelConfigFilePath);
                        ret = MEDIA_VISION_ERROR_INVALID_PATH;
-                       goto _ERROR_;
+                       goto release_model_meta_file_path;
                }
        }
 
        userFileLength = strlen(modelUserFilePath);
+
        if (userFileLength > 0 && access(modelUserFilePath, F_OK)) {
                LOGE("categoryFilePath in [%s] ", modelUserFilePath);
                ret = MEDIA_VISION_ERROR_INVALID_PATH;
-               goto _ERROR_;
+               goto release_model_meta_file_path;
        }
 
        pInfer->ConfigureModelFiles(std::string(modelConfigFilePath),
@@ -215,13 +214,13 @@ int mv_inference_configure_model_open(mv_inference_h infer,
 
        if (std::string(modelMetaFilePath).empty()) {
                LOGW("Skip ParseMetadata and run without Metadata");
-               goto _ERROR_;
+               goto release_model_meta_file_path;
        }
 
        if (!IsJsonFile(std::string(modelMetaFilePath))) {
                ret = MEDIA_VISION_ERROR_INVALID_PATH;
                LOGE("Model meta file should be json");
-               goto _ERROR_;
+               goto release_model_meta_file_path;
        }
 
        ret = pInfer->ParseMetadata(std::string(modelMetaFilePath));
@@ -229,19 +228,23 @@ int mv_inference_configure_model_open(mv_inference_h infer,
                LOGE("Fail to ParseMetadata");
        }
 
-_ERROR_:
-       if (modelConfigFilePath)
-               free(modelConfigFilePath);
-
-       if (modelWeightFilePath)
-               free(modelWeightFilePath);
+release_model_meta_file_path:
+       if (modelMetaFilePath)
+               free(modelMetaFilePath);
 
+release_model_user_file_path:
        if (modelUserFilePath)
                free(modelUserFilePath);
 
-       if (modelMetaFilePath)
-               free(modelMetaFilePath);
+release_model_weight_file_path:
+       if (modelWeightFilePath)
+               free(modelWeightFilePath);
+
+release_model_config_file_path:
+       if (modelConfigFilePath)
+               free(modelConfigFilePath);
 
+out_of_function:
        LOGI("LEAVE");
 
        return ret;
@@ -253,54 +256,47 @@ int mv_inference_configure_tensor_info_open(mv_inference_h infer,
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       int tensorWidth, tensorHeight, tensorDim, tensorCh;
+       int tensorWidth, tensorHeight, tensorCh;
        double meanValue, stdValue;
 
-       // This should be one. only one batch is supported
-       tensorDim = 1;
-       ret = mv_engine_config_get_int_attribute(
+       int ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_INPUT_TENSOR_WIDTH, &tensorWidth);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor width");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_INPUT_TENSOR_HEIGHT, &tensorHeight);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor height");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_INPUT_TENSOR_CHANNELS, &tensorCh);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor channels");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_double_attribute(
                        engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &meanValue);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get meanValue");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_double_attribute(
                        engine_config, MV_INFERENCE_MODEL_STD_VALUE, &stdValue);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get stdValue");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
-       pInfer->ConfigureTensorInfo(tensorWidth, tensorHeight, tensorDim, tensorCh,
+       pInfer->ConfigureTensorInfo(tensorWidth, tensorHeight, 1, tensorCh,
                                                                stdValue, meanValue);
-
-_ERROR_:
-
+out_of_function:
        LOGI("LEAVE");
 
        return ret;
@@ -312,76 +308,70 @@ int mv_inference_configure_input_info_open(mv_inference_h infer,
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       int tensorWidth, tensorHeight, tensorDim, tensorCh;
+       int tensorWidth, tensorHeight, tensorCh;
        double meanValue, stdValue;
        char *node_name = NULL;
        int dataType = 0;
 
-       // This should be one. only one batch is supported
-       tensorDim = 1;
-       ret = mv_engine_config_get_int_attribute(
+       int ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_INPUT_TENSOR_WIDTH, &tensorWidth);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor width");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_INPUT_TENSOR_HEIGHT, &tensorHeight);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor height");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_INPUT_TENSOR_CHANNELS, &tensorCh);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor channels");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_double_attribute(
                        engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &meanValue);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get meanValue");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_double_attribute(
                        engine_config, MV_INFERENCE_MODEL_STD_VALUE, &stdValue);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get stdValue");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_INPUT_DATA_TYPE, &dataType);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get a input tensor data type");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_string_attribute(
                        engine_config, MV_INFERENCE_INPUT_NODE_NAME, &node_name);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor width");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        pInfer->ConfigureInputInfo(
-                       tensorWidth, tensorHeight, tensorDim, tensorCh, stdValue, meanValue,
+                       tensorWidth, tensorHeight, 1, tensorCh, stdValue, meanValue,
                        dataType, std::vector<std::string>(1, std::string(node_name)));
 
-_ERROR_:
-
        if (node_name) {
                free(node_name);
                node_name = NULL;
        }
 
+out_of_function:
        LOGI("LEAVE");
 
        return ret;
@@ -395,29 +385,28 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
        Inference *pInfer = static_cast<Inference *>(infer);
        int backendType = 0;
        int targetTypes = 0;
-       int ret = MEDIA_VISION_ERROR_NONE;
 
        pInfer->SetEngineConfig(engine_config);
 
-       ret = mv_engine_config_get_int_attribute(
+       int ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference backend type");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &targetTypes);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference target type");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        ret = pInfer->ConfigureBackendType(
                        (mv_inference_backend_type_e) backendType);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to configure a backend type.");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        bool is_new_version;
@@ -430,13 +419,13 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
        // Ps. this function will be dropped with deprecated code version-after-next of Tizen.
        ret = check_mv_inference_engine_version(engine_config, &is_new_version);
        if (ret != MEDIA_VISION_ERROR_NONE)
-               goto _ERROR_;
+               goto out_of_function;
 
        // Convert old type to new one and then use it if is_new_version is false
        if (pInfer->ConfigureTargetTypes(targetTypes, is_new_version) !=
                MEDIA_VISION_ERROR_NONE) {
                LOGE("Tried to configure invalid target types.");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        // Create a inference-engine-common class object and load its corresponding library.
@@ -445,7 +434,7 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
        ret = pInfer->Bind();
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to bind a backend engine.");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        if (!pInfer->IsTargetDeviceSupported(targetTypes)) {
@@ -453,8 +442,9 @@ int mv_inference_configure_engine_open(mv_inference_h infer,
                LOGE("Tried to configure invalid target types.");
        }
 
-_ERROR_:
+out_of_function:
        LOGI("LEAVE");
+
        return ret;
 }
 
@@ -464,21 +454,20 @@ int mv_inference_configure_output_open(mv_inference_h infer,
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
-
        int maxOutput = 0;
-       int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_engine_config_get_int_attribute(
+       int ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference output maximum numbers");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        pInfer->ConfigureOutput(maxOutput);
 
+out_of_function:
        LOGI("LEAVE");
-_ERROR_:
+
        return ret;
 }
 
@@ -488,21 +477,20 @@ int mv_inference_configure_confidence_threshold_open(
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
-
        double threshold = 0;
-       int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_engine_config_get_double_attribute(
+       int ret = mv_engine_config_get_double_attribute(
                        engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference confidence threshold value");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        pInfer->ConfigureThreshold(threshold);
 
+out_of_function:
        LOGI("LEAVE");
-_ERROR_:
+
        return ret;
 }
 
@@ -512,16 +500,14 @@ int mv_inference_configure_post_process_info_open(
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
-
        int maxOutput = 0;
        double threshold = 0;
-       int ret = MEDIA_VISION_ERROR_NONE;
 
-       ret = mv_engine_config_get_int_attribute(
+       int ret = mv_engine_config_get_int_attribute(
                        engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference output maximum numbers");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        pInfer->ConfigureOutput(maxOutput);
@@ -530,13 +516,14 @@ int mv_inference_configure_post_process_info_open(
                        engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference confidence threshold value");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        pInfer->ConfigureThreshold(threshold);
 
+out_of_function:
        LOGI("LEAVE");
-_ERROR_:
+
        return ret;
 }
 
@@ -546,17 +533,16 @@ int mv_inference_configure_output_info_open(mv_inference_h infer,
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
        int idx = 0;
        char **node_names = NULL;
        int size = 0;
        std::vector<std::string> names;
-       ret = mv_engine_config_get_array_string_attribute(
+
+       int ret = mv_engine_config_get_array_string_attribute(
                        engine_config, MV_INFERENCE_OUTPUT_NODE_NAMES, &node_names, &size);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get _output_node_names");
-               goto _ERROR_;
+               goto out_of_function;
        }
 
        for (idx = 0; idx < size; ++idx)
@@ -564,16 +550,15 @@ int mv_inference_configure_output_info_open(mv_inference_h infer,
 
        pInfer->ConfigureOutputInfo(names);
 
-_ERROR_:
-
        if (node_names) {
-               for (idx = 0; idx < size; ++idx) {
+               for (idx = 0; idx < size; ++idx)
                        free(node_names[idx]);
-               }
+
                free(node_names);
                node_names = NULL;
        }
 
+out_of_function:
        LOGI("LEAVE");
 
        return ret;
@@ -585,13 +570,11 @@ int mv_inference_prepare_open(mv_inference_h infer)
 
        Inference *pInfer = static_cast<Inference *>(infer);
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
        // Pass parameters needed to load model files to a backend engine.
-       ret = pInfer->Prepare();
+       int ret = pInfer->Prepare();
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to prepare inference");
-               return ret;
+               goto out_of_function;
        }
 
        // Request to load model files to a backend engine.
@@ -599,6 +582,7 @@ int mv_inference_prepare_open(mv_inference_h infer)
        if (ret != MEDIA_VISION_ERROR_NONE)
                LOGE("Fail to load model files.");
 
+out_of_function:
        LOGI("LEAVE");
 
        return ret;
@@ -611,12 +595,8 @@ int mv_inference_foreach_supported_engine_open(
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       //bool isSupported = false;
-       //char str[1024] = {'\0'};
        std::pair<std::string, bool> backend;
+
        for (int i = 0; i < MV_INFERENCE_BACKEND_MAX; ++i) {
                backend = pInfer->GetSupportedInferenceBackend(i);
                callback((backend.first).c_str(), backend.second, user_data);
@@ -624,7 +604,7 @@ int mv_inference_foreach_supported_engine_open(
 
        LOGI("LEAVE");
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_image_classify_open(
@@ -632,9 +612,6 @@ int mv_inference_image_classify_open(
                mv_inference_image_classified_cb classified_cb, void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-       int numberOfOutputs = 0;
        std::vector<mv_source_h> sources;
        std::vector<mv_rectangle_s> rects;
 
@@ -643,7 +620,7 @@ int mv_inference_image_classify_open(
        if (roi != NULL)
                rects.push_back(*roi);
 
-       ret = pInfer->Run(sources, rects);
+       int ret = pInfer->Run(sources, rects);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to run inference");
                return ret;
@@ -657,10 +634,8 @@ int mv_inference_image_classify_open(
                return ret;
        }
 
-       numberOfOutputs = classificationResults.number_of_classes;
+       int numberOfOutputs = classificationResults.number_of_classes;
 
-       int *indices = classificationResults.indices.data();
-       float *confidences = classificationResults.confidences.data();
        static const int START_CLASS_NUMBER = 10;
        static std::vector<const char *> names(START_CLASS_NUMBER);
 
@@ -675,6 +650,9 @@ int mv_inference_image_classify_open(
                names[n] = classificationResults.names[n].c_str();
        }
 
+       int *indices = classificationResults.indices.data();
+       float *confidences = classificationResults.confidences.data();
+
        classified_cb(source, numberOfOutputs, indices, names.data(), confidences,
                                  user_data);
 
@@ -686,31 +664,25 @@ int mv_inference_object_detect_open(mv_source_h source, mv_inference_h infer,
                                                                        void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-       int numberOfOutputs = 0;
        std::vector<mv_source_h> sources;
        std::vector<mv_rectangle_s> rects;
 
        sources.push_back(source);
-
-       ret = pInfer->Run(sources, rects);
+       int ret = pInfer->Run(sources, rects);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to run inference");
                return ret;
        }
 
        ObjectDetectionResults objectDetectionResults;
+
        ret = pInfer->GetObjectDetectionResults(&objectDetectionResults);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference results");
                return ret;
        }
 
-       numberOfOutputs = objectDetectionResults.number_of_objects;
-
-       int *indices = objectDetectionResults.indices.data();
-       float *confidences = objectDetectionResults.confidences.data();
+       int numberOfOutputs = objectDetectionResults.number_of_objects;
        static const int START_OBJECT_NUMBER = 20;
        static std::vector<const char *> names(START_OBJECT_NUMBER);
        static std::vector<mv_rectangle_s> locations(START_OBJECT_NUMBER);
@@ -730,6 +702,9 @@ int mv_inference_object_detect_open(mv_source_h source, mv_inference_h infer,
                locations[n].height = objectDetectionResults.locations[n].height;
        }
 
+       int *indices = objectDetectionResults.indices.data();
+       float *confidences = objectDetectionResults.confidences.data();
+
        detected_cb(source, numberOfOutputs, indices, names.data(), confidences,
                                locations.data(), user_data);
 
@@ -741,30 +716,26 @@ int mv_inference_face_detect_open(mv_source_h source, mv_inference_h infer,
                                                                  void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-       int numberOfOutputs = 0;
        std::vector<mv_source_h> sources;
        std::vector<mv_rectangle_s> rects;
 
        sources.push_back(source);
 
-       ret = pInfer->Run(sources, rects);
+       int ret = pInfer->Run(sources, rects);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to run inference");
                return ret;
        }
 
        FaceDetectionResults faceDetectionResults;
+
        ret = pInfer->GetFaceDetectionResults(&faceDetectionResults);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference results");
                return ret;
        }
 
-       numberOfOutputs = faceDetectionResults.number_of_faces;
-
-       float *confidences = faceDetectionResults.confidences.data();
+       int numberOfOutputs = faceDetectionResults.number_of_faces;
        std::vector<mv_rectangle_s> locations(numberOfOutputs);
 
        for (int n = 0; n < numberOfOutputs; ++n) {
@@ -774,6 +745,8 @@ int mv_inference_face_detect_open(mv_source_h source, mv_inference_h infer,
                locations[n].height = faceDetectionResults.locations[n].height;
        }
 
+       float *confidences = faceDetectionResults.confidences.data();
+
        detected_cb(source, numberOfOutputs, confidences, locations.data(),
                                user_data);
 
@@ -785,14 +758,9 @@ int mv_inference_facial_landmark_detect_open(
                mv_inference_facial_landmark_detected_cb detected_cb, void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-       int numberOfLandmarks = 0;
-       std::vector<mv_source_h> sources;
-       std::vector<mv_rectangle_s> rects;
-
        unsigned int width, height;
-       ret = mv_source_get_width(source, &width);
+
+       int ret = mv_source_get_width(source, &width);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get width");
                return ret;
@@ -804,6 +772,9 @@ int mv_inference_facial_landmark_detect_open(
                return ret;
        }
 
+       std::vector<mv_source_h> sources;
+       std::vector<mv_rectangle_s> rects;
+
        sources.push_back(source);
 
        if (roi != NULL)
@@ -816,6 +787,7 @@ int mv_inference_facial_landmark_detect_open(
        }
 
        FacialLandMarkDetectionResults facialLandMarkDetectionResults;
+
        ret = pInfer->GetFacialLandMarkDetectionResults(
                        &facialLandMarkDetectionResults, width, height);
        if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -823,8 +795,7 @@ int mv_inference_facial_landmark_detect_open(
                return ret;
        }
 
-       numberOfLandmarks = facialLandMarkDetectionResults.number_of_landmarks;
-
+       int numberOfLandmarks = facialLandMarkDetectionResults.number_of_landmarks;
        std::vector<mv_point_s> locations(numberOfLandmarks);
 
        for (int n = 0; n < numberOfLandmarks; ++n) {
@@ -842,13 +813,9 @@ int mv_inference_pose_landmark_detect_open(
                mv_inference_pose_landmark_detected_cb detected_cb, void *user_data)
 {
        Inference *pInfer = static_cast<Inference *>(infer);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-       std::vector<mv_source_h> sources;
-       std::vector<mv_rectangle_s> rects;
-
        unsigned int width, height;
-       ret = mv_source_get_width(source, &width);
+
+       int ret = mv_source_get_width(source, &width);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get width");
                return ret;
@@ -860,6 +827,9 @@ int mv_inference_pose_landmark_detect_open(
                return ret;
        }
 
+       std::vector<mv_source_h> sources;
+       std::vector<mv_rectangle_s> rects;
+
        sources.push_back(source);
 
        if (roi != NULL)
@@ -872,6 +842,7 @@ int mv_inference_pose_landmark_detect_open(
        }
 
        mv_inference_pose_result_h result = NULL;
+
        ret = pInfer->GetPoseLandmarkDetectionResults(
                        &result, width, height);
        if (ret != MEDIA_VISION_ERROR_NONE) {
@@ -879,13 +850,14 @@ int mv_inference_pose_landmark_detect_open(
                return ret;
        }
 
-       mv_inference_pose_s *tmp = static_cast<mv_inference_pose_s *>(result);
-       for (int pose = 0; pose < tmp->number_of_poses; ++pose) {
-               for (int index = 0; index < tmp->number_of_landmarks_per_pose; ++index) {
+       mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
+
+       for (int pose = 0; pose < pose_obj->number_of_poses; ++pose) {
+               for (int index = 0; index < pose_obj->number_of_landmarks_per_pose; ++index) {
                        LOGI("PoseIdx[%2d]: x[%d], y[%d], score[%.3f]", index,
-                                                                               tmp->landmarks[pose][index].point.x,
-                                                                               tmp->landmarks[pose][index].point.y,
-                                                                               tmp->landmarks[pose][index].score);
+                                                                               pose_obj->landmarks[pose][index].point.x,
+                                                                               pose_obj->landmarks[pose][index].point.y,
+                                                                               pose_obj->landmarks[pose][index].score);
                }
        }
 
@@ -899,9 +871,7 @@ int mv_inference_pose_get_number_of_poses_open(
                int *number_of_poses)
 {
        mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
-
        *number_of_poses = handle->number_of_poses;
-
        LOGI("%d", *number_of_poses);
 
        return MEDIA_VISION_ERROR_NONE;
@@ -912,9 +882,7 @@ int mv_inference_pose_get_number_of_landmarks_open(
                int *number_of_landmarks)
 {
        mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
-
        *number_of_landmarks = handle->number_of_landmarks_per_pose;
-
        LOGI("%d", *number_of_landmarks);
 
        return MEDIA_VISION_ERROR_NONE;
@@ -927,18 +895,16 @@ int mv_inference_pose_get_landmark_open(
                mv_point_s *location,
                float *score)
 {
-       mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
+       mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
 
-       if (pose_index < 0 || pose_index >= handle->number_of_poses)
+       if (pose_index < 0 || pose_index >= pose_obj->number_of_poses)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
-       if (part_index < 0 || part_index >= handle->number_of_landmarks_per_pose)
+       if (part_index < 0 || part_index >= pose_obj->number_of_landmarks_per_pose)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
-       *location = handle->landmarks[pose_index][part_index].point;
-
-       *score = handle->landmarks[pose_index][part_index].score;
-
+       *location = pose_obj->landmarks[pose_index][part_index].point;
+       *score = pose_obj->landmarks[pose_index][part_index].score;
        LOGI("[%d]:(%dx%d) - %.4f", pose_index, location->x, location->y, *score);
 
        return MEDIA_VISION_ERROR_NONE;
@@ -949,13 +915,12 @@ int mv_inference_pose_get_label_open(
                int pose_index,
                int *label)
 {
-       mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
+       mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
 
-       if (pose_index < 0 || pose_index >= handle->number_of_poses)
+       if (pose_index < 0 || pose_index >= pose_obj->number_of_poses)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
 
-       *label = handle->landmarks[pose_index][0].label;
-
+       *label = pose_obj->landmarks[pose_index][0].label;
        LOGI("[%d]: label(%d)", pose_index, *label);
 
        return MEDIA_VISION_ERROR_NONE;
@@ -1000,8 +965,6 @@ int mv_pose_set_from_file_open(mv_pose_h pose,
 {
        Posture *pPose = static_cast<Posture *>(pose);
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
        // check file
        if (access(motionCaptureFilePath, F_OK) || access(motionMappingFilePath, F_OK)) {
         LOGE("Invalid Motion Capture file path [%s]", motionCaptureFilePath);
@@ -1010,7 +973,7 @@ int mv_pose_set_from_file_open(mv_pose_h pose,
         return MEDIA_VISION_ERROR_INVALID_PATH;
     }
 
-       ret = pPose->setPoseFromFile(std::string(motionCaptureFilePath),
+       int ret = pPose->setPoseFromFile(std::string(motionCaptureFilePath),
                                                                std::string(motionMappingFilePath));
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to setPoseFromFile");
@@ -1023,11 +986,7 @@ int mv_pose_set_from_file_open(mv_pose_h pose,
 int mv_pose_compare_open(mv_pose_h pose, mv_inference_pose_result_h action, int parts, float *score)
 {
        Posture *pPose = static_cast<Posture *>(pose);
-
-       int ret = MEDIA_VISION_ERROR_NONE;
-
        std::vector<std::pair<bool, cv::Point>> actionParts;
-
        mv_inference_pose_s *pAction = static_cast<mv_inference_pose_s *>(action);
 
        for (int k = 0; k < HUMAN_POSE_MAX_LANDMARKS; ++k) {
@@ -1041,7 +1000,7 @@ int mv_pose_compare_open(mv_pose_h pose, mv_inference_pose_result_h action, int
 
        }
 
-       ret = pPose->compare(parts, actionParts, score);
+       int ret = pPose->compare(parts, actionParts, score);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to compare");
                return ret;