[draft] mv_inference: refactoring 81/304781/10
authorSeungbae Shin <seungbae.shin@samsung.com>
Mon, 22 Jan 2024 10:50:47 +0000 (19:50 +0900)
committerSeungbae Shin <seungbae.shin@samsung.com>
Thu, 1 Feb 2024 05:47:03 +0000 (14:47 +0900)
Change-Id: I6a564fc04fd7181853025bd089c58d8e8ec1f369

mv_machine_learning/inference/include/Inference.h
mv_machine_learning/inference/src/Inference.cpp
mv_machine_learning/inference/src/mv_inference.cpp

index 26c1dddaa6f5c2f01fadc2b11080c3c0f3539102..576a63024852a98ce4b468512f61364113246b71 100644 (file)
@@ -154,9 +154,10 @@ public:
                 * @since_tizen 6.0
                 */
        int configureInputInfo(int width, int height, int dim, int ch, double stdValue, double meanValue, int dataType,
-                                                  const std::vector<std::string> names);
+                                                  const std::vector<std::string> &names);
 
-       int configureOutputInfo(std::vector<std::string> names, std::vector<inference_engine_tensor_info> &tensors_info);
+       int configureOutputInfo(std::vector<std::string> names,
+                                                       const std::vector<inference_engine_tensor_info> &tensors_info);
 
        /**
         * @brief Configure input information from model meta file.
index e0d6131720846bb92ef388cfb559cfb1fa583e70..e3372c6c15c6506272916d4fb5971ac7aa710a99 100644 (file)
@@ -260,7 +260,7 @@ void Inference::configureModelFiles(const std::string modelConfigFilePath, const
 }
 
 int Inference::configureInputInfo(int width, int height, int dim, int ch, double stdValue, double meanValue,
-                                                                 int dataType, const std::vector<std::string> names)
+                                                                 int dataType, const std::vector<std::string> &names)
 {
        LOGI("ENTER");
 
@@ -405,7 +405,7 @@ int Inference::setInputInfo()
 }
 
 int Inference::configureOutputInfo(const std::vector<std::string> names,
-                                                                  std::vector<inference_engine_tensor_info> &tensors_info)
+                                                                  const std::vector<inference_engine_tensor_info> &tensors_info)
 {
        LOGI("ENTER");
 
index 84dc65a7ef070e4f842171ca36801f5a3bf3eb45..d9f76682623e1a79b28ea0c8337745e58c7f06fe 100644 (file)
@@ -58,230 +58,226 @@ static bool IsConfigFilePathRequired(const int target_device_type, const int bac
 
 mv_engine_config_h mv_inference_get_engine_config(mv_inference_h infer)
 {
-       Inference *pInfer = static_cast<Inference *>(infer);
+       auto pInfer = static_cast<Inference *>(infer);
+       if (!pInfer) {
+               LOGE("Invalid infer");
+               return nullptr;
+       }
 
        return pInfer->getEngineConfig();
 }
 
 static int configure_tensor_info_from_meta_file(Inference *pInfer, mv_engine_config_h engine_config)
 {
-       char *modelMetaFilePath = NULL;
+       char *model_meta_file_path {};
 
-       int ret =
-                       mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_META_FILE_PATH, &modelMetaFilePath);
+       int ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_META_FILE_PATH,
+                                                                                                       &model_meta_file_path);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get model meta file path");
-               goto out_of_function;
+               return ret;
        }
 
-       if (std::string(modelMetaFilePath).empty()) {
+       if (!model_meta_file_path) {
+               LOGE("Invalid model_meta_file_path");
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
+       }
+
+       std::string modelMetaFilePath = model_meta_file_path;
+       free(model_meta_file_path);
+
+       if (modelMetaFilePath.empty()) {
                LOGW("Skip ParseMetadata and run without Metadata");
-               ret = MEDIA_VISION_ERROR_INVALID_OPERATION;
-               goto release_model_meta_file_path;
+               return MEDIA_VISION_ERROR_INVALID_OPERATION;
        }
 
-       if (!IsJsonFile(std::string(modelMetaFilePath))) {
-               ret = MEDIA_VISION_ERROR_INVALID_PATH;
+       if (!IsJsonFile(modelMetaFilePath)) {
                LOGE("Model meta file should be json");
-               goto release_model_meta_file_path;
+               return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       ret = pInfer->parseMetadata(std::string(modelMetaFilePath));
+       ret = pInfer->parseMetadata(modelMetaFilePath);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to ParseMetadata");
+               return ret;
        }
 
-release_model_meta_file_path:
-       free(modelMetaFilePath);
-
-out_of_function:
-       LOGI("LEAVE");
+       MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 static int configure_model(Inference *pInfer, mv_engine_config_h engine_config)
 {
-       LOGI("ENTER");
+       MEDIA_VISION_FUNCTION_ENTER();
 
-       char *modelConfigFilePath = NULL;
-       char *modelWeightFilePath = NULL;
-       char *modelUserFilePath = NULL;
-       int backendType = 0;
-       size_t userFileLength = 0;
+       int ret = MEDIA_VISION_ERROR_NONE;
+       char *modelConfigFilePath {};
+       char *modelWeightFilePath {};
+       char *modelUserFilePath {};
+
+       try {
+               // modelConfigFilePath
+               int backendType = 0;
+               ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
+               if (ret != MEDIA_VISION_ERROR_NONE)
+                       throw std::runtime_error("Fail to get inference backend type");
+
+               if (!IsValidBackendType(backendType)) {
+                       ret = MEDIA_VISION_ERROR_INVALID_PARAMETER;
+                       throw std::runtime_error("Invalid backend type:" + std::to_string(backendType));
+               }
 
-       int ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
+               ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_CONFIGURATION_FILE_PATH,
                                                                                                        &modelConfigFilePath);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to get model configuration file path");
-               goto out_of_function;
-       }
-
-       ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
-                                                                                               &modelWeightFilePath);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to get model weight file path");
-               goto release_model_config_file_path;
-       }
+               if (ret != MEDIA_VISION_ERROR_NONE)
+                       throw std::runtime_error("Fail to get model configuration file path");
+
+               if (IsConfigFilePathRequired(pInfer->getTargetType(), backendType)) {
+                       if (access(modelConfigFilePath, F_OK)) {
+                               ret = MEDIA_VISION_ERROR_INVALID_PATH;
+                               throw std::runtime_error("modelConfigFilePath in:" + std::string(modelConfigFilePath));
+                       }
+               }
 
-       ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_USER_FILE_PATH, &modelUserFilePath);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to get model user file path");
-               goto release_model_weight_file_path;
-       }
+               // modelWeightFilePath
+               ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_WEIGHT_FILE_PATH,
+                                                                                                       &modelWeightFilePath);
+               if (ret != MEDIA_VISION_ERROR_NONE)
+                       throw std::runtime_error("Fail to get model weight file path");
 
-       ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to get inference backend type");
-               goto release_model_user_file_path;
-       }
+               if (access(modelWeightFilePath, F_OK)) {
+                       ret = MEDIA_VISION_ERROR_INVALID_PATH;
+                       throw std::runtime_error("weightFilePath in:" + std::string(modelWeightFilePath));
+               }
 
-       if (!IsValidBackendType(backendType)) {
-               LOGE("Invalid backend type(%d).", backendType);
-               ret = MEDIA_VISION_ERROR_INVALID_PARAMETER;
-               goto release_model_user_file_path;
-       }
+               // modelUserFilePath
+               ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_MODEL_USER_FILE_PATH,
+                                                                                                       &modelUserFilePath);
+               if (ret != MEDIA_VISION_ERROR_NONE)
+                       throw std::runtime_error("Fail to get model user file path");
 
-       if (access(modelWeightFilePath, F_OK)) {
-               LOGE("weightFilePath in [%s] ", modelWeightFilePath);
-               ret = MEDIA_VISION_ERROR_INVALID_PATH;
-               goto release_model_user_file_path;
-       }
-
-       if (IsConfigFilePathRequired(pInfer->getTargetType(), backendType)) {
-               if (access(modelConfigFilePath, F_OK)) {
-                       LOGE("modelConfigFilePath in [%s] ", modelConfigFilePath);
+               if (strlen(modelUserFilePath) > 0 && access(modelUserFilePath, F_OK)) {
                        ret = MEDIA_VISION_ERROR_INVALID_PATH;
-                       goto release_model_user_file_path;
+                       throw std::runtime_error("categoryFilePath in:" + std::string(modelUserFilePath));
                }
-       }
 
-       userFileLength = strlen(modelUserFilePath);
+               pInfer->configureModelFiles(modelConfigFilePath, modelWeightFilePath, modelUserFilePath);
 
-       if (userFileLength > 0 && access(modelUserFilePath, F_OK)) {
-               LOGE("categoryFilePath in [%s] ", modelUserFilePath);
-               ret = MEDIA_VISION_ERROR_INVALID_PATH;
-               goto release_model_user_file_path;
-       }
+               MEDIA_VISION_FUNCTION_LEAVE();
 
-       pInfer->configureModelFiles(std::string(modelConfigFilePath), std::string(modelWeightFilePath),
-                                                               std::string(modelUserFilePath));
+       } catch (const std::exception &e) {
+               LOGE("%s", e.what());
+       }
 
-release_model_user_file_path:
        free(modelUserFilePath);
-
-release_model_weight_file_path:
        free(modelWeightFilePath);
-
-release_model_config_file_path:
        free(modelConfigFilePath);
 
-out_of_function:
-       LOGI("LEAVE");
-
        return ret;
 }
 
+// FIXME: the return value of configureInputInfo() below is discarded and this
+// function unconditionally returns MEDIA_VISION_ERROR_NONE — propagate the error.
 static int configure_input_info(Inference *pInfer, mv_engine_config_h engine_config)
 {
-       LOGI("ENTER");
-
-       int tensorWidth, tensorHeight, tensorCh;
-       double meanValue, stdValue;
-       char *node_name = NULL;
-       int dataType = 0;
+       MEDIA_VISION_FUNCTION_ENTER();
 
+       int tensorWidth;
        int ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_INPUT_TENSOR_WIDTH, &tensorWidth);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor width");
-               goto out_of_function;
+               return ret;
        }
 
+       int tensorHeight;
        ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_INPUT_TENSOR_HEIGHT, &tensorHeight);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor height");
-               goto out_of_function;
+               return ret;
        }
 
+       int tensorCh;
        ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_INPUT_TENSOR_CHANNELS, &tensorCh);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get tensor channels");
-               goto out_of_function;
+               return ret;
        }
 
+       double meanValue;
        ret = mv_engine_config_get_double_attribute(engine_config, MV_INFERENCE_MODEL_MEAN_VALUE, &meanValue);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get meanValue");
-               goto out_of_function;
+               return ret;
        }
 
+       double stdValue;
        ret = mv_engine_config_get_double_attribute(engine_config, MV_INFERENCE_MODEL_STD_VALUE, &stdValue);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get stdValue");
-               goto out_of_function;
+               return ret;
        }
 
+       int dataType = 0;
        ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_INPUT_DATA_TYPE, &dataType);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get a input tensor data type");
-               goto out_of_function;
+               return ret;
        }
 
+       char *node_name {};
        ret = mv_engine_config_get_string_attribute(engine_config, MV_INFERENCE_INPUT_NODE_NAME, &node_name);
        if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to get tensor width");
-               goto out_of_function;
+               LOGE("Fail to get input node name");
+               return ret;
        }
 
-       pInfer->configureInputInfo(tensorWidth, tensorHeight, 1, tensorCh, stdValue, meanValue, dataType,
-                                                          std::vector<std::string>(1, std::string(node_name)));
+       pInfer->configureInputInfo(tensorWidth, tensorHeight, 1, tensorCh, stdValue, meanValue, dataType, { node_name });
 
        free(node_name);
-       node_name = NULL;
 
-out_of_function:
-       LOGI("LEAVE");
+       MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 static int configure_post_process_info(Inference *pInfer, mv_engine_config_h engine_config)
 {
-       LOGI("ENTER");
-
-       int maxOutput = 0;
-       double threshold = 0;
+       MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to get inference output maximum numbers");
-               goto out_of_function;
-       }
+       int ret = MEDIA_VISION_ERROR_NONE;
 
-       pInfer->configureOutput(maxOutput);
+       try {
+               pInfer->configureOutput([&] {
+                       int maxOutput = 0;
+                       ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_OUTPUT_MAX_NUMBER, &maxOutput);
+                       if (ret != MEDIA_VISION_ERROR_NONE)
+                               throw std::runtime_error("Fail to get inference output maximum numbers");
+                       return maxOutput;
+               }());
 
-       ret = mv_engine_config_get_double_attribute(engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to get inference confidence threshold value");
-               goto out_of_function;
-       }
+               pInfer->configureThreshold([&] {
+                       double threshold = 0;
+                       ret = mv_engine_config_get_double_attribute(engine_config, MV_INFERENCE_CONFIDENCE_THRESHOLD, &threshold);
+                       if (ret != MEDIA_VISION_ERROR_NONE)
+                               throw std::runtime_error("Fail to get inference confidence threshold value");
+                       return threshold;
+               }());
 
-       pInfer->configureThreshold(threshold);
+               MEDIA_VISION_FUNCTION_LEAVE();
 
-out_of_function:
-       LOGI("LEAVE");
+       } catch (const std::exception &e) {
+               LOGE("%s", e.what());
+       }
 
        return ret;
 }
 
 static int configure_output_info(Inference *pInfer, mv_engine_config_h engine_config)
 {
-       LOGI("ENTER");
+       MEDIA_VISION_FUNCTION_ENTER();
 
-       int idx = 0;
-       char **node_names = NULL;
+       char **node_names {};
        int size = 0;
-       std::vector<std::string> names;
 
        int ret = mv_engine_config_get_array_string_attribute(engine_config, MV_INFERENCE_OUTPUT_NODE_NAMES, &node_names,
                                                                                                                  &size);
@@ -290,24 +286,15 @@ static int configure_output_info(Inference *pInfer, mv_engine_config_h engine_co
                return ret;
        }
 
-       for (idx = 0; idx < size; ++idx)
-               names.push_back(std::string(node_names[idx]));
-
-       std::vector<inference_engine_tensor_info> tensors_info;
-
-       pInfer->configureOutputInfo(names, tensors_info);
-
-       if (node_names) {
-               for (idx = 0; idx < size; ++idx)
-                       free(node_names[idx]);
+       pInfer->configureOutputInfo({ node_names, node_names + size }, {});
 
-               free(node_names);
-               node_names = NULL;
-       }
+       for (int idx = 0; idx < size; ++idx)
+               free(node_names[idx]);
+       free(node_names);
 
-       LOGI("LEAVE");
+       MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_create(mv_inference_h *infer)
@@ -317,19 +304,18 @@ int mv_inference_create(mv_inference_h *infer)
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       (*infer) = static_cast<mv_inference_h>(new (std::nothrow) Inference());
-
-       if (*infer == NULL) {
+       auto pInfer = static_cast<mv_inference_h>(new (std::nothrow) Inference());
+       if (!pInfer) {
                LOGE("Failed to create inference handle");
-               ret = MEDIA_VISION_ERROR_OUT_OF_MEMORY;
+               return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-       LOGD("Inference handle [%p] has been created", *infer);
+       LOGD("Inference handle [%p] has been created", pInfer);
+       *infer = pInfer;
 
        MEDIA_VISION_FUNCTION_LEAVE();
-       return ret;
+
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_destroy(mv_inference_h infer)
@@ -339,14 +325,13 @@ int mv_inference_destroy(mv_inference_h infer)
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
        LOGD("Destroying inference handle [%p]", infer);
        delete static_cast<Inference *>(infer);
        LOGD("Inference handle has been destroyed");
 
        MEDIA_VISION_FUNCTION_LEAVE();
-       return ret;
+
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_config)
@@ -357,20 +342,18 @@ int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_confi
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       Inference *pInfer = static_cast<Inference *>(infer);
-       int backendType = 0;
-       int targetTypes = 0;
+       auto pInfer = static_cast<Inference *>(infer);
 
        pInfer->setEngineConfig(engine_config);
 
-       ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
+       int backendType = 0;
+       int ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_BACKEND_TYPE, &backendType);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference backend type");
                return ret;
        }
 
+       int targetTypes = 0;
        ret = mv_engine_config_get_int_attribute(engine_config, MV_INFERENCE_TARGET_DEVICE_TYPE, &targetTypes);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference target type");
@@ -378,7 +361,8 @@ int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_confi
        }
 
        // Set target device type.
-       if (pInfer->configureTargetDevices(targetTypes) != MEDIA_VISION_ERROR_NONE) {
+       ret = pInfer->configureTargetDevices(targetTypes);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Tried to configure invalid target types.");
                return ret;
        }
@@ -393,7 +377,8 @@ int mv_inference_configure(mv_inference_h infer, mv_engine_config_h engine_confi
        }
 
        MEDIA_VISION_FUNCTION_LEAVE();
-       return ret;
+
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_prepare(mv_inference_h infer)
@@ -403,12 +388,10 @@ int mv_inference_prepare(mv_inference_h infer)
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       Inference *pInfer = static_cast<Inference *>(infer);
+       auto pInfer = static_cast<Inference *>(infer);
        mv_engine_config_h engine_config = mv_inference_get_engine_config(infer);
 
-       ret = configure_model(pInfer, engine_config);
+       int ret = configure_model(pInfer, engine_config);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to configure model");
                return ret;
@@ -454,11 +437,14 @@ int mv_inference_prepare(mv_inference_h infer)
 
        // Request to load model files to a backend engine.
        ret = pInfer->load();
-       if (ret != MEDIA_VISION_ERROR_NONE)
+       if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to load model files.");
+               return ret;
+       }
 
        MEDIA_VISION_FUNCTION_LEAVE();
-       return ret;
+
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_foreach_supported_engine(mv_inference_h infer, mv_inference_supported_engine_cb callback,
@@ -469,19 +455,19 @@ int mv_inference_foreach_supported_engine(mv_inference_h infer, mv_inference_sup
        MEDIA_VISION_NULL_ARG_CHECK(callback);
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
+       auto pInfer = static_cast<Inference *>(infer);
 
-       Inference *pInfer = static_cast<Inference *>(infer);
-       std::pair<std::string, bool> backend;
+       std::string name;
+       bool supported = false;
 
        for (int i = 0; i < MV_INFERENCE_BACKEND_MAX; ++i) {
-               backend = pInfer->getSupportedInferenceBackend(i);
-               callback((backend.first).c_str(), backend.second, user_data);
+               std::tie(name, supported) = pInfer->getSupportedInferenceBackend(i);
+               callback(name.c_str(), supported, user_data);
        }
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_image_classify(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
@@ -494,25 +480,20 @@ int mv_inference_image_classify(mv_source_h source, mv_inference_h infer, mv_rec
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
+       auto pInfer = static_cast<Inference *>(infer);
 
-       Inference *pInfer = static_cast<Inference *>(infer);
-       std::vector<mv_source_h> sources;
+       std::vector<mv_source_h> sources { source };
        std::vector<mv_rectangle_s> rects;
-
-       sources.push_back(source);
-
-       if (roi != NULL)
+       if (roi)
                rects.push_back(*roi);
 
-       ret = pInfer->run(sources, rects);
+       int ret = pInfer->run(sources, rects);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to run inference");
                return ret;
        }
 
        ImageClassificationResults classificationResults;
-
        ret = pInfer->getClassficationResults(&classificationResults);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference results");
@@ -520,27 +501,21 @@ int mv_inference_image_classify(mv_source_h source, mv_inference_h infer, mv_rec
        }
 
        int numberOfOutputs = classificationResults.number_of_classes;
-       static const int START_CLASS_NUMBER = 10;
-       static std::vector<const char *> names(START_CLASS_NUMBER);
-
-       if (numberOfOutputs > START_CLASS_NUMBER)
-               names.resize(numberOfOutputs);
+       LOGI("number_of_classes: %d", numberOfOutputs);
 
-       LOGI("mv_inference: number_of_classes: %d\n", numberOfOutputs);
+       std::vector<const char *> names(numberOfOutputs);
 
        for (int output_index = 0; output_index < numberOfOutputs; ++output_index) {
-               LOGI("names: %s", classificationResults.names[output_index].c_str());
                names[output_index] = classificationResults.names[output_index].c_str();
+               LOGI("[%d] name:%s", output_index, names[output_index]);
        }
 
-       auto *indices = classificationResults.indices.data();
-       auto *confidences = classificationResults.confidences.data();
-
-       classified_cb(source, numberOfOutputs, indices, names.data(), confidences, user_data);
+       classified_cb(source, numberOfOutputs, classificationResults.indices.data(), names.data(),
+                                 classificationResults.confidences.data(), user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_object_detect(mv_source_h source, mv_inference_h infer, mv_inference_object_detected_cb detected_cb,
@@ -553,21 +528,18 @@ int mv_inference_object_detect(mv_source_h source, mv_inference_h infer, mv_infe
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
+       auto pInfer = static_cast<Inference *>(infer);
 
-       Inference *pInfer = static_cast<Inference *>(infer);
-       std::vector<mv_source_h> sources;
+       std::vector<mv_source_h> sources { source };
        std::vector<mv_rectangle_s> rects;
 
-       sources.push_back(source);
-       ret = pInfer->run(sources, rects);
+       int ret = pInfer->run(sources, rects);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to run inference");
                return ret;
        }
 
        ObjectDetectionResults objectDetectionResults;
-
        ret = pInfer->getObjectDetectionResults(&objectDetectionResults);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get inference results");
@@ -592,18 +564,17 @@ int mv_inference_object_detect(mv_source_h source, mv_inference_h infer, mv_infe
                locations[output_idx].point.y = objectDetectionResults.locations[output_idx].y;
                locations[output_idx].width = objectDetectionResults.locations[output_idx].width;
                locations[output_idx].height = objectDetectionResults.locations[output_idx].height;
+
                LOGI("%d, %d, %d, %d", locations[output_idx].point.x, locations[output_idx].point.y,
                         locations[output_idx].width, locations[output_idx].height);
        }
 
-       int *indices = objectDetectionResults.indices.data();
-       float *confidences = objectDetectionResults.confidences.data();
-
-       detected_cb(source, numberOfOutputs, indices, names.data(), confidences, locations.data(), user_data);
+       detected_cb(source, numberOfOutputs, objectDetectionResults.indices.data(), names.data(),
+                               objectDetectionResults.confidences.data(), locations.data(), user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_face_detect(mv_source_h source, mv_inference_h infer, mv_inference_face_detected_cb detected_cb,
@@ -616,15 +587,11 @@ int mv_inference_face_detect(mv_source_h source, mv_inference_h infer, mv_infere
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       Inference *pInfer = static_cast<Inference *>(infer);
-       std::vector<mv_source_h> sources;
+       auto pInfer = static_cast<Inference *>(infer);
+       std::vector<mv_source_h> sources { source };
        std::vector<mv_rectangle_s> rects;
 
-       sources.push_back(source);
-
-       ret = pInfer->run(sources, rects);
+       int ret = pInfer->run(sources, rects);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to run inference");
                return ret;
@@ -648,13 +615,11 @@ int mv_inference_face_detect(mv_source_h source, mv_inference_h infer, mv_infere
                locations[output_idx].height = faceDetectionResults.locations[output_idx].height;
        }
 
-       float *confidences = faceDetectionResults.confidences.data();
-
-       detected_cb(source, numberOfOutputs, confidences, locations.data(), user_data);
+       detected_cb(source, numberOfOutputs, faceDetectionResults.confidences.data(), locations.data(), user_data);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_facial_landmark_detect(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
@@ -667,18 +632,14 @@ int mv_inference_facial_landmark_detect(mv_source_h source, mv_inference_h infer
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
+       auto pInfer = static_cast<Inference *>(infer);
 
-       Inference *pInfer = static_cast<Inference *>(infer);
-       std::vector<mv_source_h> sources;
+       std::vector<mv_source_h> sources { source };
        std::vector<mv_rectangle_s> rects;
-
-       sources.push_back(source);
-
-       if (roi != NULL)
+       if (roi)
                rects.push_back(*roi);
 
-       ret = pInfer->run(sources, rects);
+       int ret = pInfer->run(sources, rects);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to run inference");
                return ret;
@@ -704,7 +665,7 @@ int mv_inference_facial_landmark_detect(mv_source_h source, mv_inference_h infer
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_pose_landmark_detect(mv_source_h source, mv_inference_h infer, mv_rectangle_s *roi,
@@ -717,29 +678,25 @@ int mv_inference_pose_landmark_detect(mv_source_h source, mv_inference_h infer,
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       Inference *pInfer = static_cast<Inference *>(infer);
-       unsigned int width, height;
+       auto pInfer = static_cast<Inference *>(infer);
 
-       ret = mv_source_get_width(source, &width);
+       unsigned int width;
+       int ret = mv_source_get_width(source, &width);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get width");
                return ret;
        }
 
+       unsigned int height;
        ret = mv_source_get_height(source, &height);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to get height");
                return ret;
        }
 
-       std::vector<mv_source_h> sources;
+       std::vector<mv_source_h> sources { source };
        std::vector<mv_rectangle_s> rects;
-
-       sources.push_back(source);
-
-       if (roi != NULL)
+       if (roi)
                rects.push_back(*roi);
 
        ret = pInfer->run(sources, rects);
@@ -769,7 +726,7 @@ int mv_inference_pose_landmark_detect(mv_source_h source, mv_inference_h infer,
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_pose_get_number_of_poses(mv_inference_pose_result_h result, int *number_of_poses)
@@ -781,15 +738,13 @@ int mv_inference_pose_get_number_of_poses(mv_inference_pose_result_h result, int
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
+       auto handle = static_cast<mv_inference_pose_s *>(result);
        *number_of_poses = handle->number_of_poses;
        LOGI("%d", *number_of_poses);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_pose_get_number_of_landmarks(mv_inference_pose_result_h result, int *number_of_landmarks)
@@ -801,15 +756,13 @@ int mv_inference_pose_get_number_of_landmarks(mv_inference_pose_result_h result,
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       mv_inference_pose_s *handle = static_cast<mv_inference_pose_s *>(result);
+       auto handle = static_cast<mv_inference_pose_s *>(result);
        *number_of_landmarks = handle->number_of_landmarks_per_pose;
        LOGI("%d", *number_of_landmarks);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_pose_get_landmark(mv_inference_pose_result_h result, int pose_index, int part_index,
@@ -823,9 +776,7 @@ int mv_inference_pose_get_landmark(mv_inference_pose_result_h result, int pose_i
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
+       auto pose_obj = static_cast<mv_inference_pose_s *>(result);
 
        if (pose_index < 0 || pose_index >= pose_obj->number_of_poses)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -835,11 +786,12 @@ int mv_inference_pose_get_landmark(mv_inference_pose_result_h result, int pose_i
 
        *location = pose_obj->landmarks[pose_index][part_index].point;
        *score = pose_obj->landmarks[pose_index][part_index].score;
+
        LOGI("[%d]:(%dx%d) - %.4f", pose_index, location->x, location->y, *score);
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_inference_pose_get_label(mv_inference_pose_result_h result, int pose_index, int *label)
@@ -851,9 +803,7 @@ int mv_inference_pose_get_label(mv_inference_pose_result_h result, int pose_inde
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       mv_inference_pose_s *pose_obj = static_cast<mv_inference_pose_s *>(result);
+       auto pose_obj = static_cast<mv_inference_pose_s *>(result);
 
        if (pose_index < 0 || pose_index >= pose_obj->number_of_poses)
                return MEDIA_VISION_ERROR_INVALID_PARAMETER;
@@ -863,7 +813,7 @@ int mv_inference_pose_get_label(mv_inference_pose_result_h result, int pose_inde
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_pose_create(mv_pose_h *pose)
@@ -873,20 +823,18 @@ int mv_pose_create(mv_pose_h *pose)
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       (*pose) = static_cast<mv_pose_h>(new (std::nothrow) Posture);
-
-       if (*pose == NULL) {
+       auto pPose = static_cast<mv_pose_h>(new (std::nothrow) Posture);
+       if (!pPose) {
                LOGE("Failed to create pose handle");
                return MEDIA_VISION_ERROR_OUT_OF_MEMORY;
        }
 
-       LOGD("Inference handle [%p] has been created", *pose);
+       LOGD("Inference handle [%p] has been created", pPose);
+       *pose = pPose;
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_pose_destroy(mv_pose_h pose)
@@ -896,15 +844,13 @@ int mv_pose_destroy(mv_pose_h pose)
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
        LOGD("Destroy pose handle [%p]", pose);
        delete static_cast<Posture *>(pose);
        LOGD("Pose handle has been destroyed");
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_pose_set_from_file(mv_pose_h pose, const char *motion_capture_file_path, const char *motion_mapping_file_path)
@@ -916,10 +862,6 @@ int mv_pose_set_from_file(mv_pose_h pose, const char *motion_capture_file_path,
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
-
-       Posture *pPose = static_cast<Posture *>(pose);
-
        // check file
        if (access(motion_capture_file_path, F_OK) || access(motion_mapping_file_path, F_OK)) {
                LOGE("Invalid Motion Capture file path [%s]", motion_capture_file_path);
@@ -928,7 +870,9 @@ int mv_pose_set_from_file(mv_pose_h pose, const char *motion_capture_file_path,
                return MEDIA_VISION_ERROR_INVALID_PATH;
        }
 
-       ret = pPose->setPoseFromFile(std::string(motion_capture_file_path), std::string(motion_mapping_file_path));
+       auto pPose = static_cast<Posture *>(pose);
+
+       int ret = pPose->setPoseFromFile(motion_capture_file_path, motion_mapping_file_path);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to setPoseFromFile");
                return ret;
@@ -936,7 +880,7 @@ int mv_pose_set_from_file(mv_pose_h pose, const char *motion_capture_file_path,
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }
 
 int mv_pose_compare(mv_pose_h pose, mv_inference_pose_result_h action, int parts, float *score)
@@ -948,23 +892,21 @@ int mv_pose_compare(mv_pose_h pose, mv_inference_pose_result_h action, int parts
 
        MEDIA_VISION_FUNCTION_ENTER();
 
-       int ret = MEDIA_VISION_ERROR_NONE;
+       auto pPose = static_cast<Posture *>(pose);
+       auto pAction = static_cast<mv_inference_pose_s *>(action);
 
-       Posture *pPose = static_cast<Posture *>(pose);
        std::vector<std::pair<bool, cv::Point> > actionParts;
-       mv_inference_pose_s *pAction = static_cast<mv_inference_pose_s *>(action);
 
        for (int k = 0; k < HUMAN_POSE_MAX_LANDMARKS; ++k) {
                if (pAction->landmarks[0][k].point.x == -1 || pAction->landmarks[0][k].point.y == -1) {
-                       actionParts.push_back(std::make_pair(false, cv::Point(-1, -1)));
+                       actionParts.push_back({ false, cv::Point(-1, -1) });
                        continue;
                }
 
-               actionParts.push_back(
-                               std::make_pair(true, cv::Point(pAction->landmarks[0][k].point.x, pAction->landmarks[0][k].point.y)));
+               actionParts.push_back({ true, cv::Point(pAction->landmarks[0][k].point.x, pAction->landmarks[0][k].point.y) });
        }
 
-       ret = pPose->compare(parts, actionParts, score);
+       int ret = pPose->compare(parts, actionParts, score);
        if (ret != MEDIA_VISION_ERROR_NONE) {
                LOGE("Fail to compare");
                return ret;
@@ -974,5 +916,5 @@ int mv_pose_compare(mv_pose_h pose, mv_inference_pose_result_h action, int parts
 
        MEDIA_VISION_FUNCTION_LEAVE();
 
-       return ret;
+       return MEDIA_VISION_ERROR_NONE;
 }