mv_machine_learning: code cleanup with code sliding
author    Inki Dae <inki.dae@samsung.com>
          Fri, 27 May 2022 08:28:56 +0000 (17:28 +0900)
committer Inki Dae <inki.dae@samsung.com>
          Wed, 20 Jul 2022 05:16:57 +0000 (14:16 +0900)
[Issue type]: code cleanup

Cleaned up the mv_inference_prepare and mv_inference_prepare_open
functions by doing code sliding. The four configuration functions don't
have to be called in mv_inference.c, so move them to
mv_inference_open.cpp, because they configure information required by
the Load function.

This is cleanup work that gathers related code in one place ahead of
the next code refactoring.
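
For illustration, here is a condensed before/after sketch of the
prepare path (all names are taken from this patch; error handling and
three of the four configure steps are elided):

    /* Before: mv_inference.c configured the handle step by step, then
     * delegated to the open layer.
     */
    int mv_inference_prepare(mv_inference_h infer)
    {
            mv_engine_config_h engine_config =
                            mv_inference_get_engine_config(infer);

            int ret = mv_inference_configure_model_open(infer, engine_config);
            if (ret != MEDIA_VISION_ERROR_NONE)
                    return ret;
            /* ...input info, output info and post process info follow
             * the same pattern... */

            return mv_inference_prepare_open(infer);
    }

    /* After: the configure steps sit next to Load() in
     * mv_inference_open.cpp, and the helpers become static, taking
     * Inference * directly.
     */
    int mv_inference_prepare_open(mv_inference_h infer)
    {
            Inference *pInfer = static_cast<Inference *>(infer);
            mv_engine_config_h engine_config =
                            mv_inference_get_engine_config(infer);

            int ret = configure_model_open(pInfer, engine_config);
            if (ret != MEDIA_VISION_ERROR_NONE)
                    return ret;
            /* ...input info, output info and post process info follow
             * the same pattern... */

            return pInfer->Load();
    }

With the configuration gathered behind mv_inference_prepare_open(), the
four mv_inference_configure_*_open() declarations are no longer needed
by the C layer and drop out of mv_inference_open.h.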

Change-Id: I2c9627832c77888d084a4c7441d9f70fa46a4317
Signed-off-by: Inki Dae <inki.dae@samsung.com>
mv_machine_learning/inference/include/mv_inference_open.h
mv_machine_learning/inference/src/mv_inference.c
mv_machine_learning/inference/src/mv_inference_open.cpp

diff --git a/mv_machine_learning/inference/include/mv_inference_open.h b/mv_machine_learning/inference/include/mv_inference_open.h
index 159163d41cf4595ed2b79b19e57965dade2403d3..c005bd06c26878e0fb488b0cb25c7503446a3447 100644
@@ -78,51 +78,6 @@ extern "C"
         */
        int mv_inference_destroy_open(mv_inference_h infer);
 
-       /**
-        * @brief Configure the inference model data to inference handle
-        *
-        * @since_tizen 5.5
-        *
-        * @param [in] infer         The handle to the inference
-        * @param [in] engine_config The handle to the configuration of
-        *                           engine.
-        *
-        * @return @c 0 on success, otherwise a negative error value
-        * @retval #MEDIA_VISION_ERROR_NONE Successful
-        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
-        *                                               in @a engine_config
-        * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
-        *                                          in @a engine_config
-        * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
-        * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
-        * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
-        */
-       int mv_inference_configure_model_open(mv_inference_h infer,
-                                                                                 mv_engine_config_h engine_config);
-
-               /**
-        * @brief Configure the input information to the inference handle
-        *
-        * @since_tizen 6.0
-        *
-        * @param [in] infer         The handle to the inference
-        * @param [in] engine_config The handle to the configuration of
-        *                           engine.
-        *
-        * @return @c 0 on success, otherwise a negative error value
-        * @retval #MEDIA_VISION_ERROR_NONE Successful
-        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
-        *                                               in @a engine_config
-        * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
-        *                                          in @a engine_config
-        * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
-        * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
-        * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
-        */
-       int
-       mv_inference_configure_input_info_open(mv_inference_h infer,
-                                                                                  mv_engine_config_h engine_config);
-
        /**
         * @brief Configure the backend to the inference handle
         *
@@ -191,28 +146,6 @@ extern "C"
        int mv_inference_configure_confidence_threshold_open(
                        mv_inference_h infer, mv_engine_config_h engine_config);
 
-       /**
-        * @brief Configure the post process infomation to the inference handle
-        *
-        * @since_tizen 6.0
-        *
-        * @param [in] infer         The handle to the inference
-        * @param [in] engine_config The handle to the configuration of
-        *                           engine.
-        *
-        * @return @c 0 on success, otherwise a negative error value
-        * @retval #MEDIA_VISION_ERROR_NONE Successful
-        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
-        *                                               in @a engine_config
-        * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
-        *                                          in @a engine_config
-        * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
-        * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
-        * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
-        */
-       int mv_inference_configure_post_process_info_open(
-                       mv_inference_h infer, mv_engine_config_h engine_config);
-
        /**
         * @brief Configure the set of output node names to the inference handle
         *
@@ -236,29 +169,6 @@ extern "C"
        int mv_inference_configure_output_node_names_open(
                        mv_inference_h infer, mv_engine_config_h engine_config);
 
-       /**
-        * @brief Configure the output information to the inference handle
-        *
-        * @since_tizen 6.0
-        *
-        * @param [in] infer         The handle to the inference
-        * @param [in] engine_config The handle to the configuration of
-        *                           engine.
-        *
-        * @return @c 0 on success, otherwise a negative error value
-        * @retval #MEDIA_VISION_ERROR_NONE Successful
-        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
-        *                                               in @a engine_config
-        * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
-        *                                          in @a engine_config
-        * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
-        * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
-        * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
-        */
-       int
-       mv_inference_configure_output_info_open(mv_inference_h infer,
-                                                                                       mv_engine_config_h engine_config);
-
        /**
         * @brief Prepare inference.
         * @details Use this function to prepare inference based on
diff --git a/mv_machine_learning/inference/src/mv_inference.c b/mv_machine_learning/inference/src/mv_inference.c
index 84c9b7d437db520f963fbb5acd2bc8df15f879b4..4acf690b3a27721f7308058b3935aafe3b333b3c 100644
@@ -87,35 +87,6 @@ int mv_inference_prepare(mv_inference_h infer)
 
        int ret = MEDIA_VISION_ERROR_NONE;
 
-       mv_engine_config_h engine_config = mv_inference_get_engine_config(infer);
-
-       ret = mv_inference_configure_model_open(infer, engine_config);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to configure model");
-               return ret;
-       }
-
-       // input tensor, input layer
-       ret = mv_inference_configure_input_info_open(infer, engine_config);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to configure input info");
-               return ret;
-       }
-
-       // output layer
-       ret = mv_inference_configure_output_info_open(infer, engine_config);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to configure output info");
-               return ret;
-       }
-
-       // maximum candidates, threshold
-       ret = mv_inference_configure_post_process_info_open(infer, engine_config);
-       if (ret != MEDIA_VISION_ERROR_NONE) {
-               LOGE("Fail to configure post process info");
-               return ret;
-       }
-
        ret = mv_inference_prepare_open(infer);
 
        MEDIA_VISION_FUNCTION_LEAVE();
diff --git a/mv_machine_learning/inference/src/mv_inference_open.cpp b/mv_machine_learning/inference/src/mv_inference_open.cpp
index fbdb8194be9c856e933d4546053b83a7b8d334fa..a9e30e356af1141a21323b1b791ae076a676f8f1 100644
@@ -132,12 +132,10 @@ static bool IsConfigFilePathRequired(const int target_device_type, const int bac
                        target_device_type & MV_INFERENCE_TARGET_DEVICE_CUSTOM);
 }
 
-int mv_inference_configure_model_open(mv_inference_h infer,
-                                                                         mv_engine_config_h engine_config)
+static int configure_model_open(Inference *pInfer, mv_engine_config_h engine_config)
 {
        LOGI("ENTER");
 
-       Inference *pInfer = static_cast<Inference *>(infer);
        char *modelConfigFilePath = NULL;
        char *modelWeightFilePath = NULL;
        char *modelUserFilePath = NULL;
@@ -254,12 +252,10 @@ out_of_function:
        return ret;
 }
 
-int mv_inference_configure_input_info_open(mv_inference_h infer,
-                                                                                  mv_engine_config_h engine_config)
+static int configure_input_info_open(Inference *pInfer, mv_engine_config_h engine_config)
 {
        LOGI("ENTER");
 
-       Inference *pInfer = static_cast<Inference *>(infer);
        int tensorWidth, tensorHeight, tensorCh;
        double meanValue, stdValue;
        char *node_name = NULL;
@@ -434,12 +430,10 @@ out_of_function:
        return ret;
 }
 
-int mv_inference_configure_post_process_info_open(
-               mv_inference_h infer, mv_engine_config_h engine_config)
+static int configure_post_process_info_open(Inference *pInfer, mv_engine_config_h engine_config)
 {
        LOGI("ENTER");
 
-       Inference *pInfer = static_cast<Inference *>(infer);
        int maxOutput = 0;
        double threshold = 0;
 
@@ -467,12 +461,10 @@ out_of_function:
        return ret;
 }
 
-int mv_inference_configure_output_info_open(mv_inference_h infer,
-                                                                                       mv_engine_config_h engine_config)
+static int configure_output_info_open(Inference *pInfer, mv_engine_config_h engine_config)
 {
        LOGI("ENTER");
 
-       Inference *pInfer = static_cast<Inference *>(infer);
        int idx = 0;
        char **node_names = NULL;
        int size = 0;
@@ -510,9 +502,37 @@ int mv_inference_prepare_open(mv_inference_h infer)
        LOGI("ENTER");
 
        Inference *pInfer = static_cast<Inference *>(infer);
+       mv_engine_config_h engine_config = mv_inference_get_engine_config(infer);
+
+       int ret = configure_model_open(pInfer, engine_config);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to configure model");
+               return ret;
+       }
+
+       // input tensor, input layer
+       ret = configure_input_info_open(pInfer, engine_config);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to configure input info");
+               return ret;
+       }
+
+       // output layer
+       ret = configure_output_info_open(pInfer, engine_config);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to configure output info");
+               return ret;
+       }
+
+       // maximum candidates, threshold
+       ret = configure_post_process_info_open(pInfer, engine_config);
+       if (ret != MEDIA_VISION_ERROR_NONE) {
+               LOGE("Fail to configure post process info");
+               return ret;
+       }
 
        // Request to load model files to a backend engine.
-       int ret = pInfer->Load();
+       ret = pInfer->Load();
        if (ret != MEDIA_VISION_ERROR_NONE)
                LOGE("Fail to load model files.");