*/
int mv_inference_destroy_open(mv_inference_h infer);
- /**
- * @brief Configure the inference model data to inference handle
- *
- * @since_tizen 5.5
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
- int mv_inference_configure_model_open(mv_inference_h infer,
- mv_engine_config_h engine_config);
-
- /**
- * @brief Configure the input information to the inference handle
- *
- * @since_tizen 6.0
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
- int
- mv_inference_configure_input_info_open(mv_inference_h infer,
- mv_engine_config_h engine_config);
-
/**
* @brief Configure the backend to the inference handle
*
int mv_inference_configure_confidence_threshold_open(
mv_inference_h infer, mv_engine_config_h engine_config);
- /**
- * @brief Configure the post process infomation to the inference handle
- *
- * @since_tizen 6.0
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
- int mv_inference_configure_post_process_info_open(
- mv_inference_h infer, mv_engine_config_h engine_config);
-
/**
* @brief Configure the set of output node names to the inference handle
*
int mv_inference_configure_output_node_names_open(
mv_inference_h infer, mv_engine_config_h engine_config);
- /**
- * @brief Configure the output information to the inference handle
- *
- * @since_tizen 6.0
- *
- * @param [in] infer The handle to the inference
- * @param [in] engine_config The handle to the configuration of
- * engine.
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_PATH Invalid path of model data
- * in @a engine_config
- * @retval #MEDIA_VISION_ERROR_INVALID_DATA Invalid model data
- * @retval #MEDIA_VISION_ERROR_OUT_OF_MEMORY Out of memory
- * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
- */
- int
- mv_inference_configure_output_info_open(mv_inference_h infer,
- mv_engine_config_h engine_config);
-
/**
* @brief Prepare inference.
* @details Use this function to prepare inference based on
int ret = MEDIA_VISION_ERROR_NONE;
- mv_engine_config_h engine_config = mv_inference_get_engine_config(infer);
-
- ret = mv_inference_configure_model_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to configure model");
- return ret;
- }
-
- // input tensor, input layer
- ret = mv_inference_configure_input_info_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to configure input info");
- return ret;
- }
-
- // output layer
- ret = mv_inference_configure_output_info_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to configure output info");
- return ret;
- }
-
- // maximum candidates, threshold
- ret = mv_inference_configure_post_process_info_open(infer, engine_config);
- if (ret != MEDIA_VISION_ERROR_NONE) {
- LOGE("Fail to configure post process info");
- return ret;
- }
-
ret = mv_inference_prepare_open(infer);
MEDIA_VISION_FUNCTION_LEAVE();
target_device_type & MV_INFERENCE_TARGET_DEVICE_CUSTOM);
}
-int mv_inference_configure_model_open(mv_inference_h infer,
- mv_engine_config_h engine_config)
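+// Formerly mv_inference_configure_model_open(): reads the model config,
+// weight and user file paths from engine_config and applies them to pInfer.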
+static int configure_model_open(Inference *pInfer, mv_engine_config_h engine_config)
{
LOGI("ENTER");
- Inference *pInfer = static_cast<Inference *>(infer);
char *modelConfigFilePath = NULL;
char *modelWeightFilePath = NULL;
char *modelUserFilePath = NULL;
return ret;
}
-int mv_inference_configure_input_info_open(mv_inference_h infer,
- mv_engine_config_h engine_config)
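+// Formerly mv_inference_configure_input_info_open(): reads the input tensor
+// dimensions, mean/std values and input node name from engine_config.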
+static int configure_input_info_open(Inference *pInfer, mv_engine_config_h engine_config)
{
LOGI("ENTER");
- Inference *pInfer = static_cast<Inference *>(infer);
int tensorWidth, tensorHeight, tensorCh;
double meanValue, stdValue;
char *node_name = NULL;
return ret;
}
-int mv_inference_configure_post_process_info_open(
- mv_inference_h infer, mv_engine_config_h engine_config)
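+// Formerly mv_inference_configure_post_process_info_open(): reads the maximum
+// number of output candidates and the confidence threshold from engine_config.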
+static int configure_post_process_info_open(Inference *pInfer, mv_engine_config_h engine_config)
{
LOGI("ENTER");
- Inference *pInfer = static_cast<Inference *>(infer);
int maxOutput = 0;
double threshold = 0;
return ret;
}
-int mv_inference_configure_output_info_open(mv_inference_h infer,
- mv_engine_config_h engine_config)
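+// Formerly mv_inference_configure_output_info_open(): reads the output node
+// names from engine_config.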
+static int configure_output_info_open(Inference *pInfer, mv_engine_config_h engine_config)
{
LOGI("ENTER");
- Inference *pInfer = static_cast<Inference *>(infer);
int idx = 0;
char **node_names = NULL;
int size = 0;
LOGI("ENTER");
Inference *pInfer = static_cast<Inference *>(infer);
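+ // Apply the model, input/output and post-process settings from the
+ // engine config before asking the backend to load the model files.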
+ mv_engine_config_h engine_config = mv_inference_get_engine_config(infer);
+
+ int ret = configure_model_open(pInfer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure model");
+ return ret;
+ }
+
+ // input tensor, input layer
+ ret = configure_input_info_open(pInfer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure input info");
+ return ret;
+ }
+
+ // output layer
+ ret = configure_output_info_open(pInfer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure output info");
+ return ret;
+ }
+
+ // maximum candidates, threshold
+ ret = configure_post_process_info_open(pInfer, engine_config);
+ if (ret != MEDIA_VISION_ERROR_NONE) {
+ LOGE("Fail to configure post process info");
+ return ret;
+ }
+
// Request to load model files to a backend engine.
- int ret = pInfer->Load();
+ ret = pInfer->Load();
if (ret != MEDIA_VISION_ERROR_NONE)
LOGE("Fail to load model files.");