*
* @since_tizen 6.0
*
- * @param[in] result The handle to inference result
- * @param[out] number_of_poses The pointer to the number of poses
+ * @param[in] result The handle to inference result
+ * @param[out] number_of_poses The pointer to the number of poses
*
* @return @c 0 on success, otherwise a negative error value
* @retval #MEDIA_VISION_ERROR_NONE Successful
*
* @since_tizen 6.0
*
- * @param[in] result The handle to inference result
- * @param[out] number_of_landmarks The pointer to the number of landmarks
+ * @param[in] result The handle to inference result
+ * @param[out] number_of_landmarks The pointer to the number of landmarks
*
* @return @c 0 on success, otherwise a negative error value
* @retval #MEDIA_VISION_ERROR_NONE Successful
*
* @since_tizen 6.0
*
- * @param[in] result The handle to inference result
- * @param[in] pose_index The pose index between 0 and
- * the number of poses which can be gotten by
- * mv_inference_pose_get_number_of_poses()
- * @param[in] pose_part The landmark index between 0 and
- * the number of landmarks which can be gotten by
- * mv_inference_pose_get_number_of_landmarks()
+ * @param[in] result The handle to inference result
+ * @param[in] pose_index The pose index between 0 and
+ * the number of poses which can be gotten by
+ * mv_inference_pose_get_number_of_poses()
+ * @param[in] pose_part The landmark index between 0 and
+ * the number of landmarks which can be gotten by
+ * mv_inference_pose_get_number_of_landmarks()
* @param[out] location The location of a landmark
* @param[out] score The score of a landmark
*
*
* @since_tizen 6.0
*
- * @param[in] result The handle to inference result
- * @param[in] pose_index The pose index between 0 and
- * the number of poses which can be gotten by
- * mv_inference_pose_get_number_of_poses()
+ * @param[in] result The handle to inference result
+ * @param[in] pose_index The pose index between 0 and
+ * the number of poses which can be gotten by
+ * mv_inference_pose_get_number_of_poses()
* @param[out] label The label of a pose
*
* @return @c 0 on success, otherwise a negative error value
struct InferenceConfig {
/**
- * @brief Default constructor for the @ref InferenceConfig
- *
- * @since_tizen 5.0
- */
+ * @brief Default constructor for the @ref InferenceConfig
+ *
+ * @since_tizen 5.0
+ */
InferenceConfig();
std::string mConfigFilePath; /**< Path of a model configuration file */
{
public:
/**
- * @brief Creates an Inference class instance.
- *
- * @since_tizen 5.5
- */
+ * @brief Creates an Inference class instance.
+ *
+ * @since_tizen 5.5
+ */
Inference();
/**
- * @brief Destroys an Inference class instance including
- * its all resources.
- *
- * @since_tizen 5.5
- */
+ * @brief Destroys an Inference class instance including
+ * its all resources.
+ *
+ * @since_tizen 5.5
+ */
~Inference();
/**
- * @brief Configure modelfiles
- *
- * @since_tizen 5.5
- */
+ * @brief Configure modelfiles
+ *
+ * @since_tizen 5.5
+ */
void configureModelFiles(const std::string modelConfigFilePath, const std::string modelWeightFilePath,
const std::string modelUserFilePath);
/**
- * @brief Configure input information
- *
- * @since_tizen 6.0
- */
+ * @brief Configure input information
+ *
+ * @since_tizen 6.0
+ */
int configureInputInfo(int width, int height, int dim, int ch, double stdValue, double meanValue, int dataType,
const std::vector<std::string> &names);
const std::vector<inference_engine_tensor_info> &tensors_info);
/**
- * @brief Configure input information from model meta file.
+ * @brief Configure input information from model meta file.
*
* @since_tizen 7.0
*/
int configureInputMetaInfo(mediavision::machine_learning::MetaMap &inputMetaInfo);
/**
- * @brief Configure output information from model meta file.
+ * @brief Configure output information from model meta file.
*
* @since_tizen 7.0
*/
int configureOutputMetaInfo(mediavision::machine_learning::MetaMap &outputMetaInfo);
/**
- * @brief Configure inference target devices such as CPU, GPU or NPU. (one more types can be combined)
- *
- * @since_tizen 6.0
- */
+ * @brief Configure inference target devices such as CPU, GPU or NPU. (one or more types can be combined)
+ *
+ * @since_tizen 6.0
+ */
int configureTargetDevices(const int targetDevices);
/**
- * @brief Configure the maximum number of inference results
- *
- * @since_tizen 5.5
- */
+ * @brief Configure the maximum number of inference results
+ *
+ * @since_tizen 5.5
+ */
void configureOutput(const int maxOutputNumbers);
/**
- * @brief Configure the confidence threshold
- *
- * @since_tizen 5.5
- */
+ * @brief Configure the confidence threshold
+ *
+ * @since_tizen 5.5
+ */
void configureThreshold(const double threshold);
/**
- * @brief Parses the metadata file path
- *
- * @since_tizen 6.5
- */
+ * @brief Parses the metadata file path
+ *
+ * @since_tizen 6.5
+ */
int parseMetadata(const std::string filePath);
/**
- * @brief Bind a backend engine
- * @details Use this function to bind a backend engine for the inference.
- * This creates a inference engine common class object, and loads a backend
- * library which interfaces with a Neural Network runtime such as TF Lite,
- * OpenCV, ARMNN and so on.
- *
- * Ps. The created inference engine common object will be released and its
- * corresponding backend library will be unbound when deconstructor
- * of Inference class will be called.
- *
- * @since_tizen 6.0
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- */
+ * @brief Bind a backend engine
+ * @details Use this function to bind a backend engine for the inference.
+ * This creates an inference engine common class object, and loads a backend
+ * library which interfaces with a Neural Network runtime such as TF Lite,
+ * OpenCV, ARMNN and so on.
+ *
+ * Ps. The created inference engine common object will be released and its
+ * corresponding backend library will be unbound when the destructor
+ * of the Inference class is called.
+ *
+ * @since_tizen 6.0
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ */
int bind(int backend_type, int device_type);
/**
- * @brief Load model files
- * @details Use this function to load given model files for the inference.
- *
- * Ps. this callback should be called after Prepare callback.
- *
- * @since_tizen 6.0
- *
- * @return @c 0 on success, otherwise a negative error value
- * @retval #MEDIA_VISION_ERROR_NONE Successful
- * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
- */
+ * @brief Load model files
+ * @details Use this function to load given model files for the inference.
+ *
+ * Ps. this callback should be called after Prepare callback.
+ *
+ * @since_tizen 6.0
+ *
+ * @return @c 0 on success, otherwise a negative error value
+ * @retval #MEDIA_VISION_ERROR_NONE Successful
+ * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+ */
int load();
/**
- * @brief Runs inference with a region of a given image
- * @details Use this function to run forward pass with the given image.
- * The given image is preprocessed and the region of the image is
- * thrown to neural network. Then, the output tensor is returned.
- * If roi is NULL, then full source will be analyzed.
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
+ * @brief Runs inference with a region of a given image
+ * @details Use this function to run forward pass with the given image.
+ * The given image is preprocessed and the region of the image is
+ * thrown to neural network. Then, the output tensor is returned.
+ * If roi is NULL, then full source will be analyzed.
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
int run(std::vector<mv_source_h> &mvSources, std::vector<mv_rectangle_s> &rects);
template<typename T> int run(std::vector<std::vector<T> > &input_tensors);
int run();
/**
- * @brief Gets that given engine is supported or not
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
+ * @brief Gets whether the given engine is supported or not
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
std::pair<std::string, bool> getSupportedInferenceBackend(int backend);
/**
- * @brief Gets the ImageClassificationResults
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
+ * @brief Gets the ImageClassificationResults
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
int getClassficationResults(ImageClassificationResults *results);
/**
- * @brief Gets the ObjectDetectioResults
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
+ * @brief Gets the ObjectDetectionResults
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
int getObjectDetectionResults(ObjectDetectionResults *results);
/**
- * @brief Gets the FaceDetectioResults
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
+ * @brief Gets the FaceDetectionResults
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
int getFaceDetectionResults(FaceDetectionResults *results);
/**
- * @brief Gets the FacialLandmarkDetectionResults
- *
- * @since_tizen 5.5
- * @return @c true on success, otherwise a negative error value
- */
+ * @brief Gets the FacialLandMarkDetectionResults
+ *
+ * @since_tizen 5.5
+ * @return @c true on success, otherwise a negative error value
+ */
int getFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *results);
/**
- * @brief Gets the PoseLandmarkDetectionResults
- *
- * @since_tizen 6.0
- * @return @c true on success, otherwise a negative error value
- */
+ * @brief Gets the PoseLandmarkDetectionResults
+ *
+ * @since_tizen 6.0
+ * @return @c true on success, otherwise a negative error value
+ */
int getPoseLandmarkDetectionResults(std::unique_ptr<mv_inference_pose_s> &detectionResults, int width, int height);
mv_engine_config_h getEngineConfig(void)