Fix Tizen header checker warning 61/313161/3
author     Kwanghoon Son <k.son@samsung.com>
           Wed, 19 Jun 2024 12:02:05 +0000 (21:02 +0900)
committer  Kwanghoon Son <k.son@samsung.com>
           Thu, 20 Jun 2024 00:10:34 +0000 (09:10 +0900)
The tizen-native-api-review-script reported the following message:

Please consider changing tabs in comments to spaces.
Tabs may cause aligned comments to be shown incorrectly,
depending on the editor's tab width setting.
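For illustration, below is a minimal sketch of the kind of check involved (hypothetical code, not the actual tizen-native-api-review-script): it flags tab characters that appear on comment lines, since those are exactly the characters whose on-screen alignment depends on the editor's tab-width setting.

#include <fstream>
#include <iostream>
#include <string>

// Rough line-based heuristic (hypothetical; not the real review script):
// a line counts as a comment line if it is inside or opens a /* ... */
// block, or contains a // comment; any tab on such a line is reported.
// Indented with spaces on purpose.
int main(int argc, char **argv)
{
    if (argc != 2) {
        std::cerr << "usage: " << argv[0] << " <header-file>\n";
        return 1;
    }
    std::ifstream in(argv[1]);
    std::string line;
    bool inBlock = false; // currently inside a /* ... */ block comment
    int lineNo = 0;
    while (std::getline(in, line)) {
        ++lineNo;
        bool isComment = inBlock || line.find("/*") != std::string::npos ||
                         line.find("//") != std::string::npos;
        if (line.find("/*") != std::string::npos)
            inBlock = true;
        if (line.find("*/") != std::string::npos)
            inBlock = false;
        if (isComment && line.find('\t') != std::string::npos)
            std::cout << lineNo << ": tab in comment, consider spaces\n";
    }
    return 0;
}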

Change-Id: Id49bd94502ab5599dcec372fd28c71b2ed7a5646
Signed-off-by: Kwanghoon Son <k.son@samsung.com>
include/mv_common.h
include/mv_inference.h
mv_machine_learning/inference/include/Inference.h

index 9287945890d9a88bfbee9c043e4210726af90df5..5b587b5a3bd2f7d7ed970bcf044137989b67cecc 100644 (file)
@@ -584,7 +584,7 @@ int mv_engine_config_get_string_attribute(mv_engine_config_h engine_cfg, const c
  *                               configuration dictionary
  * @param [out] values           The attribute to be filled with the array of
  *                               string value from dictionary
- * @param [out] size                    The number of elements in @a values
+ * @param [out] size             The number of elements in @a values
  * @return @c 0 on success, otherwise a negative error value
  * @retval #MEDIA_VISION_ERROR_NONE Successful
  * @retval #MEDIA_VISION_ERROR_NOT_SUPPORTED Not supported
index 6f5005caaf8a8f98554ec99de8da5efc490093c7..4aa34100883e86c16623eb4c723c6102d7a90039 100644 (file)
@@ -735,8 +735,8 @@ int mv_inference_pose_landmark_detect(mv_source_h source, mv_inference_h infer,
  *
  * @since_tizen 6.0
  *
- * @param[in] result                   The handle to inference result
- * @param[out] number_of_poses   The pointer to the number of poses
+ * @param[in] result           The handle to inference result
+ * @param[out] number_of_poses The pointer to the number of poses
  *
  * @return @c 0 on success, otherwise a negative error value
  * @retval #MEDIA_VISION_ERROR_NONE Successful
@@ -754,8 +754,8 @@ int mv_inference_pose_get_number_of_poses(mv_inference_pose_result_h result, int
  *
  * @since_tizen 6.0
  *
- * @param[in] result                           The handle to inference result
- * @param[out] number_of_landmarks   The pointer to the number of landmarks
+ * @param[in] result               The handle to inference result
+ * @param[out] number_of_landmarks The pointer to the number of landmarks
  *
  * @return @c 0 on success, otherwise a negative error value
  * @retval #MEDIA_VISION_ERROR_NONE Successful
@@ -773,13 +773,13 @@ int mv_inference_pose_get_number_of_landmarks(mv_inference_pose_result_h result,
  *
  * @since_tizen 6.0
  *
- * @param[in] result            The handle to inference result
- * @param[in] pose_index        The pose index between 0 and
- *                              the number of poses which can be gotten by
- *                              mv_inference_pose_get_number_of_poses()
- * @param[in] pose_part         The landmark index between 0 and
- *                              the number of landmarks which can be gotten by
- *                              mv_inference_pose_get_number_of_landmarks()
+ * @param[in] result         The handle to inference result
+ * @param[in] pose_index     The pose index between 0 and
+ *                           the number of poses which can be gotten by
+ *                           mv_inference_pose_get_number_of_poses()
+ * @param[in] pose_part      The landmark index between 0 and
+ *                           the number of landmarks which can be gotten by
+ *                           mv_inference_pose_get_number_of_landmarks()
  * @param[out] location      The location of a landmark
  * @param[out] score         The score of a landmark
  *
@@ -802,10 +802,10 @@ int mv_inference_pose_get_landmark(mv_inference_pose_result_h result, int pose_i
  *
  * @since_tizen 6.0
  *
- * @param[in] result            The handle to inference result
- * @param[in] pose_index        The pose index between 0 and
- *                              the number of poses which can be gotten by
- *                              mv_inference_pose_get_number_of_poses()
+ * @param[in] result         The handle to inference result
+ * @param[in] pose_index     The pose index between 0 and
+ *                           the number of poses which can be gotten by
+ *                           mv_inference_pose_get_number_of_poses()
  * @param[out] label         The label of a pose
  *
  * @return @c 0 on success, otherwise a negative error value
index bf4e47238edd7c9148195bc2bc822ccd52b8d68a..269f8b037457a66f202b1c89774aaa5685b54ae2 100644 (file)
@@ -92,10 +92,10 @@ struct TensorInfo {
 
 struct InferenceConfig {
        /**
-                * @brief Default constructor for the @ref InferenceConfig
-                *
-                * @since_tizen 5.0
-                */
+        * @brief Default constructor for the @ref InferenceConfig
+        *
+        * @since_tizen 5.0
+        */
        InferenceConfig();
 
        std::string mConfigFilePath; /**< Path of a model configuration file */
@@ -126,33 +126,33 @@ class Inference
 {
 public:
        /**
-                * @brief   Creates an Inference class instance.
-                *
-                * @since_tizen 5.5
-                */
+        * @brief   Creates an Inference class instance.
+        *
+        * @since_tizen 5.5
+        */
        Inference();
 
        /**
-                * @brief   Destroys an Inference class instance including
-                *           its all resources.
-                *
-                * @since_tizen 5.5
-                */
+        * @brief   Destroys an Inference class instance including
+        *          all its resources.
+        *
+        * @since_tizen 5.5
+        */
        ~Inference();
 
        /**
-                * @brief   Configure modelfiles
-                *
-                * @since_tizen 5.5
-                */
+        * @brief   Configure model files
+        *
+        * @since_tizen 5.5
+        */
        void configureModelFiles(const std::string modelConfigFilePath, const std::string modelWeightFilePath,
                                                         const std::string modelUserFilePath);
 
        /**
-                * @brief Configure input information
-                *
-                * @since_tizen 6.0
-                */
+        * @brief   Configure input information
+        *
+        * @since_tizen 6.0
+        */
        int configureInputInfo(int width, int height, int dim, int ch, double stdValue, double meanValue, int dataType,
                                                   const std::vector<std::string> &names);
 
@@ -160,7 +160,7 @@ public:
                                                        const std::vector<inference_engine_tensor_info> &tensors_info);
 
        /**
-        * @brief Configure input information from model meta file.
+        * @brief   Configure input information from model meta file.
         *
         * @since_tizen 7.0
         */
@@ -169,7 +169,7 @@ public:
        int configureInputMetaInfo(mediavision::machine_learning::MetaMap &inputMetaInfo);
 
        /**
-        * @brief Configure output information from model meta file.
+        * @brief   Configure output information from model meta file.
         *
         * @since_tizen 7.0
         */
@@ -178,76 +178,76 @@ public:
        int configureOutputMetaInfo(mediavision::machine_learning::MetaMap &outputMetaInfo);
 
        /**
-                * @brief   Configure inference target devices such as CPU, GPU or NPU. (one more types can be combined)
-                *
-                * @since_tizen 6.0
-                */
+        * @brief   Configure inference target devices such as CPU, GPU or NPU. (one or more types can be combined)
+        *
+        * @since_tizen 6.0
+        */
        int configureTargetDevices(const int targetDevices);
 
        /**
-                * @brief   Configure the maximum number of inference results
-                *
-                * @since_tizen 5.5
-                */
+        * @brief   Configure the maximum number of inference results
+        *
+        * @since_tizen 5.5
+        */
        void configureOutput(const int maxOutputNumbers);
 
        /**
-                * @brief   Configure the confidence threshold
-                *
-                * @since_tizen 5.5
-                */
+        * @brief   Configure the confidence threshold
+        *
+        * @since_tizen 5.5
+        */
        void configureThreshold(const double threshold);
 
        /**
-                * @brief   Parses the metadata file path
-                *
-                * @since_tizen 6.5
-                */
+        * @brief   Parses the metadata file path
+        *
+        * @since_tizen 6.5
+        */
        int parseMetadata(const std::string filePath);
 
        /**
-                * @brief   Bind a backend engine
-                * @details Use this function to bind a backend engine for the inference.
-                *                      This creates a inference engine common class object, and loads a backend
-                *                      library which interfaces with a Neural Network runtime such as TF Lite,
-                *                      OpenCV, ARMNN and so on.
-                *
-                *                      Ps. The created inference engine common object will be released and its
-                *                              corresponding backend library will be unbound when deconstructor
-                *                              of Inference class will be called.
-                *
-                * @since_tizen 6.0
-                *
-                * @return @c 0 on success, otherwise a negative error value
-                * @retval #MEDIA_VISION_ERROR_NONE Successful
-                * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
-                */
+        * @brief   Bind a backend engine
+        * @details Use this function to bind a backend engine for the inference.
+        *          This creates an inference engine common class object and loads a backend
+        *          library which interfaces with a Neural Network runtime such as TF Lite,
+        *          OpenCV, ARMNN and so on.
+        *
+        *          Note: the created inference engine common object will be released
+        *              and its corresponding backend library unbound when the
+        *              destructor of the Inference class is called.
+        *
+        * @since_tizen 6.0
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        */
        int bind(int backend_type, int device_type);
 
        /**
-                * @brief   Load model files
-                * @details Use this function to load given model files for the inference.
-                *
-                *                      Ps. this callback should be called after Prepare callback.
-                *
-                * @since_tizen 6.0
-                *
-                * @return @c 0 on success, otherwise a negative error value
-                * @retval #MEDIA_VISION_ERROR_NONE Successful
-                * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
-                */
+        * @brief   Load model files
+        * @details Use this function to load given model files for the inference.
+        *
+        *          Note: this function should be called after the Prepare callback.
+        *
+        * @since_tizen 6.0
+        *
+        * @return @c 0 on success, otherwise a negative error value
+        * @retval #MEDIA_VISION_ERROR_NONE Successful
+        * @retval #MEDIA_VISION_ERROR_INVALID_PARAMETER Invalid parameter
+        */
        int load();
 
        /**
-                * @brief       Runs inference with a region of a given image
-                * @details Use this function to run forward pass with the given image.
-                *          The given image is preprocessed and the region of the image is
-                *          thrown to neural network. Then, the output tensor is returned.
-                *          If roi is NULL, then full source will be analyzed.
-                *
-                * @since_tizen 5.5
-                * @return @c true on success, otherwise a negative error value
-                */
+        * @brief   Runs inference with a region of a given image
+        * @details Use this function to run forward pass with the given image.
+        *          The given image is preprocessed and the region of the image is
+        *          fed to the neural network. Then, the output tensor is returned.
+        *          If roi is NULL, the full source is analyzed.
+        *
+        * @since_tizen 5.5
+        * @return @c 0 on success, otherwise a negative error value
+        */
        int run(std::vector<mv_source_h> &mvSources, std::vector<mv_rectangle_s> &rects);
 
        template<typename T> int run(std::vector<std::vector<T> > &input_tensors);
@@ -255,51 +255,51 @@ public:
        int run();
 
        /**
-                * @brief       Gets that given engine is supported or not
-                *
-                * @since_tizen 5.5
-                * @return @c true on success, otherwise a negative error value
-                */
+        * @brief   Gets whether the given engine is supported
+        *
+        * @since_tizen 5.5
+        * @return A pair of the backend name and whether it is supported
+        */
        std::pair<std::string, bool> getSupportedInferenceBackend(int backend);
 
        /**
-                * @brief       Gets the ImageClassificationResults
-                *
-                * @since_tizen 5.5
-                * @return @c true on success, otherwise a negative error value
-                */
+        * @brief   Gets the ImageClassificationResults
+        *
+        * @since_tizen 5.5
+        * @return @c 0 on success, otherwise a negative error value
+        */
        int getClassficationResults(ImageClassificationResults *results);
 
        /**
-                * @brief       Gets the ObjectDetectioResults
-                *
-                * @since_tizen 5.5
-                * @return @c true on success, otherwise a negative error value
-                */
+        * @brief   Gets the ObjectDetectionResults
+        *
+        * @since_tizen 5.5
+        * @return @c 0 on success, otherwise a negative error value
+        */
        int getObjectDetectionResults(ObjectDetectionResults *results);
 
        /**
-                * @brief       Gets the FaceDetectioResults
-                *
-                * @since_tizen 5.5
-                * @return @c true on success, otherwise a negative error value
-                */
+        * @brief   Gets the FaceDetectionResults
+        *
+        * @since_tizen 5.5
+        * @return @c 0 on success, otherwise a negative error value
+        */
        int getFaceDetectionResults(FaceDetectionResults *results);
 
        /**
-                * @brief       Gets the FacialLandmarkDetectionResults
-                *
-                * @since_tizen 5.5
-                * @return @c true on success, otherwise a negative error value
-                */
+        * @brief   Gets the FacialLandMarkDetectionResults
+        *
+        * @since_tizen 5.5
+        * @return @c 0 on success, otherwise a negative error value
+        */
        int getFacialLandMarkDetectionResults(FacialLandMarkDetectionResults *results);
 
        /**
-                * @brief       Gets the PoseLandmarkDetectionResults
-                *
-                * @since_tizen 6.0
-                * @return @c true on success, otherwise a negative error value
-                */
+        * @brief   Gets the PoseLandmarkDetectionResults
+        *
+        * @since_tizen 6.0
+        * @return @c 0 on success, otherwise a negative error value
+        */
        int getPoseLandmarkDetectionResults(std::unique_ptr<mv_inference_pose_s> &detectionResults, int width, int height);
 
        mv_engine_config_h getEngineConfig(void)